| repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes) |
|---|---|---|---|---|---|---|---|---|---|---|
BouncyKoishi/ChuCaoQi-Bot | plugins/draw_item.py | [
{
"identifier": "config",
"path": "kusa_base.py",
"snippet": "async def isUserExist(qqNum) -> bool:\nasync def isSuperAdmin(qqNum) -> bool:\nasync def buying(qqNum, itemNameBuying, itemAmountBuying, totalPrice, isUsingAdvKusa=False) -> bool:\nasync def selling(qqNum, itemNameSelling, itemAmountSelling, ... | import random
import nonebot
import dbConnection.db as baseDB
import dbConnection.draw_item as drawItemDB
import dbConnection.kusa_item as usefulItemDB
from nonebot import on_command, CommandSession
from kusa_base import config
from utils import nameDetailSplit
from itertools import groupby
from decorator import CQ_injection_check_command | 1,735 | userId = session.ctx['user_id']
if not groupId:
await session.send('暂不支持私聊抽奖^ ^')
return
if groupId not in drawConfig['groupAllowDraw']:
await session.send('本群暂不支持抽奖^ ^')
return
if groupId not in drawConfig['groupAllowItem']:
await ban(groupId, userId)
return
banRisk = drawConfig['banRisk']
banShieldInfo = await usefulItemDB.getItemStorageInfo(userId, '量子护盾')
if banShieldInfo and banShieldInfo.allowUse:
await usefulItemDB.changeItemAmount(userId, '量子护盾', -1)
banRisk = banRisk / 10
if random.random() < banRisk:
await ban(groupId, userId)
return
strippedArg = session.current_arg_text.strip()
await getItem(groupId, userId, strippedArg)
@on_command(name='十连抽', only_to_me=False)
async def itemDraw10(session: CommandSession):
groupId = session.ctx['group_id']
userId = session.ctx['user_id']
if not groupId:
await session.send('暂不支持私聊抽奖^ ^')
return
if groupId not in drawConfig['groupAllowItem']:
await session.send('本群暂不支持十连抽^ ^')
return
strippedArg = session.current_arg_text.strip()
baseLevel, poolName = await getLevelAndPoolName(strippedArg)
baseLevel = baseLevel if baseLevel is not None else 0
ticketName = ['十连券', '上级十连券', '特级十连券', '天琴十连券'][baseLevel]
drawTenTicketInfo = await usefulItemDB.getItemStorageInfo(userId, ticketName)
if not drawTenTicketInfo or not drawTenTicketInfo.allowUse:
await session.send(f'你缺少{ticketName},无法十连抽^ ^')
return
await usefulItemDB.changeItemAmount(userId, ticketName, -1)
itemList = [await getItemFromDB(baseLevel, poolName) for i in range(10)]
outputStr = '十连抽结果:\n'
for item in itemList:
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
outputStr += f'[{itemRareDescribe[item.rareRank]}]{item.name}'
if not existItemStorage:
outputStr += '(New!)'
outputStr += '\n'
await drawItemDB.setItemStorage(userId, item.id)
outputStr = outputStr[:-1]
await session.send(outputStr)
async def ban(groupNum, userId):
bot = nonebot.get_bot()
dur_time = int(1.1 ** (5 + random.random() * 70))
print(f'抽奖口球-{dur_time}s, id:{userId}, group:{groupNum}')
msg = f'获得了:口球({dur_time}s)!'
await bot.set_group_ban(group_id=groupNum, user_id=userId, duration=dur_time)
await bot.send_group_msg(group_id=groupNum, message=msg)
async def getItem(groupNum, userId, strippedArg):
_, poolName = await getLevelAndPoolName(strippedArg)
redrawDice = await usefulItemDB.getItemStorageInfo(userId, '骰子碎片')
if not redrawDice or not redrawDice.allowUse:
drawLimit = 1
else:
drawLimit = min(51, redrawDice.amount + 1)
redrawCount, item = 0, None
for i in range(drawLimit):
redrawCount = i
item = await getItemFromDB(poolName=poolName)
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
if not existItemStorage:
break
msg = ''
if redrawCount > 0:
await usefulItemDB.changeItemAmount(userId, '骰子碎片', -redrawCount)
msg += f'消耗了骰子碎片*{redrawCount},'
existItemStorage = await drawItemDB.getSingleItemStorage(userId, item.id)
msg += f'获得了:[{itemRareDescribe[item.rareRank]}]{item.name}'
if not existItemStorage:
msg += '(New!)'
if item.detail:
msg += f'\n物品说明:{item.detail}'
bot = nonebot.get_bot()
await bot.send_group_msg(group_id=groupNum, message=msg)
await drawItemDB.setItemStorage(userId, item.id)
async def getItemFromDB(startRareRank=0, poolName=None):
easyRand = 1 if startRareRank > 0 else random.random()
if easyRand < 0.7:
return await drawItemDB.getRandomItem(0, poolName)
normalRand = 1 if startRareRank > 1 else random.random()
if normalRand < 0.7:
return await drawItemDB.getRandomItem(1, poolName)
hardRand = 1 if startRareRank > 2 else random.random()
if hardRand < 0.7:
return await drawItemDB.getRandomItem(2, poolName)
lunaticRand = random.random()
if lunaticRand < 0.7:
return await drawItemDB.getRandomItem(3, poolName)
return await getItemFromDB(startRareRank, poolName)
@on_command(name='添加-Easy', aliases='物品添加-Easy', only_to_me=False)
|
itemRareDescribe = ['Easy', 'Normal', 'Hard', 'Lunatic']
drawConfig = config['drawItem']
@on_command(name='抽奖', only_to_me=False)
async def itemDraw(session: CommandSession):
groupId = session.ctx['group_id']
userId = session.ctx['user_id']
@on_command(name='添加-Easy', aliases='物品添加-Easy', only_to_me=False) | @CQ_injection_check_command | 2 | 2023-11-02 04:06:31+00:00 | 4k |
ilur98/DGQ | dgq/models/opt_a8w4.py | [
{
"identifier": "W4A8BF32OF32Linear",
"path": "dgq/models/linear.py",
"snippet": "class W4A8BF32OF32Linear(torch.nn.Module):\n # For qkv_proj\n def __init__(self, in_features, out_features, groupsize=128):\n super().__init__()\n self.in_features = in_features\n self.out_featur... | import torch
from torch import nn
from transformers.models.opt.modeling_opt import (
OPTConfig,
OPTForCausalLM,
OPTModel,
OPTPreTrainedModel,
OPTLearnedPositionalEmbedding,
OPTAttention,
OPTDecoderLayer,
OPTDecoder,
BaseModelOutputWithPast
)
from typing import Optional, Tuple, List
from dgq.models.linear import W4A8BF32OF32Linear, W4A8B8O8Linear
from dgq.models.fused import LayerNormQ
from transformers.utils import logging
from dgq.models.bmm import BMM_S8T_S8N_F32T
from torch.nn.functional import pad | 2,061 | logger = logging.get_logger(__name__)
class W4A8OPTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.qk_bmm = BMM_S8T_S8N_F32T(1.0)
self.k_proj = W4A8B8O8Linear(embed_dim, embed_dim)
self.v_proj = W4A8B8O8Linear(embed_dim, embed_dim)
self.q_proj = W4A8B8O8Linear(embed_dim, embed_dim)
| logger = logging.get_logger(__name__)
self.q_proj = W4A8B8O8Linear(embed_dim, embed_dim) | self.out_proj = W4A8BF32OF32Linear(embed_dim, embed_dim) | 0 | 2023-11-01 13:45:16+00:00 | 4k |
anilaltuner/personalized-news-agent | pages/chatbot.py | [
{
"identifier": "CUSTOM_ALGO_ID",
"path": "news.py",
"snippet": "CUSTOM_ALGO_ID = st.secrets[\"custom_algo_id\"]"
},
{
"identifier": "initialize_session",
"path": "news.py",
"snippet": "def initialize_session(user_input=\"\"):\n \"\"\"Initialize or restart the session.\"\"\"\n if u... | import streamlit as st
from firstbatch import AlgorithmLabel
from pydantic import BaseModel
from news import CUSTOM_ALGO_ID, initialize_session, fetch_content
from chat_tools.kernel import chat, setup_chat_with_memory
from markdowns.markdowns_chat import css_, sidebar | 1,793 |
# Pydantic models
class SessionData(BaseModel):
username: str
class PersonalizeData(BaseModel):
message: str
class SignalData(BaseModel):
sessionID: dict
id: str
def get_user_input():
return st.sidebar.text_input("Username/Session Name", st.session_state.get("username", ""))
def update_session_state(user_input):
st.session_state.session = st.session_state.personalized.session(
AlgorithmLabel.CUSTOM, vdbid="rss_db", custom_id=CUSTOM_ALGO_ID
)
st.session_state.batches = []
st.session_state.ids = []
st.session_state.likes = []
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches += batch
st.session_state.ids += ids
st.session_state.username = user_input
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_history = ""
st.session_state.chat_loader = 3
def display_sidebar():
user_input = get_user_input()
if user_input and st.session_state.get("username") != user_input:
update_session_state(user_input)
initialize_session(user_input)
fetch_content()
st.sidebar.title("Personalized AI Agent")
st.sidebar.markdown(sidebar)
def chat_init():
if "html_content" not in st.session_state:
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
if "username" not in st.session_state:
st.session_state.username = ""
if "init" not in st.session_state:
if st.session_state.loading == 1:
st.session_state.ids, st.session_state.batches = st.session_state.personalized.batch(st.session_state.session)
chat(model=model, prompt=prompt, message="Hello!")
st.session_state.init = True
def submit():
st.session_state.test_st = st.session_state.user_input
st.session_state.user_input = ''
def display_box():
st.markdown(css_, unsafe_allow_html=True)
if "html_content" not in st.session_state:
st.session_state.html_content = """
<div class="chat-container">
<div class="chat-box">
<div class="chat-output" id="chat-output"></div>
</div>
</div>
"""
st.session_state.chat_placeholder.markdown(st.session_state.html_content, unsafe_allow_html=True)
st.text_input("User Input", key="user_input", on_change=submit())
if "username" not in st.session_state:
st.session_state.username = ""
if st.session_state.test_st != "":
print("User input changed")
if st.session_state.chat_loader > 2:
ids, batch = st.session_state.personalized.batch(st.session_state.session)
st.session_state.batches = batch
st.session_state.ids = ids
st.session_state.chat_loader = 0
st.session_state.chat_loader += 1
chat(model=model, prompt=prompt, message=st.session_state.test_st)
st.session_state.test_st = ""
if __name__ == '__main__':
if 'user_input' not in st.session_state:
st.session_state.user_input = ''
st.session_state.chat_loader = 0
st.session_state.chat_placeholder = st.empty()
|
# Pydantic models
st.session_state.chat_placeholder = st.empty() | model, prompt = setup_chat_with_memory() | 4 | 2023-11-07 12:51:01+00:00 | 4k |
m4rkw/monzo-utils | monzo_utils/model/payment.py | [
{
"identifier": "Config",
"path": "monzo_utils/lib/config.py",
"snippet": "class Config(metaclass=Singleton):\n\n def __init__(self, config=None, config_path=None):\n if config_path is None:\n homedir = pwd.getpwuid(os.getuid()).pw_dir\n config_path = f\"{homedir}/.monzo\... | import re
import datetime
from monzo_utils.lib.config import Config
from monzo_utils.model.transaction import Transaction
from monzo_utils.lib.transactions import Transactions | 1,972 | if 'yearly_month' in self.payment_config:
if self.yearly_payment_due_this_month(self.payment_config, self.last_salary_date) is False:
return 'SKIPPED'
if 'renew_date' in self.payment_config and self.payment_config['renew_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'exclude_months' in self.payment_config and self.today.month in self.payment_config['exclude_months']:
return 'SKIPPED'
if self.last_date and self.last_date >= self.last_salary_date:
return 'PAID'
if self.due_date and self.due_date >= self.next_salary_date:
return 'SKIPPED'
return 'DUE'
@property
def payment_type(self):
return re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).replace('_',' ')
@property
def num_paid(self):
return None
@property
def num_total(self):
if 'months' in self.payment_config:
return self.payment_config['months']
return None
@property
def remaining(self):
pass
@property
def display_amount(self):
today = datetime.datetime.now()
today = datetime.date(today.year, today.month, today.day)
if 'last_amount_overrides' in Config().keys and \
self.payment_config['name'] in Config().last_amount_overrides and \
self.last_salary_amount in Config().last_amount_overrides[self.payment_config['name']]:
amount = Config().last_amount_overrides[self.payment_config['name']][self.last_salary_amount]
elif 'renewal' in self.payment_config and (today >= self.payment_config['renewal']['date'] or self.status == 'PAID'):
if 'first_payment' in self.payment_config['renewal'] and today <= self.payment_config['renewal']['date']:
amount = self.payment_config['renewal']['first_payment']
else:
if self.last_date >= self.payment_config['renewal']['date']:
amount = float(getattr(self.last_payment, self.transaction_type))
else:
amount = self.payment_config['renewal']['amount']
elif self.last_payment:
amount = float(getattr(self.last_payment, self.transaction_type))
else:
amount = self.payment_config['amount']
if self.transaction_type == 'money_in':
return 0 - amount
return amount
@property
def last_date(self):
if 'last_date' in self.cache:
return self.cache['last_date']
if 'last_date_overrides' in self.config and \
self.payment_config['name'] in self.config['last_date_overrides'] and \
self.last_salary_date in self.config['last_date_overrides'][self.payment_config['name']]:
self.cache['last_date'] = self.config['last_date_overrides'][self.payment_config['name']][self.last_salary_date]
return self.cache['last_date']
if 'desc' not in self.payment_config:
self.cache['last_date'] = None
return self.cache['last_date']
if self.last_payment:
self.cache['last_date'] = self.last_payment.date
else:
if self.older_last_payment is not None:
self.cache['last_date'] = self.older_last_payment.date
else:
self.cache['last_date'] = None
return self.cache['last_date']
@property
def last_payment(self):
if 'last_payment' in self.cache:
return self.cache['last_payment']
if 'desc' not in self.payment_config:
self.payment_config['desc'] = type(self).__name__
where=[{'clause': self.transaction_type + ' > %s', 'params': [0]}]
if 'start_date' in self.payment_config:
where.append({
'clause': '`date` >= %s',
'params': [self.payment_config['start_date']]
})
if self.always_fixed or 'fixed' in self.payment_config and self.payment_config['fixed']:
method_name = f"find_all_by_declined_and_{self.transaction_type}_and_description"
|
class Payment:
transaction_type = 'money_out'
always_fixed = False
def __init__(self, config, payment_list_config, payment_config, last_salary_date, next_salary_date, following_salary_date):
self.config = config
self.payment_list_config = payment_list_config
self.payment_config = payment_config
self.last_salary_date = last_salary_date
self.next_salary_date = next_salary_date
self.following_salary_date = following_salary_date
self.today = datetime.datetime.now()
self.cache = {}
def data(self, abbreviate=False):
if self.num_paid is not None:
suffix = '%d/%d' % (
self.num_paid,
self.num_total
)
else:
suffix = ''
if self.remaining is not None:
remaining = self.remaining
else:
remaining = None
return {
'status': self.status,
'payment_type': self.payment_type if abbreviate is False else self.abbreviate(self.payment_type),
'name': self.name,
'suffix': suffix,
'amount': self.display_amount,
'remaining': remaining,
'last_date': self.short_date(self.last_date) if abbreviate else self.last_date,
'due_date': self.short_date(self.due_date) if abbreviate else self.due_date
}
def abbreviate(self, string):
abbreviated = ''
for i in range(0, len(string)):
if string[i].isupper():
abbreviated += string[i]
return abbreviated
def short_date(self, date):
if not date:
return None
return date.strftime('%d/%m/%y')
def display(self):
data = self.data()
print("%s: %s %s %s %s %s %s %s" % (
data['status'].rjust(7),
data['payment_type'].ljust(15),
data['name'].ljust(25),
data['suffix'].ljust(5),
('£%.2f' % (data['amount'])).ljust(8),
('£%.2f' % (data['remaining'])).ljust(8) if data['remaining'] else ''.ljust(8),
data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
))
@property
def name(self):
return self.payment_config['name']
@property
def status(self):
if 'start_date' in self.payment_config and self.payment_config['start_date'] >= self.next_salary_date:
return 'SKIPPED'
if 'yearly_month' in self.payment_config:
| transactions = getattr(Transaction(), method_name)( | 1 | 2023-11-05 12:48:18+00:00 | 4k |
rossiyareich/inknhue | test.py | [
{
"identifier": "ConditionalAutoencoder",
"path": "src/conditional/conditional_autoencoder.py",
"snippet": "class ConditionalAutoencoder(nn.Module):\n def __init__(\n self,\n emb_channels: int,\n z_channels: int,\n channels: int,\n channel_multipliers: List[int],\n ... | import argparse
import gc
import logging
import os
import numpy as np
import torch
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image
from rich.traceback import install
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from tqdm.auto import tqdm
from src.conditional.conditional_autoencoder import ConditionalAutoencoder
from src.conditional.conditional_test_dataset import ConditionalTestDataset
from src.utils import resize_max | 1,823 |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--conf_path",
type=str,
required=True,
help="Path to the configuration file",
)
args = parser.parse_args()
return args
@torch.no_grad()
def main(args):
# Load configuration
logging.info("Loading configuration")
conf = OmegaConf.load(args.conf_path)
# Create output directory
logging.info("Creating output directory")
os.makedirs(conf.paths.results_path, exist_ok=True)
# Setup models
logging.info("Setting up models")
cond_autoencoder = ConditionalAutoencoder.load_from_saved(
conf.paths.pretrained_ckpt,
conf.paths.pretrained_yaml,
conf.paths.conditional_ckpt,
).to(device="cuda", dtype=torch.bfloat16)
cond_autoencoder.eval()
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s):
g, s = resize_max(g, conf.params.max_size), resize_max(s, conf.params.max_size)
g = g.resize(
(((g.size[0] + 7) // 8) * 8, ((g.size[1] + 7) // 8) * 8),
Image.Resampling.LANCZOS,
)
s = s.resize(g.size, Image.Resampling.LANCZOS)
pil_to_tensor = transforms.PILToTensor()
g, s = pil_to_tensor(g), pil_to_tensor(s)
g, s = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
g, s = g.to(device="cuda", dtype=torch.bfloat16), s.to(
device="cuda", dtype=torch.bfloat16
)
return g, s
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--conf_path",
type=str,
required=True,
help="Path to the configuration file",
)
args = parser.parse_args()
return args
@torch.no_grad()
def main(args):
# Load configuration
logging.info("Loading configuration")
conf = OmegaConf.load(args.conf_path)
# Create output directory
logging.info("Creating output directory")
os.makedirs(conf.paths.results_path, exist_ok=True)
# Setup models
logging.info("Setting up models")
cond_autoencoder = ConditionalAutoencoder.load_from_saved(
conf.paths.pretrained_ckpt,
conf.paths.pretrained_yaml,
conf.paths.conditional_ckpt,
).to(device="cuda", dtype=torch.bfloat16)
cond_autoencoder.eval()
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s):
g, s = resize_max(g, conf.params.max_size), resize_max(s, conf.params.max_size)
g = g.resize(
(((g.size[0] + 7) // 8) * 8, ((g.size[1] + 7) // 8) * 8),
Image.Resampling.LANCZOS,
)
s = s.resize(g.size, Image.Resampling.LANCZOS)
pil_to_tensor = transforms.PILToTensor()
g, s = pil_to_tensor(g), pil_to_tensor(s)
g, s = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
g, s = g.to(device="cuda", dtype=torch.bfloat16), s.to(
device="cuda", dtype=torch.bfloat16
)
return g, s
| cond_dataset = cond_dataset_full = ConditionalTestDataset( | 1 | 2023-11-03 09:35:30+00:00 | 4k |
TencentBlueKing/bkflow-feel | bkflow_feel/parsers.py | [
{
"identifier": "RangeGroupData",
"path": "bkflow_feel/data_models.py",
"snippet": "class RangeGroupData(BaseModel):\n left_val: Any\n right_val: Any\n left_operator: RangeGroupOperator\n right_operator: RangeGroupOperator"
},
{
"identifier": "RangeGroupOperator",
"path": "bkflow... | import abc
import datetime
import logging
import re
import pytz
from dateutil.parser import parse as date_parse
from .data_models import RangeGroupData, RangeGroupOperator
from .utils import FEELFunctionsManager
from .validators import BinaryOperationValidator, DummyValidator, ListsLengthValidator | 2,333 | self.value = value
def evaluate(self, context):
return self.key.evaluate(context), self.value.evaluate(context)
class Context(Expression):
def __init__(self, pairs):
self.pairs = pairs
def evaluate(self, context):
return dict(pair.evaluate(context) for pair in self.pairs)
class ContextItem(Expression):
def __init__(self, expr, keys):
self.expr = expr
self.keys = keys
def evaluate(self, context):
result = self.expr.evaluate(context)
for key in self.keys:
if not isinstance(result, dict):
return None
result = result.get(key)
return result
class Variable(Expression):
def __init__(self, name):
self.name = name
def evaluate(self, context):
return context.get(self.name)
class FunctionCall(Expression):
def __init__(self, name, args):
self.name = name
self.args = args
def evaluate(self, context):
function = context.get(self.name)
if function is None:
raise ValueError(f"Unknown function: {self.name}")
return function(*[arg.evaluate(context) for arg in self.args])
class BinaryOperator(Expression):
def __init__(self, left, right):
self.left = left
self.right = right
class SameTypeBinaryOperator(BinaryOperator):
validator_cls = BinaryOperationValidator
def __init__(self, operation, left, right):
super().__init__(left, right)
self.operation = operation
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
self.validator_cls()(left_val, right_val)
return getattr(self, self.operation)(left_val, right_val)
def add(self, left_val, right_val):
return left_val + right_val
def subtract(self, left_val, right_val):
return left_val - right_val
def multiply(self, left_val, right_val):
return left_val * right_val
def divide(self, left_val, right_val):
return left_val / right_val
def power(self, left_val, right_val):
return left_val**right_val
def equal(self, left_val, right_val):
return left_val == right_val
def less_than(self, left_val, right_val):
return left_val < right_val
def greater_than(self, left_val, right_val):
return left_val > right_val
def less_than_or_equal(self, left_val, right_val):
return left_val <= right_val
def greater_than_or_equal(self, left_val, right_val):
return left_val >= right_val
class NotEqual(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) != self.right.evaluate(context)
class And(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) and self.right.evaluate(context)
class Or(BinaryOperator):
def evaluate(self, context):
return self.left.evaluate(context) or self.right.evaluate(context)
class In(BinaryOperator):
def evaluate(self, context):
left_val = self.left.evaluate(context)
right_val = self.right.evaluate(context)
if isinstance(self.right, RangeGroup):
left_operation = (
left_val > right_val.left_val
| # -*- coding: utf-8 -*-
logger = logging.getLogger(__name__)
class Expression(metaclass=abc.ABCMeta):
validator_cls = DummyValidator
@abc.abstractmethod
def evaluate(self, context):
pass
class CommonExpression(Expression):
def __init__(self, value):
self.value = value
def evaluate(self, context):
return self.value
class Expr(CommonExpression):
def evaluate(self, context):
return self.value.evaluate(context)
class Number(CommonExpression):
pass
class String(CommonExpression):
pass
class Boolean(CommonExpression):
pass
class Null(Expression):
def evaluate(self, context):
return None
class List(Expression):
def __init__(self, *items):
self.items = items
def evaluate(self, context):
return [item.evaluate(context) for item in self.items]
class ListItem(Expression):
def __init__(self, list_expr, index):
self.list_expr = list_expr
self.index = index
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list) or self.index == 0 or len(items) < abs(self.index):
return None
items = items[self.index - 1] if self.index > 0 else items[self.index]
return items
class ListMatch(Expression):
validator_cls = ListsLengthValidator
def __init__(self, iter_pairs, expr):
self.iter_pairs = iter_pairs
self.expr = expr
def evaluate_and_validate_iter_pairs(self, context):
iter_pairs = [(pair[0].value, pair[1].evaluate(context)) for pair in self.iter_pairs]
self.validator_cls()(lists=[pair[1] for pair in iter_pairs])
return iter_pairs
class ListEvery(ListMatch):
def evaluate(self, context):
iter_pairs = self.evaluate_and_validate_iter_pairs(context)
for i in range(0, len(iter_pairs[0][1])):
tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}
if self.expr.evaluate(tmp_context) is False:
return False
return True
class ListSome(ListMatch):
def evaluate(self, context):
iter_pairs = self.evaluate_and_validate_iter_pairs(context)
for i in range(0, len(iter_pairs[0][1])):
tmp_context = {**context, **{pair[0]: pair[1][i] for pair in iter_pairs}}
if self.expr.evaluate(tmp_context) is True:
return True
return False
class ListFilter(Expression):
def __init__(self, list_expr, filter_expr):
self.list_expr = list_expr
self.filter_expr = filter_expr
def evaluate(self, context):
items = self.list_expr.evaluate(context)
if not isinstance(items, list):
return None
result = []
for item in items:
try:
# when item is a dict and the key compared in the filter is missing, this may raise
if self.filter_expr.evaluate(item if isinstance(item, dict) else {"item": item}):
result.append(item)
except Exception as e:
logger.exception(e)
pass
return result
class Pair(Expression):
def __init__(self, key, value):
self.key = key
self.value = value
left_val > right_val.left_val | if right_val.left_operator == RangeGroupOperator.GT | 1 | 2023-11-09 13:47:26+00:00 | 4k |
namedgraph/oxijen | oxijen/model_impl/impl.py | [
{
"identifier": "Resource",
"path": "oxijen/rdf_model.py",
"snippet": "class Resource(ABC):\n\n @property\n def node(self):\n return self._node\n\n @property\n def graph(self):\n return self._graph\n\n @property\n def is_anon(self):\n if isinstance(self.node, Named... | from oxijen.rdf_model import Resource, Property, Graph, Dataset
from oxijen.model_impl.xsd import XSD
from pyoxigraph import Store, Triple, BlankNode, NamedNode, Literal, Quad, DefaultGraph
from typing import Iterator, Union, Optional, Any | 1,777 | def __hash__(self):
return hash(self.node.value)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.node.value == other.node.value
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self) -> str:
return self.node.__str__()
def __repr__(self) -> str:
return self.__str__()
def add_property(self, property: 'Property', value: Union[Resource, Literal]) -> 'Resource':
if isinstance(value, Resource):
value = value.node
self.graph.store.add(Quad(self.node, property.node, value, self.graph.name)) # assumes GraphStoreImpl!
return self
def list_properties(self, property: Optional[Property] = None) -> Iterator[Triple]:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quads = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
return map(lambda quad: quad.triple, quads)
def remove_all(self, property: Optional[Property] = None) -> Resource:
if isinstance(property, Property):
property_node = property.node
else:
property_node = None
quad_iter = self.graph.store.quads_for_pattern(self.node, property_node, None, None)
for quad in quad_iter:
self.graph.store.remove(quad)
return self
class PropertyImpl(ResourceImpl, Property):
pass
class GraphImpl(Graph):
def create_resource(self, uri: Optional[str] = None) -> Resource:
if uri is not None:
return ResourceImpl(NamedNode(uri), self)
else:
return ResourceImpl(BlankNode(), self)
def create_property(self, uri: str) -> Property:
return ResourceImpl(NamedNode(uri), self)
def create_literal(self, value: str, language: Optional[str] = None) -> Literal:
return Literal(value, language=language) # should it be xsd:string-typed by default as per RDF 1.1?
def create_typed_literal(self, value: Any, datatype: Optional[Union[str, NamedNode]] = None) -> Literal:
if datatype is None:
match value:
case int():
datatype = NamedNode(XSD.INTEGER.value)
case str():
datatype = NamedNode(XSD.STRING.value)
case float():
datatype = NamedNode(XSD.FLOAT.value)
# TO-DO: support more types
case _:
raise TypeError('Unsupported type conversion')
else:
if type(datatype) is str:
datatype = NamedNode(datatype)
return Literal(str(value), datatype=datatype)
class GraphStoreImpl(GraphImpl):
def __init__(self, store: Store, name: Union[BlankNode, NamedNode]):
self.store = store
self.name = name
def __len__(self) -> int:
return len(list(self.list_triples()))
def list_subjects(self) -> Iterator[Resource]:
return iter(set(map(lambda triple: ResourceImpl(triple.subject, self), self.list_triples())))
def list_triples(self) -> Iterator[Triple]:
quads = self.store.quads_for_pattern(None, None, None, self.name)
return map(lambda quad: quad.triple, quads)
def add(self, triples: Union[Iterator[Triple], 'Graph']) -> 'Graph':
if isinstance(triples, Graph):
triples = triples.list_triples()
quads = map(lambda triple: Quad(triple.subject, triple.predicate, triple.object, self.name), triples)
self.store.extend(quads)
return self
def remove_all(self) -> 'Graph':
self.store.remove_graph(self.name)
return self
|
class ResourceImpl(Resource):
def __init__(self, node: Union[BlankNode, NamedNode], graph: Graph):
self._node = node
self._graph = graph
def __hash__(self):
| class DatasetStoreImpl(Dataset): | 3 | 2023-11-03 19:50:51+00:00 | 4k |
sivasurend/lyzr | build/lib/lyzr/utils/chat_utils.py | [
{
"identifier": "LyzrLLMFactory",
"path": "lyzr/base/llm.py",
"snippet": "class LyzrLLMFactory:\n\n def __init__(self) -> None:\n None\n @staticmethod\n def from_defaults(model: str = \"gpt-3.5-turbo\", **kwargs) -> LLM:\n return LiteLLM(model=model, **kwargs)"
},
{
"ident... | from typing import Union, Optional, List
from llama_index.chat_engine.types import BaseChatEngine, ChatMode
from llama_index.embeddings.utils import EmbedType
from lyzr.base.llm import LyzrLLMFactory
from lyzr.base.service import LyzrService
from lyzr.base.vector_store import LyzrVectorStoreIndex
from lyzr.utils.document_reading import (
read_pdf_as_documents,
read_docx_as_documents,
read_txt_as_documents,
read_website_as_documents,
read_webpage_as_documents,
read_youtube_as_documents,
) | 1,907 |
def pdf_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
documents = read_pdf_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = {} if llm_params is None else llm_params
vector_store_params = (
{"vector_store_type": "LanceDBVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
llm = LyzrLLMFactory.from_defaults(**llm_params)
|
def pdf_chat_(
llm = LyzrLLMFactory.from_defaults(**llm_params) | service_context = LyzrService.from_defaults( | 1 | 2023-11-07 14:52:08+00:00 | 4k |
allmonday/pydantic2-resolve | tests/resolver/test_24_parse_to_obj_for_dataclass.py | [
{
"identifier": "Resolver",
"path": "pydantic2_resolve/resolver.py",
"snippet": "class Resolver:\n \"\"\"\n Entrypoint of a resolve action\n \"\"\"\n def __init__(\n self, \n loader_filters: Optional[Dict[Any, Dict[str, Any]]] = None, \n loader_instances: Opt... | from typing import List
from dataclasses import dataclass, field
from pydantic2_resolve import Resolver, LoaderDepend
import pytest | 2,573 |
@pytest.mark.asyncio
async def test_loader_depends_1():
BOOKS = {
1: [{'name': 'book1'}, {'name': 'book2'}],
2: [{'name': 'book3'}, {'name': 'book4'}],
3: [{'name': 'book1'}, {'name': 'book2'}],
}
@dataclass
class Book():
name: str
async def batch_load_fn(keys):
books = [[dict(name=bb['name']) for bb in BOOKS.get(k, [])] for k in keys]
return books
@dataclass
class Student():
id: int
name: str
books: List[Book] = field(default_factory=list)
|
@pytest.mark.asyncio
books: List[Book] = field(default_factory=list) | def resolve_books(self, loader=LoaderDepend(batch_load_fn)): | 1 | 2023-11-01 02:37:26+00:00 | 4k |
WolfgangFahl/dcm | tests/test_rwth_aachen_module.py | [
{
"identifier": "CompetenceArea",
"path": "dcm/dcm_core.py",
"snippet": "class CompetenceArea(CompetenceElement):\n \"\"\"\n Represents a specific area within a competence aspect, containing various facets.\n\n Attributes:\n facets (List[CompetenceFacet]): A list of CompetenceFacet objec... | import json
import os
from ngwidgets.basetest import Basetest
from dcm.dcm_core import (
CompetenceArea,
CompetenceAspect,
CompetenceElement,
CompetenceFacet,
CompetenceLevel,
CompetenceTree,
) | 2,946 | """
Created on 2023-11-11
@author: wf
"""
class TestModule(Basetest):
"""
test RWTH Aachen Modulhandbuch
"""
def get_name(self, json_node: dict, lang: str = "en") -> str:
"""
Retrieves the name of a specified JSON node in the specified language.
Args:
json_node (dict): The JSON node from which the name is to be extracted.
lang (str, optional): The language in which the name should be retrieved. Defaults to "en" (English).
Returns:
str: The name of the JSON node in the specified language. The result might be german (de) if
there is only a single name specified in the Modulhandbuch XML input which is germany by default
"""
names = json_node.get("NAME", [])
name = "?"
if isinstance(names, list):
for lang_name in names:
if isinstance(lang_name, dict):
node_lang = lang_name.get("@LANG", None)
if node_lang and node_lang == lang:
name = lang_name.get("#text", "?")
else:
# what's up here?
# might be german now ..
name = names["#text"]
pass
return name
def create_competence_element(
self, parent: CompetenceElement, json_node: dict, url: str
):
"""
convert the given json node to a competence element based on the level
1: CompetenceTree
2: CompetenceAspect
3: CompetenceFacet
Args:
parent(CompetenceElement): the parent element - None for the tree
json_node(dict): the current node to convert
url(str): the base_url for the node
"""
competence_element = None
lvl = json_node.get("@LVL", "?")
credits_str = json_node.get("@CREDITS", None)
credits = int(credits_str) if credits_str else None
level = int(lvl)
nr = json_node.get("@NR")
desc = None
name = self.get_name(json_node)
if lvl == "1":
| """
if lvl == "1": | tree = CompetenceTree( | 5 | 2023-11-06 09:24:24+00:00 | 4k |
StoneMoe/ASub | app/ui/views/project_view.py | [
{
"identifier": "Project",
"path": "app/core/models/project.py",
"snippet": "class Project:\r\n path: str # 工程目录(相对位置)\r\n name: str # 工程名称\r\n\r\n def __init__(self, name: str, existed_err=False):\r\n self.name = name\r\n self.path = os.path.join(Core.PROJ_DIR, name)\r\n ... | import os
from typing import Optional
from PyQt5.QtCore import pyqtSignal, QPoint, Qt
from PyQt5.QtWidgets import QFrame, QVBoxLayout, QHBoxLayout, QAction
from qfluentwidgets import PushButton, FluentIcon, RoundMenu, ToolButton, MessageBox, StateToolTip
from app.core.models.project import Project, TranscribeOpt
from app.core.utils.generic import info
from app.ui.components.label import AutoLabel
from app.ui.config import cfg
from app.ui.const import CONTAINER_MARGINS
from app.ui.utils import run_in_thread, clear_layout, open_folder
from app.ui.windows.subtitle_window import SubtitleWindow
| 3,257 |
class ProjectView(QFrame):
sig_subtitle_list_loaded = pyqtSignal(list)
sig_transcribe_running = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setObjectName('proj-view')
self.project: Optional[Project] = None
self.state_tooltip = None
self.layout = QVBoxLayout(self)
self.layout_title = QHBoxLayout(self)
self.layout_subtitles = QVBoxLayout(self)
|
class ProjectView(QFrame):
self.layout_subtitles = QVBoxLayout(self)
| self.label_title = AutoLabel('<Loading>', self, Qt.ElideMiddle)
| 3 | 2023-11-07 16:45:43+00:00 | 4k |
openshift/lightspeed-service | ols/src/llms/llm_loader.py | [
{
"identifier": "constants",
"path": "ols/src/constants.py",
"snippet": "SUMMARIZATION_TEMPLATE = \"\"\"\nThe following context contains several pieces of documentation. Please summarize the context for the user.\nDocumentation context:\n{context_str}\n\nSummary:\n\n\"\"\"\nSUMMARY_TASK_BREAKDOWN_TEMPLA... | import inspect
import os
import warnings
import json
from typing import Optional
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from ols.src import constants
from ols.utils import config
from ols.utils.logger import Logger
from langchain.chat_models import ChatOpenAI
from genai.credentials import Credentials
from genai.extensions.langchain import LangChainInterface
from genai.schemas import GenerateParams
from langchain.llms import Ollama
from langchain.llms import HuggingFaceTextGenInference
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.foundation_models.extensions.langchain import (
WatsonxLLM,
)
from ibm_watson_machine_learning.metanames import (
GenTextParamsMetaNames as GenParams,
) | 3,114 | """LLM backend libraries loader."""
# workaround to disable UserWarning
warnings.simplefilter("ignore", UserWarning)
class UnsupportedProvider(Exception):
"""Exception thrown when provided provider is not supported or is unknown."""
class LLMLoader:
"""Note: This class loads the LLM backend libraries if the specific LLM is loaded.
Known caveats: Currently supports a single instance/model per backend.
llm_backends: a string with a supported llm backend name ('openai','ollama','tgi','watson','bam').
params : (optional) array of parameters to override and pass to the llm backend
# using the class and overriding specific parameters
llm_backend = 'ollama'
params = {'temperature': 0.02, 'top_p': 0.95}
llm_config = LLMLoader(llm_backend=llm_backend, params=params)
llm_chain = LLMChain(llm=llm_config.llm, prompt=prompt)
"""
def __init__(
self,
provider: Optional[str] = None,
model: Optional[str] = None,
url: Optional[str] = None,
params: Optional[dict] = None,
logger=None,
) -> None:
"""Initialize loader using provided provider, model, and other parameters."""
self.logger = logger if logger is not None else Logger("llm_loader").logger
if provider is None:
raise Exception("ERROR: Missing provider")
self.provider = provider
self.url = url
if model is None:
raise Exception("ERROR: Missing model")
self.model = model
# return empty dictionary if not defined
self.llm_params = params if params else {}
self.llm = None
self._set_llm_instance()
def _set_llm_instance(self):
self.logger.debug(
f"[{inspect.stack()[0][3]}] Loading LLM {self.model} from {self.provider}"
)
# convert to string to handle None or False definitions
match str(self.provider).lower():
case constants.PROVIDER_OPENAI:
self._openai_llm_instance()
case constants.PROVIDER_OLLAMA:
self._ollama_llm_instance()
case constants.PROVIDER_TGI:
self._tgi_llm_instance()
case constants.PROVIDER_WATSONX:
self._watson_llm_instance()
case constants.PROVIDER_BAM:
self._bam_llm_instance()
case _:
msg = f"ERROR: Unsupported LLM {self.provider}"
self.logger.error(msg)
raise UnsupportedProvider(msg)
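# Illustrative dispatch (the model string is hypothetical, and this assumes
# constants.PROVIDER_OPENAI == "openai" as the docstring suggests):
#   LLMLoader(provider="openai", model="gpt-3.5-turbo")  -> _openai_llm_instance()
#   LLMLoader(provider="unknown", model="m")             -> raises UnsupportedProvider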
def _openai_llm_instance(self):
self.logger.debug(f"[{inspect.stack()[0][3]}] Creating OpenAI LLM instance")
try:
from langchain.chat_models import ChatOpenAI  # restored lazy import so a missing optional dependency is caught below
except Exception:
self.logger.error(
"ERROR: Missing openai libraries. Skipping loading backend LLM."
)
return
| """LLM backend libraries loader."""
# workaround to disable UserWarning
warnings.simplefilter("ignore", UserWarning)
class UnsupportedProvider(Exception):
"""Exception thrown when provided provider is not supported or is unknown."""
class LLMLoader:
"""Note: This class loads the LLM backend libraries if the specific LLM is loaded.
Known caveats: Currently supports a single instance/model per backend.
llm_backends: a string with a supported llm backend name ('openai','ollama','tgi','watson','bam').
params : (optional) array of parameters to override and pass to the llm backend
# using the class and overriding specific parameters
llm_backend = 'ollama'
params = {'temperature': 0.02, 'top_p': 0.95}
llm_config = LLMLoader(llm_backend=llm_backend, params=params)
llm_chain = LLMChain(llm=llm_config.llm, prompt=prompt)
"""
def __init__(
self,
provider: Optional[str] = None,
model: Optional[str] = None,
url: Optional[str] = None,
params: Optional[dict] = None,
logger=None,
) -> None:
"""Initialize loader using provided provider, model, and other parameters."""
self.logger = logger if logger is not None else Logger("llm_loader").logger
if provider is None:
raise Exception("ERROR: Missing provider")
self.provider = provider
self.url = url
if model is None:
raise Exception("ERROR: Missing model")
self.model = model
# return empty dictionary if not defined
self.llm_params = params if params else {}
self.llm = None
self._set_llm_instance()
def _set_llm_instance(self):
self.logger.debug(
f"[{inspect.stack()[0][3]}] Loading LLM {self.model} from {self.provider}"
)
# convert to string to handle None or False definitions
match str(self.provider).lower():
case constants.PROVIDER_OPENAI:
self._openai_llm_instance()
case constants.PROVIDER_OLLAMA:
self._ollama_llm_instance()
case constants.PROVIDER_TGI:
self._tgi_llm_instance()
case constants.PROVIDER_WATSONX:
self._watson_llm_instance()
case constants.PROVIDER_BAM:
self._bam_llm_instance()
case _:
msg = f"ERROR: Unsupported LLM {self.provider}"
self.logger.error(msg)
raise UnsupportedProvider(msg)
def _openai_llm_instance(self):
self.logger.debug(f"[{inspect.stack()[0][3]}] Creating OpenAI LLM instance")
try:
from langchain.chat_models import ChatOpenAI  # restored lazy import so a missing optional dependency is caught below
except Exception:
self.logger.error(
"ERROR: Missing openai libraries. Skipping loading backend LLM."
)
return | provider = config.llm_config.providers[constants.PROVIDER_OPENAI] | 0 | 2023-11-08 06:29:41+00:00 | 4k |
xlcaptain/LLM-Workbench | component/knowledge_chat.py | [
{
"identifier": "ElasticsearchServer",
"path": "component/pipelines/es.py",
"snippet": "class ElasticsearchServer:\n def __init__(self):\n self.client = Elasticsearch(\n ES_URL,\n verify_certs=False,\n )\n self.embedding = Embeddings()\n self.es = Ela... | import time
import os
import streamlit as st
import pandas as pd
from .pipelines.es import ElasticsearchServer
from .pipelines.utils import handle_response, create_message
from .pipelines.prompt import KNOWLEDGE_PROMPT, CHAT_EXAMPLES | 2,058 |
BAICHUAN_URL = os.getenv("BAICHUAN_URL")
def handle_kb_qa(prompt, top_k, threshold):
index_name = 'audit_index'
es_server = ElasticsearchServer()
# es_server.doc_upload(index_name=index_name)
result = es_server.doc_search(index_name=index_name, query=prompt, top_k=top_k, method='hybrid',
knn_boost=threshold)
context = "\n".join([doc['content'] for doc in result])
doc_prompt = KNOWLEDGE_PROMPT.format(query=prompt, context=context)
reference = [
{
"text": doc['content'],
"source": doc['source'],
"score": float(doc['score'])
}
for doc in result
]
return doc_prompt, reference, True
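# Illustrative shape of the values returned above (the source name and score
# are hypothetical):
#   doc_prompt -> KNOWLEDGE_PROMPT filled with the query and the retrieved context
#   reference  -> [{"text": "...", "source": "audit_doc.pdf", "score": 0.87}, ...]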
def knowledge_chat():
with st.sidebar:
# TODO: bind the conversation model to the session
def on_mode_change():
st.session_state.messages = []
mode = st.session_state.vec_modify
text = f"已切换到 {mode} 模式。"
if mode == "知识库问答":
cur_kb = st.session_state.get("selected_kb")
if cur_kb:
text = f"{text} 当前知识库: `{cur_kb}`。"
st.toast(text)
# Model parameter selection
temperature = st.slider("Temperature:", 0.0, 1.0, 0.7, 0.05)
history_len = st.number_input("历史对话轮数:", 0, 10, 1)
# Knowledge base configuration
with st.expander("知识库配置", True):
vec_modify = st.selectbox("请选择相似度搜索模式:",
["Elasticsearch",
],
index=0,
on_change=on_mode_change,
key="vec_modify",
)
kb_top_k = st.number_input("匹配知识条数:", 1, 6, 5)
score_threshold = st.slider(
f"{'知识匹配分数阈值:' if vec_modify == 'Faiss向量库' else '语义关键字权重:(0:代表仅使用关键字)'}:",
0.0, 1.0, float(0.5), 0.01)
# Clear the conversation
cols = st.columns(2)
if cols[1].button(
"清空对话",
use_container_width=True,
):
st.session_state.messages = []
st.experimental_rerun()
st.title("💬 审计知识库问答")
chat_input_placeholder = "请输入对话内容,换行请使用Shift+Enter "
df = pd.DataFrame({"示例": CHAT_EXAMPLES})
with st.expander("DataFrame", False):
st.table(df)
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input(chat_input_placeholder, key="prompt"):
full_response = ''
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append(create_message("user", prompt))
with st.chat_message("assistant"):
message_placeholder = st.empty()
with st.spinner("思考中..."):
doc_prompt, reference, is_true = handle_kb_qa(prompt,
st.session_state.get("top_k", kb_top_k),
score_threshold)
if is_true:
|
BAICHUAN_URL = os.getenv("BAICHUAN_URL")
def handle_kb_qa(prompt, top_k, threshold):
index_name = 'audit_index'
es_server = ElasticsearchServer()
# es_server.doc_upload(index_name=index_name)
result = es_server.doc_search(index_name=index_name, query=prompt, top_k=top_k, method='hybrid',
knn_boost=threshold)
context = "\n".join([doc['content'] for doc in result])
doc_prompt = KNOWLEDGE_PROMPT.format(query=prompt, context=context)
reference = [
{
"text": doc['content'],
"source": doc['source'],
"score": float(doc['score'])
}
for doc in result
]
return doc_prompt, reference, True
def knowledge_chat():
with st.sidebar:
# TODO: bind the conversation model to the session
def on_mode_change():
st.session_state.messages = []
mode = st.session_state.vec_modify
text = f"已切换到 {mode} 模式。"
if mode == "知识库问答":
cur_kb = st.session_state.get("selected_kb")
if cur_kb:
text = f"{text} 当前知识库: `{cur_kb}`。"
st.toast(text)
# Model parameter selection
temperature = st.slider("Temperature:", 0.0, 1.0, 0.7, 0.05)
history_len = st.number_input("历史对话轮数:", 0, 10, 1)
# Knowledge base configuration
with st.expander("知识库配置", True):
vec_modify = st.selectbox("请选择相似度搜索模式:",
["Elasticsearch",
],
index=0,
on_change=on_mode_change,
key="vec_modify",
)
kb_top_k = st.number_input("匹配知识条数:", 1, 6, 5)
score_threshold = st.slider(
f"{'知识匹配分数阈值:' if vec_modify == 'Faiss向量库' else '语义关键字权重:(0:代表仅使用关键字)'}:",
0.0, 1.0, float(0.5), 0.01)
# Clear the conversation
cols = st.columns(2)
if cols[1].button(
"清空对话",
use_container_width=True,
):
st.session_state.messages = []
st.experimental_rerun()
st.title("💬 审计知识库问答")
chat_input_placeholder = "请输入对话内容,换行请使用Shift+Enter "
df = pd.DataFrame({"示例": CHAT_EXAMPLES})
with st.expander("DataFrame", False):
st.table(df)
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input(chat_input_placeholder, key="prompt"):
full_response = ''
with st.chat_message("user"):
st.markdown(prompt)
st.session_state.messages.append(create_message("user", prompt))
with st.chat_message("assistant"):
message_placeholder = st.empty()
with st.spinner("思考中..."):
doc_prompt, reference, is_true = handle_kb_qa(prompt,
st.session_state.get("top_k", kb_top_k),
score_threshold)
if is_true: | full_response = handle_response([ | 1 | 2023-11-01 07:54:03+00:00 | 4k |
NicolasZucchet/Online-learning-LR-dependencies | online_lru/rec.py | [
{
"identifier": "matrix_init",
"path": "online_lru/rec_init.py",
"snippet": "def matrix_init(key, shape, dtype=jnp.float32, normalization=1):\n return random.normal(key=key, shape=shape, dtype=dtype) / normalization"
},
{
"identifier": "truncated_normal_matrix_init",
"path": "online_lru/r... | from functools import partial
from flax import linen as nn
from .rec_init import matrix_init, truncated_normal_matrix_init, theta_init, nu_init, gamma_log_init
from flax.core.frozen_dict import unfreeze
import jax
import jax.numpy as jnp | 1,623 | d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
gamma_norm: bool = True # use gamma normalization
exp_param: bool = True # exponential parametrization for lambda
r_min: float = 0.0 # smallest eigenvalue norm
r_max: float = 1.0 # largest eigenvalue norm
max_phase: float = 6.28 # max phase eigenvalue
training_mode: str = "bptt" # which learning algorithm that will be used
training: bool = False # TODO remove, for debugging purposes
def get_diag_lambda(self, nu=None, theta=None):
"""
Transform parameters nu and theta into the diagonal of the recurrent
Lambda matrix.
Args:
nu, theta array[N]: when set to their default values, None, the
parameters will take the values of the Module.
NOTE: these arguments are added in order to backpropagate through this
transformation.
"""
if nu is None:
nu = self.nu
if theta is None:
theta = self.theta
if self.exp_param:
theta = jnp.exp(theta)
nu = jnp.exp(nu)
return jnp.exp(-nu + 1j * theta)
def get_diag_gamma(self):
"""
Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.
"""
if self.gamma_norm:
return jnp.exp(self.gamma_log)
else:
return jnp.ones((self.d_hidden,))
def get_B(self):
"""
Get input to hidden matrix B.
"""
return self.B_re + 1j * self.B_im
def get_B_norm(self):
"""
Get modulated input to hidden matrix gamma B.
"""
return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
C = self.C_re + 1j * self.C_im
D = self.D
y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)
return y
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs
Return:
hidden_states array[T, N]
"""
# Materializing the diagonal of Lambda and projections
diag_lambda = self.get_diag_lambda()
B_norm = self.get_B_norm()
# Running the LRU + output projection
# For details on parallel scan, check discussion in Smith et al (2022).
Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)
Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)
elements = (Lambda_elements, Bu_elements)
if self.training_mode == "bptt":
_, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)
else:
_, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)
return hidden_states
def setup(self):
# Check that desired approximation is handled
if self.training_mode == "online_snap1":
raise NotImplementedError("SnAp-1 not implemented for LRU")
assert self.training_mode in [
"bptt",
"online_full",
"online_full_rec",
"online_full_rec_simpleB",
"online_snap1", # same as online_full
"online_spatial",
"online_1truncated",
"online_reservoir",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
# NOTE: if exp_param is true, self.theta and self.nu actually represent the log of nu and
# theta. Lambda is initialized uniformly in the complex plane.
self.theta = self.param(
"theta",
partial(theta_init, max_phase=self.max_phase, log=self.exp_param),
(self.d_hidden,),
) # phase of lambda in [0, max_phase]
self.nu = self.param(
"nu",
partial(nu_init, r_min=self.r_min, r_max=self.r_max, log=self.exp_param),
(self.d_hidden,),
) # norm of lambda in [r_min, r_max]
if self.gamma_norm:
self.gamma_log = self.param(
|
# Parallel scan operations
@jax.vmap
def binary_operator_diag(q_i, q_j):
"""Binary operator for parallel scan of linear recurrence"""
A_i, b_i = q_i
A_j, b_j = q_j
return A_j * A_i, A_j * b_i + b_j
@jax.vmap
def binary_operator_diag_spatial(q_i, q_j):
"""Same as above but stop the gradient for the recurrent connection"""
A_i, b_i = q_i
A_j, b_j = q_j
return A_j * A_i, jax.lax.stop_gradient(A_j * b_i) + b_j
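# Illustrative use of the operator above (mirrors get_hidden_states; the
# shapes below are hypothetical): scanning binary_operator_diag over per-step
# decay factors and inputs solves x_t = lambda_t * x_{t-1} + b_t for all t at once.
#   lam = jnp.full((16, 4), 0.9 + 0.0j)            # [T, N] diagonal of Lambda
#   bu = jnp.ones((16, 4), dtype=jnp.complex64)    # [T, N] projected inputs B u_t
#   _, xs = jax.lax.associative_scan(binary_operator_diag, (lam, bu))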
class LRU(nn.Module):
"""
LRU layer that updates internal elegibility traces to allow online learning.
"""
d_hidden: int # hidden state dimension
d_model: int # input and output dimensions
seq_length: int # time sequence length
gamma_norm: bool = True # use gamma normalization
exp_param: bool = True # exponential parametrization for lambda
r_min: float = 0.0 # smallest eigenvalue norm
r_max: float = 1.0 # largest eigenvalue norm
max_phase: float = 6.28 # max phase eigenvalue
training_mode: str = "bptt" # which learning algorithm that will be used
training: bool = False # TODO remove, for debugging purposes
def get_diag_lambda(self, nu=None, theta=None):
"""
Transform parameters nu and theta into the diagonal of the recurrent
Lambda matrix.
Args:
nu, theta array[N]: when set to their default values, None, the
parameters will take the values of the Module.
NOTE: these arguments are added in order to backpropagate through this
transformation.
"""
if nu is None:
nu = self.nu
if theta is None:
theta = self.theta
if self.exp_param:
theta = jnp.exp(theta)
nu = jnp.exp(nu)
return jnp.exp(-nu + 1j * theta)
def get_diag_gamma(self):
"""
Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.
"""
if self.gamma_norm:
return jnp.exp(self.gamma_log)
else:
return jnp.ones((self.d_hidden,))
def get_B(self):
"""
Get input to hidden matrix B.
"""
return self.B_re + 1j * self.B_im
def get_B_norm(self):
"""
Get modulated input to hidden matrix gamma B.
"""
return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)
def to_output(self, inputs, hidden_states):
"""
Compute output given inputs and hidden states.
Args:
inputs array[T, H].
hidden_states array[T, N].
"""
C = self.C_re + 1j * self.C_im
D = self.D
y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)
return y
def get_hidden_states(self, inputs):
"""
Compute the hidden states corresponding to inputs
Return:
hidden_states array[T, N]
"""
# Materializing the diagonal of Lambda and projections
diag_lambda = self.get_diag_lambda()
B_norm = self.get_B_norm()
# Running the LRU + output projection
# For details on parallel scan, check discussion in Smith et al (2022).
Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)
Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)
elements = (Lambda_elements, Bu_elements)
if self.training_mode == "bptt":
_, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)
else:
_, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)
return hidden_states
def setup(self):
# Check that desired approximation is handled
if self.training_mode == "online_snap1":
raise NotImplementedError("SnAp-1 not implemented for LRU")
assert self.training_mode in [
"bptt",
"online_full",
"online_full_rec",
"online_full_rec_simpleB",
"online_snap1", # same as online_full
"online_spatial",
"online_1truncated",
"online_reservoir",
]
self.online = "online" in self.training_mode # whether we compute the gradient online
if self.online:
self.approximation_type = self.training_mode[7:]
# NOTE: if exp_param is true, self.theta and self.nu actually represent the log of nu and
# theta. Lambda is initialized uniformly in the complex plane.
self.theta = self.param(
"theta",
partial(theta_init, max_phase=self.max_phase, log=self.exp_param),
(self.d_hidden,),
) # phase of lambda in [0, max_phase]
self.nu = self.param(
"nu",
partial(nu_init, r_min=self.r_min, r_max=self.r_max, log=self.exp_param),
(self.d_hidden,),
) # norm of lambda in [r_min, r_max]
if self.gamma_norm:
self.gamma_log = self.param( | "gamma_log", partial(gamma_log_init, log=self.exp_param), (self.nu, self.theta) | 4 | 2023-11-01 13:18:32+00:00 | 4k |
uygarkurt/video-retalking | models/LNet.py | [
{
"identifier": "RETURNX",
"path": "models/transformer.py",
"snippet": "class RETURNX(nn.Module):\n def __init__(self,):\n super().__init__()\n\n def forward(self, x, y): # x is the cropped, y is the foreign reference \n return x"
},
{
"identifier": "Transformer",
"path":... | import functools
import torch
import torch.nn as nn
from models.transformer import RETURNX, Transformer
from models.base_blocks import Conv2d, LayerNorm2d, FirstBlock2d, DownBlock2d, UpBlock2d, \
FFCADAINResBlocks, Jump, FinalBlock2d | 2,505 |
class Visual_Encoder(nn.Module):
def __init__(self, image_nc, ngf, img_f, layers, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Visual_Encoder, self).__init__()
self.layers = layers
self.first_inp = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
self.first_ref = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
for i in range(layers):
in_channels = min(ngf*(2**i), img_f)
out_channels = min(ngf*(2**(i+1)), img_f)
model_ref = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
model_inp = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
if i < 2:
ca_layer = RETURNX()
else:
ca_layer = Transformer(2**(i+1) * ngf,2,4,ngf,ngf*4)
setattr(self, 'ca' + str(i), ca_layer)
setattr(self, 'ref_down' + str(i), model_ref)
setattr(self, 'inp_down' + str(i), model_inp)
self.output_nc = out_channels * 2
def forward(self, maskGT, ref):
x_maskGT, x_ref = self.first_inp(maskGT), self.first_ref(ref)
out=[x_maskGT]
for i in range(self.layers):
model_ref = getattr(self, 'ref_down'+str(i))
model_inp = getattr(self, 'inp_down'+str(i))
ca_layer = getattr(self, 'ca'+str(i))
x_maskGT, x_ref = model_inp(x_maskGT), model_ref(x_ref)
x_maskGT = ca_layer(x_maskGT, x_ref)
if i < self.layers - 1:
out.append(x_maskGT)
else:
out.append(torch.cat([x_maskGT, x_ref], dim=1)) # concat ref features !
return out
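# Note (a reading of the code above, not original documentation): levels 0 and
# 1 use RETURNX, which returns x unchanged, so cross-attention between the
# masked input stream and the reference stream only begins at level 2 via the
# Transformer blocks.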
class Decoder(nn.Module):
def __init__(self, image_nc, feature_nc, ngf, img_f, layers, num_block, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Decoder, self).__init__()
self.layers = layers
for i in range(layers)[::-1]:
if i == layers-1:
in_channels = ngf*(2**(i+1)) * 2
else:
in_channels = min(ngf*(2**(i+1)), img_f)
out_channels = min(ngf*(2**i), img_f)
up = UpBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
res = FFCADAINResBlocks(num_block, in_channels, feature_nc, norm_layer, nonlinearity, use_spect)
|
class Visual_Encoder(nn.Module):
def __init__(self, image_nc, ngf, img_f, layers, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Visual_Encoder, self).__init__()
self.layers = layers
self.first_inp = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
self.first_ref = FirstBlock2d(image_nc, ngf, norm_layer, nonlinearity, use_spect)
for i in range(layers):
in_channels = min(ngf*(2**i), img_f)
out_channels = min(ngf*(2**(i+1)), img_f)
model_ref = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
model_inp = DownBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
if i < 2:
ca_layer = RETURNX()
else:
ca_layer = Transformer(2**(i+1) * ngf,2,4,ngf,ngf*4)
setattr(self, 'ca' + str(i), ca_layer)
setattr(self, 'ref_down' + str(i), model_ref)
setattr(self, 'inp_down' + str(i), model_inp)
self.output_nc = out_channels * 2
def forward(self, maskGT, ref):
x_maskGT, x_ref = self.first_inp(maskGT), self.first_ref(ref)
out=[x_maskGT]
for i in range(self.layers):
model_ref = getattr(self, 'ref_down'+str(i))
model_inp = getattr(self, 'inp_down'+str(i))
ca_layer = getattr(self, 'ca'+str(i))
x_maskGT, x_ref = model_inp(x_maskGT), model_ref(x_ref)
x_maskGT = ca_layer(x_maskGT, x_ref)
if i < self.layers - 1:
out.append(x_maskGT)
else:
out.append(torch.cat([x_maskGT, x_ref], dim=1)) # concat ref features !
return out
class Decoder(nn.Module):
def __init__(self, image_nc, feature_nc, ngf, img_f, layers, num_block, norm_layer=nn.BatchNorm2d, nonlinearity=nn.LeakyReLU(), use_spect=False):
super(Decoder, self).__init__()
self.layers = layers
for i in range(layers)[::-1]:
if i == layers-1:
in_channels = ngf*(2**(i+1)) * 2
else:
in_channels = min(ngf*(2**(i+1)), img_f)
out_channels = min(ngf*(2**i), img_f)
up = UpBlock2d(in_channels, out_channels, norm_layer, nonlinearity, use_spect)
res = FFCADAINResBlocks(num_block, in_channels, feature_nc, norm_layer, nonlinearity, use_spect) | jump = Jump(out_channels, norm_layer, nonlinearity, use_spect) | 8 | 2023-11-02 18:25:51+00:00 | 4k |
fortelex/hiveline | hiveline/results/journeys.py | [
{
"identifier": "fptf",
"path": "hiveline/models/fptf.py",
"snippet": "def _remove_empty_keys(d):\ndef read_datetime(time_str):\ndef format_datetime(dt):\n def __init__(self, name=None, address=None, longitude=None, latitude=None, altitude=None):\n def to_dict(self):\n def to_json(self):\n d... | import datetime
import json
import math
import os.path
from typing import Callable, Generator
from shapely import Polygon, Point
from hiveline.models import fptf
from hiveline.models.options import Options, Option
from hiveline.mongo.db import get_database
from hiveline.routing.util import ensure_directory | 2,452 |
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Journeys:
def __init__(self, sim_id: str, db=None, use_cache=True, cache="./cache"):
if db is None:
db = get_database()
self.db = db
self.sim_id = sim_id
if cache.endswith("/"):
cache = cache[:-1]
self.use_cache = use_cache
self.cache = cache + "/hiveline-journeys"
|
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Journeys:
def __init__(self, sim_id: str, db=None, use_cache=True, cache="./cache"):
if db is None:
db = get_database()
self.db = db
self.sim_id = sim_id
if cache.endswith("/"):
cache = cache[:-1]
self.use_cache = use_cache
self.cache = cache + "/hiveline-journeys" | ensure_directory(self.cache) | 4 | 2023-11-07 15:34:04+00:00 | 4k |
uhppoted/uhppoted-app-home-assistant | custom_components/uhppoted/event.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/uhppoted/const.py",
"snippet": "DOMAIN = 'uhppoted'"
},
{
"identifier": "CONF_BIND_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BIND_ADDR = 'bind_address'"
},
{
"identifier": "CONF_BROADCAST_ADDR",
... | import datetime
import logging
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from uhppoted import uhppote
from .const import DOMAIN
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import ATTR_ADDRESS
from .const import ATTR_NETMASK
from .const import ATTR_GATEWAY
from .const import ATTR_FIRMWARE
from .config import configure_controllers
from .config import configure_doors
from .controller import ControllerInfo
from .door import ControllerDoorOpened | 1,818 | from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
options = entry.options
bind = options[CONF_BIND_ADDR]
broadcast = options[CONF_BROADCAST_ADDR]
listen = options[CONF_LISTEN_ADDR]
| from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
options = entry.options
bind = options[CONF_BIND_ADDR]
broadcast = options[CONF_BROADCAST_ADDR]
listen = options[CONF_LISTEN_ADDR] | debug = options[CONF_DEBUG] | 4 | 2023-11-06 18:46:49+00:00 | 4k |
shixiaoyu0216/SAC4IR | train.py | [
{
"identifier": "env_sac",
"path": "sacd/env_sac.py",
"snippet": "class Env():\r\n def __init__(self, observation_data, I, max_item_id, each_user, K, item_ctr_dict, pop_dict):\r\n def reset(self, observation):\r\n def step(self, action, pass_item_list):\r"
},
{
"identifier": "SacdAgent"... | import copy
import csv
import os
import yaml
from datetime import datetime
from sacd import env_sac
from sacd.agent.sacd import SacdAgent
from util import rowdata_process_util, json_util
from util.metric.Gini import Gini
from util.metric.HR import HR
from util.metric.NDCG import ndcg_metric | 2,818 |
def getHistory(each_user, dataset_name):
history_list = []
try:
with open('./dataset/' + dataset_name + '/transition/' + str(each_user) + '_transition.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
history_list.append(row)
except:
return history_list
return history_list
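# Shape note (inferred from the call sites in run_sac below, not from original
# documentation): each CSV row is kept as a list where row[0] is the state and
# row[1] is the item id; a missing transition file yields an empty history.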
def run_sac():
path = "./config/sacd.yaml"
env_id = "Recommender"
with open(path, encoding='utf-8') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
name = path.split('/')[-1].rstrip('.yaml')
cur_time = datetime.now().strftime("%Y%m%d-%H%M")
log_dir = os.path.join('logs', env_id, f'{name}-seed{0}-{cur_time}')
dataset_name = "ml-1m"
obs_dataset = rowdata_process_util.Dataset('./dataset/' + dataset_name + '/ratings.dat')
train_dict = obs_dataset.splitData()
item_id_list, item_num, max_item_id = obs_dataset.getAllItem()
item_ctr_dict = json_util.load_dict('./dataset/' + dataset_name + '/click_through_rate.json')
pop_dict = obs_dataset.getPopular(train_dict)
K = config['K']
user_num = 0
precision_test = 0
hr_test = 0
ndcg_test = 0
pop_gini = copy.deepcopy(pop_dict)
for i in pop_gini.keys():
pop_gini[i] = 0
for each_user in train_dict:
user_history = getHistory(each_user, dataset_name)
history_list_train = user_history[:int(0.8 * len(user_history))]
history_list_test_set = user_history[int(0.8 * len(user_history)):]
history_list_test = history_list_test_set[0:1]
if len(history_list_train) <= 10 * 1.2:
continue
user_num += 1
pass_item_list = []
for user_id, item_id, ratings, __ in obs_dataset.data:
if user_id == each_user and ratings == 0:
pass_item_list.append(item_id)
test_set = list(set([int(i[1]) for i in history_list_test_set]) - set(pass_item_list))
observation_data = train_dict[each_user]
env = env_sac.Env(observation_data[-K:], list(set(item_id_list)), max_item_id, each_user, K, item_ctr_dict,
pop_dict)
agent = SacdAgent(env=env, log_dir=log_dir, cuda=False, state_re=True, dueling_net=False, **config)
agent.run_offpolicy(history_list_train)
state = history_list_test[0][0]
actions_list = agent.exploit(state, each_user)
precision_test += (len(set(actions_list) & set(test_set))) / (len(actions_list))
hr_test += HR(test_set, actions_list)
ndcg_test += ndcg_metric({each_user: actions_list}, {each_user: test_set})
for i in actions_list:
if i in pop_gini.keys():
pop_gini[i] += 1
if user_num != 0:
print("Precision: " + str(precision_test) + " / " + str(user_num) + " = " + str(precision_test / user_num))
print("HR: " + str(hr_test) + " / " + str(user_num) + " = " + str(hr_test / user_num))
print("NDCG: " + str(ndcg_test) + " / " + str(user_num) + " = " + str(ndcg_test / user_num))
for k in pop_gini.copy():
if pop_gini[k] == 0:
del pop_gini[k]
|
def getHistory(each_user, dataset_name):
history_list = []
try:
with open('./dataset/' + dataset_name + '/transition/' + str(each_user) + '_transition.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
history_list.append(row)
except:
return history_list
return history_list
def run_sac():
path = "./config/sacd.yaml"
env_id = "Recommender"
with open(path, encoding='utf-8') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
name = path.split('/')[-1].rstrip('.yaml')
cur_time = datetime.now().strftime("%Y%m%d-%H%M")
log_dir = os.path.join('logs', env_id, f'{name}-seed{0}-{cur_time}')
dataset_name = "ml-1m"
obs_dataset = rowdata_process_util.Dataset('./dataset/' + dataset_name + '/ratings.dat')
train_dict = obs_dataset.splitData()
item_id_list, item_num, max_item_id = obs_dataset.getAllItem()
item_ctr_dict = json_util.load_dict('./dataset/' + dataset_name + '/click_through_rate.json')
pop_dict = obs_dataset.getPopular(train_dict)
K = config['K']
user_num = 0
precision_test = 0
hr_test = 0
ndcg_test = 0
pop_gini = copy.deepcopy(pop_dict)
for i in pop_gini.keys():
pop_gini[i] = 0
for each_user in train_dict:
user_history = getHistory(each_user, dataset_name)
history_list_train = user_history[:int(0.8 * len(user_history))]
history_list_test_set = user_history[int(0.8 * len(user_history)):]
history_list_test = history_list_test_set[0:1]
if len(history_list_train) <= 10 * 1.2:
continue
user_num += 1
pass_item_list = []
for user_id, item_id, ratings, __ in obs_dataset.data:
if user_id == each_user and ratings == 0:
pass_item_list.append(item_id)
test_set = list(set([int(i[1]) for i in history_list_test_set]) - set(pass_item_list))
observation_data = train_dict[each_user]
env = env_sac.Env(observation_data[-K:], list(set(item_id_list)), max_item_id, each_user, K, item_ctr_dict,
pop_dict)
agent = SacdAgent(env=env, log_dir=log_dir, cuda=False, state_re=True, dueling_net=False, **config)
agent.run_offpolicy(history_list_train)
state = history_list_test[0][0]
actions_list = agent.exploit(state, each_user)
precision_test += (len(set(actions_list) & set(test_set))) / (len(actions_list))
hr_test += HR(test_set, actions_list)
ndcg_test += ndcg_metric({each_user: actions_list}, {each_user: test_set})
for i in actions_list:
if i in pop_gini.keys():
pop_gini[i] += 1
if user_num != 0:
print("Precision: " + str(precision_test) + " / " + str(user_num) + " = " + str(precision_test / user_num))
print("HR: " + str(hr_test) + " / " + str(user_num) + " = " + str(hr_test / user_num))
print("NDCG: " + str(ndcg_test) + " / " + str(user_num) + " = " + str(ndcg_test / user_num))
for k in pop_gini.copy():
if pop_gini[k] == 0:
del pop_gini[k] | print("Gini: " + str(Gini(pop_gini))) | 4 | 2023-11-02 07:35:57+00:00 | 4k |
fw-ai/fireworks_poe_bot | fireworks_poe_bot/fw_poe_qr_bot.py | [
{
"identifier": "PoeBot",
"path": "fireworks_poe_bot/fastapi_poe/base.py",
"snippet": "class PoeBot:\n # Override these for your bot\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n \"\"\"Override this to retu... | import base64
import copy
import io
import fireworks.client
import time
import uuid
import requests
import qrcode
import traceback
from typing import AsyncIterable, Dict, List, Optional, Union
from .fastapi_poe import PoeBot
from sse_starlette.sse import ServerSentEvent
from .fastapi_poe.types import (
PartialResponse,
QueryRequest,
ReportErrorRequest,
ReportFeedbackRequest,
SettingsRequest,
SettingsResponse,
ErrorResponse,
)
from fireworks.client.api import ChatMessage
from fireworks.client.error import InvalidRequestError
from fireworks.client.image import ImageInference, Answer
from fireworks_poe_bot.plugin import log_error, log_info, log_warn, register_bot_plugin
from fireworks_poe_bot.config import ModelConfig
from itertools import groupby
from PIL import Image
from google.cloud import storage | 3,103 | def parse_input(input_string, default_qr_strength, default_prompt_strength):
# Parse initial prompt
prompt_end_index = input_string.find('--')
if prompt_end_index == -1:
prompt_end_index = len(input_string)
prompt = input_string[:prompt_end_index].strip() if prompt_end_index != -1 else input_string.strip()
input_string = input_string[prompt_end_index:].strip()
qr_prompt = None
qr_strength = default_qr_strength
prompt_strength = default_prompt_strength
model = "sdxl"
while len(input_string) > 0:
next_flag_idx = input_string.find('--', 2)
if next_flag_idx == -1:
next_flag_idx = len(input_string)
# Parse the flag and its arguments
if input_string.startswith('--qr-strength'):
qr_strength = float(input_string[len("--qr-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--qr'):
qr_prompt = input_string[len("--qr"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--prompt-strength'):
prompt_strength = int(input_string[len("--prompt-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--model'):
model = input_string[len("--model"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
else:
raise ValueError(f'Unknown flag: {input_string[:next_flag_idx]}')
if qr_prompt is None:
raise ValueError('Please specify a QR prompt with a --qr flag.')
return prompt, qr_prompt, qr_strength, prompt_strength, model
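# Illustrative parse (the prompt string and defaults 0.7 / 7 are hypothetical):
#   parse_input("a castle --qr https://example.com --qr-strength 0.8", 0.7, 7)
#   -> ("a castle", "https://example.com", 0.8, 7, "sdxl")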
def gen_qr_code(input_text: str) -> Image.Image:
# Generate QR Code
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=16,
border=4,
)
qr.add_data(input_text)
qr.make(fit=True)
# Create QR Code image
img = qr.make_image(fill_color="black", back_color="white")
# Padding the image to be 768x768
width, height = img.size
new_width = new_height = 768
# Create a new image with white background
new_img = Image.new("RGB", (new_width, new_height), "white")
# Paste the original image onto the new image, centered
new_img.paste(img, ((new_width - width) // 2, (new_height - height) // 2))
return new_img
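# Illustrative usage (the payload is hypothetical); the result is always
# padded to 768x768 regardless of payload length:
#   img = gen_qr_code("https://example.com")
#   assert img.size == (768, 768)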
class QRCodeConfig(ModelConfig):
gcs_bucket_name: str
conditioning_scale: Optional[float] = None
default_cfg_scale: Optional[float] = None
@register_bot_plugin("qr_models", QRCodeConfig)
class FireworksPoeQRBot(PoeBot):
def __init__(
self,
model: str,
api_key: str,
environment: str,
deployment: str,
server_version: str,
gcs_bucket_name: str,
conditioning_scale: float,
default_cfg_scale: float,
):
super().__init__()
self.model = model
self.api_key = api_key
self.environment = environment
self.deployment = deployment
self.server_version = server_version
self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8
model_atoms = model.split("/")
if len(model_atoms) != 4:
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
if model_atoms[0] != "accounts" or model_atoms[2] != "models":
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
self.account = model_atoms[1]
self.model = model_atoms[3]
self.client = ImageInference(account=self.account, model=self.model)
self.gcs_bucket_name = gcs_bucket_name
self.conditioning_scale = conditioning_scale
def _log_warn(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "WARNING",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
)
|
def parse_input(input_string, default_qr_strength, default_prompt_strength):
# Parse initial prompt
prompt_end_index = input_string.find('--')
if prompt_end_index == -1:
prompt_end_index = len(input_string)
prompt = input_string[:prompt_end_index].strip() if prompt_end_index != -1 else input_string.strip()
input_string = input_string[prompt_end_index:].strip()
qr_prompt = None
qr_strength = default_qr_strength
prompt_strength = default_prompt_strength
model = "sdxl"
while len(input_string) > 0:
next_flag_idx = input_string.find('--', 2)
if next_flag_idx == -1:
next_flag_idx = len(input_string)
# Parse the flag and its arguments
if input_string.startswith('--qr-strength'):
qr_strength = float(input_string[len("--qr-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--qr'):
qr_prompt = input_string[len("--qr"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--prompt-strength'):
prompt_strength = int(input_string[len("--prompt-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--model'):
model = input_string[len("--model"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
else:
raise ValueError(f'Unknown flag: {input_string[:next_flag_idx]}')
if qr_prompt is None:
raise ValueError('Please specify a QR prompt with a --qr flag.')
return prompt, qr_prompt, qr_strength, prompt_strength, model
def gen_qr_code(input_text: str) -> Image.Image:
# Generate QR Code
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=16,
border=4,
)
qr.add_data(input_text)
qr.make(fit=True)
# Create QR Code image
img = qr.make_image(fill_color="black", back_color="white")
# Padding the image to be 768x768
width, height = img.size
new_width = new_height = 768
# Create a new image with white background
new_img = Image.new("RGB", (new_width, new_height), "white")
# Paste the original image onto the new image, centered
new_img.paste(img, ((new_width - width) // 2, (new_height - height) // 2))
return new_img
class QRCodeConfig(ModelConfig):
gcs_bucket_name: str
conditioning_scale: Optional[float] = None
default_cfg_scale: Optional[float] = None
@register_bot_plugin("qr_models", QRCodeConfig)
class FireworksPoeQRBot(PoeBot):
def __init__(
self,
model: str,
api_key: str,
environment: str,
deployment: str,
server_version: str,
gcs_bucket_name: str,
conditioning_scale: float,
default_cfg_scale: float,
):
super().__init__()
self.model = model
self.api_key = api_key
self.environment = environment
self.deployment = deployment
self.server_version = server_version
self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8
model_atoms = model.split("/")
if len(model_atoms) != 4:
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
if model_atoms[0] != "accounts" or model_atoms[2] != "models":
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
self.account = model_atoms[1]
self.model = model_atoms[3]
self.client = ImageInference(account=self.account, model=self.model)
self.gcs_bucket_name = gcs_bucket_name
self.conditioning_scale = conditioning_scale
def _log_warn(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "WARNING",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
) | log_warn(payload) | 10 | 2023-11-03 23:24:23+00:00 | 4k |
In-Network-Machine-Learning/QCMP | receive_queues.py | [
{
"identifier": "path_stats",
"path": "q_table.py",
"snippet": "class path_stats():\n def __init__(self, path_queues, path_weights=0):\n self.path_queues = path_queues\n self.path_weights = path_weights\n self.action = 2\n self.reward = 0\n\n def weighted_average(self):... | import os
import sys
import grpc
import math
import numpy as np
import p4runtime_lib.bmv2
import p4runtime_lib.helper
from scapy.all import *
from scapy.layers.inet import _IPOption_HDR
from q_table import (path_stats,
q_table) | 3,101 |
class IPOption_MRI(IPOption):
name = "MRI"
option = 31
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="swtraces",
adjust=lambda pkt,l:l*2+4),
ShortField("count", 0),
PacketListField("swtraces",
[],
SwitchTrace,
count_from=lambda pkt:(pkt.count*1)) ]
def runthat(switch_q_table, switch, mri, path_dicts, counter, index1, index2, index3, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params):
# index1 : index for where switch queue data is stored in path_dicts (list of dicts)
# index2 : which switch trace contains the queue length
# index3 : swid for path defining switch
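# Illustrative reading of the call sites in handle_pkt below: the s1 call
# passes index1=0, index2=2, index3=1, i.e. s1's queue data lives in
# path_dicts[0], the queue depth is read from swtraces[2], and
# swtraces[1].swid decides which of s1's two candidate paths the probe took.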
switch_q_table.update_parameters()
queue_length = mri.swtraces[index2].qdepth
# print(mri.swtraces[i].swid, mri.swtraces[i].qdepth)
if mri.swtraces[index3].swid == diff_switches[0]:
path_dicts[index1]['path1'] = int(queue_length/2)
counter[index1][0] += 1
elif mri.swtraces[index3].swid == diff_switches[1]:
path_dicts[index1]['path2'] = int(queue_length/2)
counter[index1][1] += 1
if 3 in counter[index1]:
zero_indices = [i for i, x in enumerate(counter[index1]) if x == 0]
for index in zero_indices:
path_dicts[index1]["path{0}".format(index + 1)] = 100 # max queue length
if len(path_dicts[index1]) == 2:
global old_paths
# print(path_dict)
new_paths = path_stats([path_dicts[index1]['path1'], path_dicts[index1]['path2']])
switch_q_table.update_q_table(switch_q_table.parameters['LEARNING_RATE'], switch_q_table.parameters['DISCOUNT'], old_paths[index1], new_paths)
# print(q_table)
new_paths.get_next_action(switch_q_table, switch_q_table.parameters['epsilon'])
new_paths.get_new_weights(old_paths[index1], switch_q_table.parameters['action_weight'])
new_paths.get_reward(old_paths[index1])
print('s{0}'.format(index1+1), new_paths.path_weights, new_paths.action, new_paths.path_queues[::-1])
p4info_file_path = os.path.join(os.getcwd(), 'build/load_balance_advanced.p4.p4info.txt')
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
switch.MasterArbitrationUpdate()
new_paths.change_path_weights(old_paths[index1], p4info_helper, switch, nhop_dmacs, nhop_ipv4s, ports)
switch.shutdown()
switch_q_table.reset_parameters(new_paths, reset_params[index1])
old_paths[index1] = new_paths
path_dicts[index1].clear()
for i in range(len(counter[index1])):
counter[index1][i] = 0
def handle_pkt(pkt, s1_q_table, s2_q_table, s3_q_table, path_dicts, counter, reset_params):
# pkt.show2()
if pkt[IP]:
mri=pkt[IP][IPOption_MRI]
path_len = len(mri.swtraces)
if path_len == 3:
s1 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s1',
address='127.0.0.1:50051',
device_id=0)
nhop_dmacs = ["00:00:00:00:01:04", "00:00:00:00:01:05"]
nhop_ipv4s = ["10.0.2.0", "10.0.3.0"]
ports = [4, 5]
diff_switches = [2, 3]
runthat(s1_q_table, s1, mri, path_dicts, counter, 0, 2, 1, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
if mri.swtraces[3].swid == 2:
s2 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s2',
address='127.0.0.1:50052',
device_id=1)
nhop_dmacs = ["00:00:00:00:02:03", "00:00:00:00:02:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s2_q_table, s2, mri, path_dicts, counter, 1, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
elif mri.swtraces[3].swid == 3:
s3 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s3',
address='127.0.0.1:50053',
device_id=2)
nhop_dmacs = ["00:00:00:00:03:03", "00:00:00:00:03:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s3_q_table, s3, mri, path_dicts, counter, 2, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
print("cannot find IP header in the packet")
sys.stdout.flush()
def main():
| # This file is part of the Planter extend project: QCMP.
# This program is a free software tool, which does ensemble in-network reinforcement learning for load balancing.
# licensed under Apache-2.0
#
# Utility: This file is used to receive telemetry traffic and update q-table
#
# Copyright (c) 2022-2023 Benjamin Rienecker Modified by Changgang Zheng
# Copyright (c) Computing Infrastructure Group, Department of Engineering Science, University of Oxford
#!/usr/bin/env python3
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../utils/'))
old_paths = [path_stats([0, 0], [50, 50]), path_stats([0, 0], [50, 50]), path_stats([0, 0], [50, 50])]
class SwitchTrace(Packet):
fields_desc = [ IntField("swid", 0),
IntField("qdepth", 0)]
def extract_padding(self, p):
return "", p
class IPOption_MRI(IPOption):
name = "MRI"
option = 31
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="swtraces",
adjust=lambda pkt,l:l*2+4),
ShortField("count", 0),
PacketListField("swtraces",
[],
SwitchTrace,
count_from=lambda pkt:(pkt.count*1)) ]
def runthat(switch_q_table, switch, mri, path_dicts, counter, index1, index2, index3, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params):
# index1 : index for where switch queue data is stored in path_dicts (list of dicts)
# index2 : which switch trace contains the queue length
# index3 : swid for path defining switch
switch_q_table.update_parameters()
queue_length = mri.swtraces[index2].qdepth
# print(mri.swtraces[i].swid, mri.swtraces[i].qdepth)
if mri.swtraces[index3].swid == diff_switches[0]:
path_dicts[index1]['path1'] = int(queue_length/2)
counter[index1][0] += 1
elif mri.swtraces[index3].swid == diff_switches[1]:
path_dicts[index1]['path2'] = int(queue_length/2)
counter[index1][1] += 1
if 3 in counter[index1]:
zero_indices = [i for i, x in enumerate(counter[index1]) if x == 0]
for index in zero_indices:
path_dicts[index1]["path{0}".format(index + 1)] = 100 # max queue length
if len(path_dicts[index1]) == 2:
global old_paths
# print(path_dict)
new_paths = path_stats([path_dicts[index1]['path1'], path_dicts[index1]['path2']])
switch_q_table.update_q_table(switch_q_table.parameters['LEARNING_RATE'], switch_q_table.parameters['DISCOUNT'], old_paths[index1], new_paths)
# print(q_table)
new_paths.get_next_action(switch_q_table, switch_q_table.parameters['epsilon'])
new_paths.get_new_weights(old_paths[index1], switch_q_table.parameters['action_weight'])
new_paths.get_reward(old_paths[index1])
print('s{0}'.format(index1+1), new_paths.path_weights, new_paths.action, new_paths.path_queues[::-1])
p4info_file_path = os.path.join(os.getcwd(), 'build/load_balance_advanced.p4.p4info.txt')
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
switch.MasterArbitrationUpdate()
new_paths.change_path_weights(old_paths[index1], p4info_helper, switch, nhop_dmacs, nhop_ipv4s, ports)
switch.shutdown()
switch_q_table.reset_parameters(new_paths, reset_params[index1])
old_paths[index1] = new_paths
path_dicts[index1].clear()
for i in range(len(counter[index1])):
counter[index1][i] = 0
def handle_pkt(pkt, s1_q_table, s2_q_table, s3_q_table, path_dicts, counter, reset_params):
# pkt.show2()
if pkt[IP]:
mri=pkt[IP][IPOption_MRI]
path_len = len(mri.swtraces)
if path_len == 3:
s1 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s1',
address='127.0.0.1:50051',
device_id=0)
nhop_dmacs = ["00:00:00:00:01:04", "00:00:00:00:01:05"]
nhop_ipv4s = ["10.0.2.0", "10.0.3.0"]
ports = [4, 5]
diff_switches = [2, 3]
runthat(s1_q_table, s1, mri, path_dicts, counter, 0, 2, 1, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
if mri.swtraces[3].swid == 2:
s2 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s2',
address='127.0.0.1:50052',
device_id=1)
nhop_dmacs = ["00:00:00:00:02:03", "00:00:00:00:02:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s2_q_table, s2, mri, path_dicts, counter, 1, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
elif mri.swtraces[3].swid == 3:
s3 = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name='s3',
address='127.0.0.1:50053',
device_id=2)
nhop_dmacs = ["00:00:00:00:03:03", "00:00:00:00:03:04"]
nhop_ipv4s = ["10.0.4.0", "10.0.5.0"]
ports = [3, 4]
diff_switches = [4, 5]
runthat(s3_q_table, s3, mri, path_dicts, counter, 2, 3, 2, diff_switches, nhop_dmacs, nhop_ipv4s, ports, reset_params)
else:
print("cannot find IP header in the packet")
sys.stdout.flush()
def main(): | s1_q_table = q_table() | 1 | 2023-11-01 09:37:28+00:00 | 4k |
Fsoft-AIC/LSDM | run/predict_contact.py | [
{
"identifier": "ProxDataset_txt",
"path": "posa/dataset.py",
"snippet": "class ProxDataset_txt(Dataset): # when jump_step=8, for a whole seq, dataset's max_frame is 165, max num_seg is 29\n def __init__(self, data_dir, fix_orientation=False, no_obj_classes=8, max_frame=220,\n ds_w... | import os
import numpy as np
import argparse
import torch
import posa.data_utils as du
from tqdm import tqdm
from torch.utils.data import DataLoader
from posa.dataset import ProxDataset_txt
from util.model_util import create_model_and_diffusion | 2,428 |
# Example usage
# python predict_contact.py ../data/amass --load_model ../training/contactformer/model_ckpt/best_model_recon_acc.pt --output_dir ../results/amass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument("data_dir", type=str,
help="path to POSA_temp dataset dir")
parser.add_argument("--load_model", type=str, default="../training/model_ckpt/epoch_0045.pt",
help="checkpoint path to load")
parser.add_argument("--encoder_mode", type=int, default=1,
help="different number represents different variants of encoder")
parser.add_argument("--decoder_mode", type=int, default=1,
help="different number represents different variants of decoder")
parser.add_argument("--n_layer", type=int, default=3, help="Number of layers in transformer")
parser.add_argument("--n_head", type=int, default=4, help="Number of heads in transformer")
parser.add_argument("--jump_step", type=int, default=8, help="Frame skip size for each input motion sequence")
parser.add_argument("--dim_ff", type=int, default=512,
help="Dimension of hidden layers in positionwise MLP in the transformer")
parser.add_argument("--f_vert", type=int, default=64, help="Dimension of the embeddings for body vertices")
parser.add_argument("--max_frame", type=int, default=256,
help="The maximum length of motion sequence (after frame skipping) which model accepts.")
parser.add_argument("--posa_path", type=str, default="../training/posa/model_ckpt/epoch_0349.pt",
help="The POSA model checkpoint that ContactFormer can pre-load")
parser.add_argument("--output_dir", type=str, default="../results/output")
parser.add_argument("--save_probability", dest='save_probability', action='store_const', const=True, default=False,
help="Save the probability of each contact labels, instead of the most possible contact label")
# Parse arguments and assign directories
args = parser.parse_args()
args_dict = vars(args)
data_dir = args_dict['data_dir']
ckpt_path = args_dict['load_model']
encoder_mode = args_dict['encoder_mode']
decoder_mode = args_dict['decoder_mode']
n_layer = args_dict['n_layer']
n_head = args_dict['n_head']
jump_step = args_dict['jump_step']
max_frame = args_dict['max_frame']
dim_ff = args_dict['dim_ff']
f_vert = args_dict['f_vert']
posa_path = args_dict['posa_path']
output_dir = args_dict['output_dir']
save_probability = args_dict['save_probability']
device = torch.device("cuda")
num_obj_classes = 8
pnt_size = 1024
# For fix_ori
fix_ori = True
ds_weights = torch.tensor(np.load("posa/support_files/downsampled_weights.npy"))
associated_joints = torch.argmax(ds_weights, dim=1)
os.makedirs(output_dir, exist_ok=True)
seq_name_list = sorted(os.listdir(os.path.join(data_dir, 'context')))
use_ddim = False # FIXME - hardcoded
clip_denoised = False # FIXME - hardcoded
# Setup names for output files
context_dir = os.path.join(data_dir, 'context')
files = os.listdir(context_dir)
lookup_tab = dict()
for file in files:
reduced_file = file.split('.')[0]
with open(os.path.join(context_dir, file), 'r') as f:
prompt = f.readlines()[0].strip()
lookup_tab[prompt] = reduced_file
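# Illustrative result of the loop above (the prompt/file pair is hypothetical):
#   lookup_tab == {"a person sits on a chair": "seq_0001"}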
# Load in model checkpoints and set up data stream
|
# Example usage
# python predict_contact.py ../data/amass --load_model ../training/contactformer/model_ckpt/best_model_recon_acc.pt --output_dir ../results/amass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="")
parser.add_argument("data_dir", type=str,
help="path to POSA_temp dataset dir")
parser.add_argument("--load_model", type=str, default="../training/model_ckpt/epoch_0045.pt",
help="checkpoint path to load")
parser.add_argument("--encoder_mode", type=int, default=1,
help="different number represents different variants of encoder")
parser.add_argument("--decoder_mode", type=int, default=1,
help="different number represents different variants of decoder")
parser.add_argument("--n_layer", type=int, default=3, help="Number of layers in transformer")
parser.add_argument("--n_head", type=int, default=4, help="Number of heads in transformer")
parser.add_argument("--jump_step", type=int, default=8, help="Frame skip size for each input motion sequence")
parser.add_argument("--dim_ff", type=int, default=512,
help="Dimension of hidden layers in positionwise MLP in the transformer")
parser.add_argument("--f_vert", type=int, default=64, help="Dimension of the embeddings for body vertices")
parser.add_argument("--max_frame", type=int, default=256,
help="The maximum length of motion sequence (after frame skipping) which model accepts.")
parser.add_argument("--posa_path", type=str, default="../training/posa/model_ckpt/epoch_0349.pt",
help="The POSA model checkpoint that ContactFormer can pre-load")
parser.add_argument("--output_dir", type=str, default="../results/output")
parser.add_argument("--save_probability", dest='save_probability', action='store_const', const=True, default=False,
help="Save the probability of each contact labels, instead of the most possible contact label")
# Parse arguments and assign directories
args = parser.parse_args()
args_dict = vars(args)
data_dir = args_dict['data_dir']
ckpt_path = args_dict['load_model']
encoder_mode = args_dict['encoder_mode']
decoder_mode = args_dict['decoder_mode']
n_layer = args_dict['n_layer']
n_head = args_dict['n_head']
jump_step = args_dict['jump_step']
max_frame = args_dict['max_frame']
dim_ff = args_dict['dim_ff']
f_vert = args_dict['f_vert']
posa_path = args_dict['posa_path']
output_dir = args_dict['output_dir']
save_probability = args_dict['save_probability']
device = torch.device("cuda")
num_obj_classes = 8
pnt_size = 1024
# For fix_ori
fix_ori = True
ds_weights = torch.tensor(np.load("posa/support_files/downsampled_weights.npy"))
associated_joints = torch.argmax(ds_weights, dim=1)
os.makedirs(output_dir, exist_ok=True)
seq_name_list = sorted(os.listdir(os.path.join(data_dir, 'context')))
use_ddim = False # FIXME - hardcoded
clip_denoised = False # FIXME - hardcoded
# Setup names for output files
context_dir = os.path.join(data_dir, 'context')
files = os.listdir(context_dir)
lookup_tab = dict()
for file in files:
reduced_file = file.split('.')[0]
with open(os.path.join(context_dir, file), 'r') as f:
prompt = f.readlines()[0].strip()
lookup_tab[prompt] = reduced_file
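# Hedged sketch of the mapping built above, using in-memory stand-ins for the
# context files (the file name and prompt below are hypothetical, not from the
# dataset): each first-line prompt maps to the file's base name.
_demo = {"seq001.txt": "a person sits down"}  # {file name: first-line prompt}
_demo_tab = {p: f.split('.')[0] for f, p in _demo.items()}
assert _demo_tab == {"a person sits down": "seq001"}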
# Load in model checkpoints and set up data stream | valid_dataset = ProxDataset_txt(data_dir, max_frame=max_frame, fix_orientation=fix_ori, | 0 | 2023-11-06 07:55:51+00:00 | 4k |
molML/traversing_chem_space | active_learning/data_prep.py | [
{
"identifier": "molecular_graph_featurizer",
"path": "active_learning/utils.py",
"snippet": "def molecular_graph_featurizer(smiles: str, y=None, structural_feats: bool = True, functional_feats: bool = True):\n\n y = torch.tensor([y]).to(torch.long)\n\n mol = Chem.MolFromSmiles(smiles, sanitize=Tr... | from active_learning.utils import molecular_graph_featurizer as smiles_to_graph
from active_learning.utils import smiles_to_ecfp, get_tanimoto_matrix, check_featurizability
from collections import OrderedDict
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit import Chem
from tqdm import tqdm
from typing import Any
from config import ROOT_DIR
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import torch
import os
import sys
import h5py | 2,235 |
def canonicalize(smiles: str, sanitize: bool = True):
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles, sanitize=sanitize))
def get_data(random_state: int = 42, dataset: str = 'ALDH1'):
# read smiles from file and canonicalize them
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/inactives.smi')) as f:
inactives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/actives.smi')) as f:
actives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
# remove duplicates:
inactives = list(set(inactives))
actives = list(set(actives))
# remove intersecting molecules:
intersecting_mols = np.intersect1d(inactives, actives)
inactives = [smi for smi in inactives if smi not in intersecting_mols]
actives = [smi for smi in actives if smi not in intersecting_mols]
# remove molecules that have scaffolds that cannot be kekulized or featurized
inactives_, actives_ = [], []
for smi in tqdm(actives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
actives_.append(smi)
except:
pass
for smi in tqdm(inactives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
inactives_.append(smi)
except:
pass
# add to df
df = pd.DataFrame({'smiles': inactives_ + actives_,
'y': [0] * len(inactives_) + [1] * len(actives_)})
# shuffle
df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
return df
def split_data(df: pd.DataFrame, random_state: int = 42, screen_size: int = 50000, test_size: int = 10000,
dataset: str = 'ALDH1') -> (pd.DataFrame, pd.DataFrame):
df_screen, df_test = train_test_split(df, stratify=df['y'].tolist(), train_size=screen_size, test_size=test_size,
random_state=random_state)
# write to csv
df_screen.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/screen.csv'), index=False)
df_test.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/test.csv'), index=False)
return df_screen, df_test
class MasterDataset:
""" Dataset that holds all data in an indexable way """
def __init__(self, name: str, df: pd.DataFrame = None, dataset: str = 'ALDH1', representation: str = 'ecfp', root: str = 'data',
overwrite: bool = False) -> None:
assert representation in ['ecfp', 'graph'], f"'representation' must be 'ecfp' or 'graph', not {representation}"
self.representation = representation
self.pth = os.path.join(ROOT_DIR, root, dataset, name)
# If not done already, process all data. Else just load it
if not os.path.exists(self.pth) or overwrite:
assert df is not None, "You need to supply a dataframe with 'smiles' and 'y' values"
os.makedirs(os.path.join(root, dataset, name), exist_ok=True)
self.process(df)
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
else:
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
def process(self, df: pd.DataFrame) -> None:
print('Processing data ... ', flush=True, file=sys.stderr)
index_smiles = OrderedDict({i: smi for i, smi in enumerate(df.smiles)})
smiles_index = OrderedDict({smi: i for i, smi in enumerate(df.smiles)})
smiles = np.array(df.smiles.tolist())
|
def canonicalize(smiles: str, sanitize: bool = True):
return Chem.MolToSmiles(Chem.MolFromSmiles(smiles, sanitize=sanitize))
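# Hedged usage sketch: canonicalization maps equivalent SMILES spellings to a
# single canonical string (its exact form depends on the RDKit version), which
# is what makes the duplicate/intersection checks below reliable.
assert canonicalize("OCC") == canonicalize("CCO")  # two spellings of ethanol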
def get_data(random_state: int = 42, dataset: str = 'ALDH1'):
# read smiles from file and canonicalize them
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/inactives.smi')) as f:
inactives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
with open(os.path.join(ROOT_DIR, f'data/{dataset}/original/actives.smi')) as f:
actives = [canonicalize(smi.strip().split()[0]) for smi in f.readlines()]
# remove duplicates:
inactives = list(set(inactives))
actives = list(set(actives))
# remove intersecting molecules:
intersecting_mols = np.intersect1d(inactives, actives)
inactives = [smi for smi in inactives if smi not in intersecting_mols]
actives = [smi for smi in actives if smi not in intersecting_mols]
# remove molecules that have scaffolds that cannot be kekulized or featurized
inactives_, actives_ = [], []
for smi in tqdm(actives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
actives_.append(smi)
except:
pass
for smi in tqdm(inactives):
try:
if Chem.MolFromSmiles(smi_to_scaff(smi, includeChirality=False)) is not None:
if check_featurizability(smi):
inactives_.append(smi)
except:
pass
# add to df
df = pd.DataFrame({'smiles': inactives_ + actives_,
'y': [0] * len(inactives_) + [1] * len(actives_)})
# shuffle
df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
return df
def split_data(df: pd.DataFrame, random_state: int = 42, screen_size: int = 50000, test_size: int = 10000,
dataset: str = 'ALDH1') -> (pd.DataFrame, pd.DataFrame):
df_screen, df_test = train_test_split(df, stratify=df['y'].tolist(), train_size=screen_size, test_size=test_size,
random_state=random_state)
# write to csv
df_screen.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/screen.csv'), index=False)
df_test.to_csv(os.path.join(ROOT_DIR, f'data/{dataset}/original/test.csv'), index=False)
return df_screen, df_test
class MasterDataset:
""" Dataset that holds all data in an indexable way """
def __init__(self, name: str, df: pd.DataFrame = None, dataset: str = 'ALDH1', representation: str = 'ecfp', root: str = 'data',
overwrite: bool = False) -> None:
assert representation in ['ecfp', 'graph'], f"'representation' must be 'ecfp' or 'graph', not {representation}"
self.representation = representation
self.pth = os.path.join(ROOT_DIR, root, dataset, name)
# If not done already, process all data. Else just load it
if not os.path.exists(self.pth) or overwrite:
assert df is not None, "You need to supply a dataframe with 'smiles' and 'y' values"
os.makedirs(os.path.join(root, dataset, name), exist_ok=True)
self.process(df)
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
else:
self.smiles_index, self.index_smiles, self.smiles, self.x, self.y, self.graphs = self.load()
def process(self, df: pd.DataFrame) -> None:
print('Processing data ... ', flush=True, file=sys.stderr)
index_smiles = OrderedDict({i: smi for i, smi in enumerate(df.smiles)})
smiles_index = OrderedDict({smi: i for i, smi in enumerate(df.smiles)})
smiles = np.array(df.smiles.tolist()) | x = smiles_to_ecfp(smiles, silent=False) | 1 | 2023-11-10 08:53:40+00:00 | 4k |
yunik1004/SAiD | script/inference.py | [
{
"identifier": "SAID_UNet1D",
"path": "said/model/diffusion.py",
"snippet": "class SAID_UNet1D(SAID):\n \"\"\"SAiD model implemented using U-Net 1D model\"\"\"\n\n def __init__(\n self,\n audio_config: Optional[Wav2Vec2Config] = None,\n audio_processor: Optional[Wav2Vec2Proce... | import argparse
import os
import torch
from diffusers import DDIMScheduler
from said.model.diffusion import SAID_UNet1D
from said.util.audio import fit_audio_unet, load_audio
from said.util.blendshape import (
load_blendshape_coeffs,
save_blendshape_coeffs,
save_blendshape_coeffs_image,
)
from dataset.dataset_voca import BlendVOCADataset | 2,487 | )
parser.add_argument(
"--intermediate_dir",
type=str,
default="../interm",
help="Saving directory of the intermediate outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--save_image",
type=bool,
default=False,
help="Save the output blendshape coefficients as an image",
)
parser.add_argument(
"--save_intermediate",
type=bool,
default=False,
help="Save the intermediate outputs",
)
parser.add_argument(
"--num_steps", type=int, default=1000, help="Number of inference steps"
)
parser.add_argument("--strength", type=float, default=1.0, help="How much to paint")
parser.add_argument(
"--guidance_scale", type=float, default=2.0, help="Guidance scale"
)
parser.add_argument(
"--guidance_rescale", type=float, default=0.0, help="Guidance scale"
)
parser.add_argument(
"--eta", type=float, default=0.0, help="Eta for DDIMScheduler, between [0, 1]"
)
parser.add_argument(
"--fps",
type=int,
default=60,
help="FPS of the blendshape coefficients sequence",
)
parser.add_argument(
"--divisor_unet",
type=int,
default=1,
help="Length of the blendshape coefficients sequence should be divided by this number",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--device",
type=str,
default="cuda:0",
help="GPU/CPU device",
)
parser.add_argument(
"--init_sample_path",
type=str,
help="Path of the initial sample file (csv format)",
)
parser.add_argument(
"--mask_path",
type=str,
help="Path of the mask file (csv format)",
)
args = parser.parse_args()
weights_path = args.weights_path
audio_path = args.audio_path
output_path = args.output_path
output_image_path = args.output_image_path
intermediate_dir = args.intermediate_dir
prediction_type = args.prediction_type
num_steps = args.num_steps
strength = args.strength
guidance_scale = args.guidance_scale
guidance_rescale = args.guidance_rescale
eta = args.eta
fps = args.fps
divisor_unet = args.divisor_unet
unet_feature_dim = args.unet_feature_dim
device = args.device
save_image = args.save_image
save_intermediate = args.save_intermediate
show_process = True
# Load init sample
init_sample_path = args.init_sample_path
init_samples = None
if init_sample_path is not None:
init_samples = load_blendshape_coeffs(init_sample_path).unsqueeze(0).to(device)
# Load mask
mask_path = args.mask_path
mask = None
if mask_path is not None:
mask = load_blendshape_coeffs(mask_path).unsqueeze(0).to(device)
# Load model
said_model = SAID_UNet1D(
noise_scheduler=DDIMScheduler,
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
)
said_model.load_state_dict(torch.load(weights_path, map_location=device))
said_model.to(device)
said_model.eval()
# Load data
waveform = load_audio(audio_path, said_model.sampling_rate)
# waveform = torch.zeros_like(waveform)
# Fit the size of waveform
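# (a hedged reading based on the --divisor_unet help text above: the waveform
# is fitted so that the number of output frames at `fps` is divisible by
# `divisor_unet`; fit_audio_unet's exact behavior isn't shown in this snippet)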
| """Inference using the SAID_UNet1D model
"""
def main():
"""Main function"""
# Arguments
parser = argparse.ArgumentParser(
description="Inference the lipsync using the SAiD model"
)
parser.add_argument(
"--weights_path",
type=str,
default="../BlendVOCA/SAiD.pth",
help="Path of the weights of SAiD model",
)
parser.add_argument(
"--audio_path",
type=str,
default="../BlendVOCA/audio/FaceTalk_170731_00024_TA/sentence01.wav",
help="Path of the audio file",
)
parser.add_argument(
"--output_path",
type=str,
default="../out.csv",
help="Path of the output blendshape coefficients file (csv format)",
)
parser.add_argument(
"--output_image_path",
type=str,
default="../out.png",
help="Path of the image of the output blendshape coefficients",
)
parser.add_argument(
"--intermediate_dir",
type=str,
default="../interm",
help="Saving directory of the intermediate outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--save_image",
type=bool,
default=False,
help="Save the output blendshape coefficients as an image",
)
parser.add_argument(
"--save_intermediate",
type=bool,
default=False,
help="Save the intermediate outputs",
)
parser.add_argument(
"--num_steps", type=int, default=1000, help="Number of inference steps"
)
parser.add_argument("--strength", type=float, default=1.0, help="How much to paint")
parser.add_argument(
"--guidance_scale", type=float, default=2.0, help="Guidance scale"
)
parser.add_argument(
"--guidance_rescale", type=float, default=0.0, help="Guidance scale"
)
parser.add_argument(
"--eta", type=float, default=0.0, help="Eta for DDIMScheduler, between [0, 1]"
)
parser.add_argument(
"--fps",
type=int,
default=60,
help="FPS of the blendshape coefficients sequence",
)
parser.add_argument(
"--divisor_unet",
type=int,
default=1,
help="Length of the blendshape coefficients sequence should be divided by this number",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--device",
type=str,
default="cuda:0",
help="GPU/CPU device",
)
parser.add_argument(
"--init_sample_path",
type=str,
help="Path of the initial sample file (csv format)",
)
parser.add_argument(
"--mask_path",
type=str,
help="Path of the mask file (csv format)",
)
args = parser.parse_args()
weights_path = args.weights_path
audio_path = args.audio_path
output_path = args.output_path
output_image_path = args.output_image_path
intermediate_dir = args.intermediate_dir
prediction_type = args.prediction_type
num_steps = args.num_steps
strength = args.strength
guidance_scale = args.guidance_scale
guidance_rescale = args.guidance_rescale
eta = args.eta
fps = args.fps
divisor_unet = args.divisor_unet
unet_feature_dim = args.unet_feature_dim
device = args.device
save_image = args.save_image
save_intermediate = args.save_intermediate
show_process = True
# Load init sample
init_sample_path = args.init_sample_path
init_samples = None
if init_sample_path is not None:
init_samples = load_blendshape_coeffs(init_sample_path).unsqueeze(0).to(device)
# Load mask
mask_path = args.mask_path
mask = None
if mask_path is not None:
mask = load_blendshape_coeffs(mask_path).unsqueeze(0).to(device)
# Load model
said_model = SAID_UNet1D(
noise_scheduler=DDIMScheduler,
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
)
said_model.load_state_dict(torch.load(weights_path, map_location=device))
said_model.to(device)
said_model.eval()
# Load data
waveform = load_audio(audio_path, said_model.sampling_rate)
# waveform = torch.zeros_like(waveform)
# Fit the size of waveform | fit_output = fit_audio_unet(waveform, said_model.sampling_rate, fps, divisor_unet) | 1 | 2023-11-03 06:38:51+00:00 | 4k |
Harvard-Ophthalmology-AI-Lab/FairSeg | SAMed/segment_anything/utils/onnx.py | [
{
"identifier": "Sam",
"path": "SAMed/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_... | import torch
import torch.nn as nn
from torch.nn import functional as F
from typing import Tuple
from ..modeling import Sam
from .amg import calculate_stability_score | 3,262 | # All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamOnnxModel(nn.Module):
"""
This model should not be called directly, but is used in ONNX export.
It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
with some functions modified to enable model tracing. Also supports extra
options controlling what information is returned. See the ONNX export script for details.
"""
def __init__(
self,
model: Sam,
return_single_mask: bool,
use_stability_score: bool = False,
return_extra_metrics: bool = False,
) -> None:
super().__init__()
self.mask_decoder = model.mask_decoder
self.model = model
self.img_size = model.image_encoder.img_size
self.return_single_mask = return_single_mask
self.use_stability_score = use_stability_score
self.stability_score_offset = 1.0
self.return_extra_metrics = return_extra_metrics
@staticmethod
def resize_longest_image_size(
input_image_size: torch.Tensor, longest_side: int
) -> torch.Tensor:
input_image_size = input_image_size.to(torch.float32)
scale = longest_side / torch.max(input_image_size)
transformed_size = scale * input_image_size
transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
return transformed_size
def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
point_coords = point_coords + 0.5
point_coords = point_coords / self.img_size
point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
point_embedding = point_embedding * (point_labels != -1)
point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
point_labels == -1
)
for i in range(self.model.prompt_encoder.num_point_embeddings):
point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
i
].weight * (point_labels == i)
return point_embedding
def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
mask_embedding = mask_embedding + (
1 - has_mask_input
) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
return mask_embedding
def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
masks = F.interpolate(
masks,
size=(self.img_size, self.img_size),
mode="bilinear",
align_corners=False,
)
prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size)
masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])]
orig_im_size = orig_im_size.to(torch.int64)
h, w = orig_im_size[0], orig_im_size[1]
masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
return masks
def select_masks(
self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
) -> Tuple[torch.Tensor, torch.Tensor]:
# Determine if we should return the multiclick mask or not from the number of points.
# The reweighting is used to avoid control flow.
score_reweight = torch.tensor(
[[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
).to(iou_preds.device)
score = iou_preds + (num_points - 2.5) * score_reweight
best_idx = torch.argmax(score, dim=1)
masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
return masks, iou_preds
@torch.no_grad()
def forward(
self,
image_embeddings: torch.Tensor,
point_coords: torch.Tensor,
point_labels: torch.Tensor,
mask_input: torch.Tensor,
has_mask_input: torch.Tensor,
orig_im_size: torch.Tensor,
):
sparse_embedding = self._embed_points(point_coords, point_labels)
dense_embedding = self._embed_masks(mask_input, has_mask_input)
masks, scores = self.model.mask_decoder.predict_masks(
image_embeddings=image_embeddings,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embedding,
dense_prompt_embeddings=dense_embedding,
)
if self.use_stability_score:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamOnnxModel(nn.Module):
"""
This model should not be called directly, but is used in ONNX export.
It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
with some functions modified to enable model tracing. Also supports extra
options controlling what information is returned. See the ONNX export script for details.
"""
def __init__(
self,
model: Sam,
return_single_mask: bool,
use_stability_score: bool = False,
return_extra_metrics: bool = False,
) -> None:
super().__init__()
self.mask_decoder = model.mask_decoder
self.model = model
self.img_size = model.image_encoder.img_size
self.return_single_mask = return_single_mask
self.use_stability_score = use_stability_score
self.stability_score_offset = 1.0
self.return_extra_metrics = return_extra_metrics
@staticmethod
def resize_longest_image_size(
input_image_size: torch.Tensor, longest_side: int
) -> torch.Tensor:
input_image_size = input_image_size.to(torch.float32)
scale = longest_side / torch.max(input_image_size)
transformed_size = scale * input_image_size
transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
return transformed_size
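# Hedged worked example (illustrative numbers): (H, W) = (480, 640) with
# longest_side = 1024 gives scale = 1.6, so floor((768.0, 1024.0) + 0.5)
# -> (768, 1024); the longest side lands exactly on `longest_side`.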
def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
point_coords = point_coords + 0.5
point_coords = point_coords / self.img_size
point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
point_embedding = point_embedding * (point_labels != -1)
point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
point_labels == -1
)
for i in range(self.model.prompt_encoder.num_point_embeddings):
point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
i
].weight * (point_labels == i)
return point_embedding
def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
mask_embedding = mask_embedding + (
1 - has_mask_input
) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
return mask_embedding
def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
masks = F.interpolate(
masks,
size=(self.img_size, self.img_size),
mode="bilinear",
align_corners=False,
)
prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size)
masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])]
orig_im_size = orig_im_size.to(torch.int64)
h, w = orig_im_size[0], orig_im_size[1]
masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
return masks
def select_masks(
self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
) -> Tuple[torch.Tensor, torch.Tensor]:
# Determine if we should return the multiclick mask or not from the number of points.
# The reweighting is used to avoid control flow.
score_reweight = torch.tensor(
[[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
).to(iou_preds.device)
score = iou_preds + (num_points - 2.5) * score_reweight
best_idx = torch.argmax(score, dim=1)
masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
return masks, iou_preds
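# Hedged numeric sketch of the reweighting above (illustrative scores, 4 mask
# tokens): iou_preds = [[0.9, 0.5, 0.4, 0.3]], score_reweight = [[1000, 0, 0, 0]].
# num_points = 1 -> score[0] = 0.9 - 1.5*1000, so argmax picks a multimask
# column (index 1 here); num_points >= 3 -> score[0] = 0.9 + 500, and the
# single-mask token always wins -- branchless selection, ONNX-trace friendly.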
@torch.no_grad()
def forward(
self,
image_embeddings: torch.Tensor,
point_coords: torch.Tensor,
point_labels: torch.Tensor,
mask_input: torch.Tensor,
has_mask_input: torch.Tensor,
orig_im_size: torch.Tensor,
):
sparse_embedding = self._embed_points(point_coords, point_labels)
dense_embedding = self._embed_masks(mask_input, has_mask_input)
masks, scores = self.model.mask_decoder.predict_masks(
image_embeddings=image_embeddings,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embedding,
dense_prompt_embeddings=dense_embedding,
)
if self.use_stability_score: | scores = calculate_stability_score( | 1 | 2023-11-03 17:05:40+00:00 | 4k |
anand2312/quill-server | quill_server/realtime/events.py | [
{
"identifier": "User",
"path": "quill_server/db/models.py",
"snippet": "class User(Base):\n __tablename__ = \"user\"\n\n id: Mapped[UUID] = mapped_column(pg_UUID(as_uuid=True), primary_key=True, default=uuid4) # noqa: A003\n username: Mapped[str] = mapped_column(unique=True)\n password: Ma... | from enum import StrEnum, auto
from functools import partial
from typing import Any, Generic, TypeVar
from collections.abc import Awaitable
from loguru import logger
from pydantic import BaseModel
from redis.asyncio import Redis
from quill_server.db.models import User
from quill_server.realtime.room import GameMember, Room, ChatMessage, _db_user_to_game_member
from quill_server.schema import MessageResponse
import typing | 2,061 |
DataT = TypeVar("DataT", bound=BaseModel)
# the excalidraw element event contains many fields
# https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
ExcalidrawElement = dict[str, Any]
class Drawing(BaseModel):
user: GameMember
elements: list[ExcalidrawElement]
class EventType(StrEnum):
START = auto() # sent by the user to the server to trigger a game start
CONNECT = auto() # sent to the newly joined user
MEMBER_JOIN = auto() # sent to all connected users when a new user joins
MEMBER_LEAVE = auto() # sent to all connected users when a user disconnects from the room
OWNER_CHANGE = auto() # sent when the room owner changes
GAME_STATE_CHANGE = auto() # sent when the game starts or ends
MESSAGE = auto() # sent when any user sends a message in the chat
CORRECT_GUESS = auto() # sent when any user makes a correct guess
DRAWING = auto() # sent when a user is drawing on the board
TURN_START = auto() # sent when a new turn starts
TURN_END = auto() # sent when a turn ends
ERROR = auto() # sent to a user if they try an illegal action
class Event(BaseModel, Generic[DataT]):
"""An event to be broadcasted."""
event_type: EventType
data: DataT
ConnectEvent = partial(Event[Room], event_type=EventType.CONNECT)
MemberJoinEvent = partial(Event[GameMember], event_type=EventType.MEMBER_JOIN)
MemberLeaveEvent = partial(Event[GameMember], event_type=EventType.MEMBER_LEAVE)
ChatMessageEvent = partial(Event[ChatMessage], event_type=EventType.MESSAGE)
CorrectGuessEvent = partial(Event[ChatMessage], event_type=EventType.CORRECT_GUESS)
GameStateChangeEvent = partial(Event[Room], event_type=EventType.GAME_STATE_CHANGE)
DrawingEvent = partial(Event[Drawing], event_type=EventType.DRAWING)
async def process_message(msg: dict[str, Any], room: Room, user: User, conn: Redis) -> Event:
event_type = msg.get("event_type")
event_data = msg.get("data")
if not event_type:
raise ValueError("Malformed message - no event_type found")
if not event_data:
raise ValueError("Malformed message - no event data found")
match EventType(event_type):
case EventType.START:
if str(user.id) == room.owner.user_id:
await room.start()
return GameStateChangeEvent(data=room)
else:
# user is not the room owner
|
DataT = TypeVar("DataT", bound=BaseModel)
# the excalidraw element event contains many fields
# https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
ExcalidrawElement = dict[str, Any]
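# Hedged illustration of the alias (a heavily truncated element; the field
# names are assumptions based on the linked Excalidraw type definition):
_example_element: ExcalidrawElement = {"id": "abc123", "type": "rectangle", "x": 10, "y": 20}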
class Drawing(BaseModel):
user: GameMember
elements: list[ExcalidrawElement]
class EventType(StrEnum):
START = auto() # sent by the user to the server to trigger a game start
CONNECT = auto() # sent to the newly joined user
MEMBER_JOIN = auto() # sent to all connected users when a new user joins
MEMBER_LEAVE = auto() # sent to all connected users when a user disconnects from the room
OWNER_CHANGE = auto() # sent when the room owner changes
GAME_STATE_CHANGE = auto() # sent when the game starts or ends
MESSAGE = auto() # sent when any user sends a message in the chat
CORRECT_GUESS = auto() # sent when any user makes a correct guess
DRAWING = auto() # sent when a user is drawing on the board
TURN_START = auto() # sent when a new turn starts
TURN_END = auto() # sent when a turn ends
ERROR = auto() # sent to a user if they try an illegal action
class Event(BaseModel, Generic[DataT]):
"""An event to be broadcasted."""
event_type: EventType
data: DataT
ConnectEvent = partial(Event[Room], event_type=EventType.CONNECT)
MemberJoinEvent = partial(Event[GameMember], event_type=EventType.MEMBER_JOIN)
MemberLeaveEvent = partial(Event[GameMember], event_type=EventType.MEMBER_LEAVE)
ChatMessageEvent = partial(Event[ChatMessage], event_type=EventType.MESSAGE)
CorrectGuessEvent = partial(Event[ChatMessage], event_type=EventType.CORRECT_GUESS)
GameStateChangeEvent = partial(Event[Room], event_type=EventType.GAME_STATE_CHANGE)
DrawingEvent = partial(Event[Drawing], event_type=EventType.DRAWING)
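# Hedged usage sketch: each partial above pre-binds event_type, so call sites
# only supply the payload (the payload name below is illustrative), e.g.
#   event = ChatMessageEvent(data=chat_message)  # -> Event[ChatMessage]
#   assert event.event_type is EventType.MESSAGE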
async def process_message(msg: dict[str, Any], room: Room, user: User, conn: Redis) -> Event:
event_type = msg.get("event_type")
event_data = msg.get("data")
if not event_type:
raise ValueError("Malformed message - no event_type found")
if not event_data:
raise ValueError("Malformed message - no event data found")
match EventType(event_type):
case EventType.START:
if str(user.id) == room.owner.user_id:
await room.start()
return GameStateChangeEvent(data=room)
else:
# user is not the room owner | data = MessageResponse(message="You do not own this room") | 5 | 2023-11-03 12:43:18+00:00 | 4k |
microsoft/PLEX | PLEX/models/trajectory_models/model.py | [
{
"identifier": "GaussianHead",
"path": "PLEX/models/heads/distributions.py",
"snippet": "class GaussianHead(nn.Module):\n def __init__(self, input_dim, output_dim, std_bounds,\n hidden_dim=None, squash=False):\n super().__init__()\n self.input_dim = input_dim\n s... | import torch
import torch.nn as nn
import torch.distributions as D
import math
import torchvision
import os
import PLEX.util.globals as globals
import robomimic.utils.obs_utils as ObsUtils
from robomimic.models.base_nets import SpatialSoftmax, SpatialMeanPool, Module
from robomimic.models.obs_nets import obs_encoder_factory, ObservationEncoder
from torchvision.models.resnet import BasicBlock, Bottleneck
from PLEX.models.heads.distributions import GaussianHead, GaussianMixtureHead
from PLEX.models.encoders.vision import R3M_Module
from r3m.models.models_r3m import R3M
from r3m import load_r3m_from_path
from r3m import load_r3m | 1,972 |
def _action_loss(action_preds, action_targets, mask):
if isinstance(action_preds, D.Distribution):
# minimize negative log-likelihood, i.e. maximize likelihood
unmasked_losses = -action_preds.log_prob(action_targets)
elif torch.is_tensor(action_preds):
# minimize mean squared error
unmasked_losses = torch.mean((action_preds - action_targets)**2, dim=-1)
else:
raise RuntimeError(f'Invalid action_preds: {action_preds}')
# consider loss only in positions where mask = 1
assert unmasked_losses.shape == mask.shape
selected_losses = unmasked_losses[mask.bool()]
return selected_losses.mean()
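# Hedged sketch of the masking above (illustrative tensors): positions where
# mask == 0 are dropped before averaging, so padded timesteps never dilute
# the loss.
_losses = torch.tensor([1.0, 2.0, 3.0, 4.0])
_mask = torch.tensor([1.0, 0.0, 1.0, 0.0])
assert _losses[_mask.bool()].mean().item() == 2.0  # mean of 1.0 and 3.0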
class TrajectoryModel(nn.Module):
def __init__(self, camera_names, obs_dims,
proprio_dim, act_dim, hidden_dim,
image_encoder_arch='resnet18',
image_encoder_load=None,
use_random_crops=True,
pool_type='SpatialSoftmax',
action_output_type='gaussian',
action_tanh=True,
std_bounds=None,
impute_style=None,
data_dir=None,
history_len=None,
modalities_to_mask=['action'],
bc_mode=True):
super().__init__()
self.camera_names = camera_names
self.obs_dims = obs_dims
self.proprio_dim = proprio_dim
self.act_dim = act_dim
self.hidden_dim = hidden_dim
self.image_encoder_arch = image_encoder_arch
self.image_encoder_load = image_encoder_load
self.use_random_crops = use_random_crops
self.pool_type = pool_type
self.action_output_type = action_output_type
self.data_dir = data_dir
self.history_len = history_len
self.bc_mode = bc_mode
assert type(modalities_to_mask) == list
self.modalities_to_mask = modalities_to_mask
# In behavior cloning mode, we don't condition on context return.
# To implement this, we will map context return to a fixed embedding in this mode.
if self.bc_mode and 'return' not in self.modalities_to_mask:
self.modalities_to_mask.append('return')
self.action_tanh = action_tanh
self.std_bounds = std_bounds
assert len(std_bounds) == 2 and std_bounds[0] < std_bounds[1]
# For embedding inputs
self.return_encoder = nn.Linear(1, hidden_dim)
self.action_encoder = nn.Linear(act_dim, hidden_dim)
# If we are in image-based mode, we will need image and proprio encoders.
if not globals.full_state_mode:
self.proprio_encoder = nn.Linear(proprio_dim, hidden_dim)
# For Robomimic's resnet18 encoder, we have to tell the encoder what its output dim should be.
if self.image_encoder_arch == 'resnet18':
self.image_encoder_feature_dim = 64
self.image_encoder = self._create_image_encoder()
# For R3M, we just take the output dim from R3M itself.
if self.image_encoder_arch.startswith('r3m'):
self.image_encoder_feature_dim = int(self.image_encoder.output_shape()[0] / len(camera_names))
# For combining embeddings of images into single state
self.image_obs_combiner = nn.Linear(
self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.image_and_proprio_emb_combiner = nn.Linear(
hidden_dim + hidden_dim,
hidden_dim
)
# For combining embeddings of proprio data and images into single state
self.obs_combiner = nn.Linear(
hidden_dim + self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.context_encoder = self.image_encoder
else: # Otherwise we are in low-dimensional mode and we will need full state encoders.
assert type(self.obs_dims) == int
self.state_encoder = nn.Linear(self.obs_dims, hidden_dim)
self.context_encoder = self.state_encoder
# For predicting outputs
action_input_dim = hidden_dim
self.predict_proprio = torch.nn.Linear(hidden_dim, self.proprio_dim)
if self.action_output_type == 'gaussian_mixture':
num_components = 5
|
def _action_loss(action_preds, action_targets, mask):
if isinstance(action_preds, D.Distribution):
# minimize negative log-likelihood, i.e. maximize likelihood
unmasked_losses = -action_preds.log_prob(action_targets)
elif torch.is_tensor(action_preds):
# minimize mean squared error
unmasked_losses = torch.mean((action_preds - action_targets)**2, dim=-1)
else:
raise RuntimeError(f'Invalid action_preds: {action_preds}')
# consider loss only in positions where mask = 1
assert unmasked_losses.shape == mask.shape
selected_losses = unmasked_losses[mask.bool()]
return selected_losses.mean()
class TrajectoryModel(nn.Module):
def __init__(self, camera_names, obs_dims,
proprio_dim, act_dim, hidden_dim,
image_encoder_arch='resnet18',
image_encoder_load=None,
use_random_crops=True,
pool_type='SpatialSoftmax',
action_output_type='gaussian',
action_tanh=True,
std_bounds=None,
impute_style=None,
data_dir=None,
history_len=None,
modalities_to_mask=['action'],
bc_mode=True):
super().__init__()
self.camera_names = camera_names
self.obs_dims = obs_dims
self.proprio_dim = proprio_dim
self.act_dim = act_dim
self.hidden_dim = hidden_dim
self.image_encoder_arch = image_encoder_arch
self.image_encoder_load = image_encoder_load
self.use_random_crops = use_random_crops
self.pool_type = pool_type
self.action_output_type = action_output_type
self.data_dir = data_dir
self.history_len = history_len
self.bc_mode = bc_mode
assert type(modalities_to_mask) == list
self.modalities_to_mask = modalities_to_mask
# In behavior cloning mode, we don't condition on context return.
# To implement this, we will map context return to a fixed embedding in this mode.
if self.bc_mode and 'return' not in self.modalities_to_mask:
self.modalities_to_mask.append('return')
self.action_tanh = action_tanh
self.std_bounds = std_bounds
assert len(std_bounds) == 2 and std_bounds[0] < std_bounds[1]
# For embedding inputs
self.return_encoder = nn.Linear(1, hidden_dim)
self.action_encoder = nn.Linear(act_dim, hidden_dim)
# If we are in image-based mode, we will need image and proprio encoders.
if not globals.full_state_mode:
self.proprio_encoder = nn.Linear(proprio_dim, hidden_dim)
# For Robomimic's resnet18 encoder, we have to tell the encoder what its output dim should be.
if self.image_encoder_arch == 'resnet18':
self.image_encoder_feature_dim = 64
self.image_encoder = self._create_image_encoder()
# For R3M, we just take the output dim from R3M itself.
if self.image_encoder_arch.startswith('r3m'):
self.image_encoder_feature_dim = int(self.image_encoder.output_shape()[0] / len(camera_names))
# For combining embeddings of images into single state
self.image_obs_combiner = nn.Linear(
self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.image_and_proprio_emb_combiner = nn.Linear(
hidden_dim + hidden_dim,
hidden_dim
)
# For combining embeddings of proprio data and images into single state
self.obs_combiner = nn.Linear(
hidden_dim + self.image_encoder_feature_dim * len(camera_names),
hidden_dim
)
self.context_encoder = self.image_encoder
else: # Otherwise we are in low-dimensional mode and we will need full state encoders.
assert type(self.obs_dims) == int
self.state_encoder = nn.Linear(self.obs_dims, hidden_dim)
self.context_encoder = self.state_encoder
# For predicting outputs
action_input_dim = hidden_dim
self.predict_proprio = torch.nn.Linear(hidden_dim, self.proprio_dim)
if self.action_output_type == 'gaussian_mixture':
num_components = 5 | self.predict_action = GaussianMixtureHead( | 1 | 2023-11-06 09:38:09+00:00 | 4k |
S3raphimCS/Hackathon_telehack | backend/SPO_KROT/metrics/views.py | [
{
"identifier": "ExcelFile",
"path": "backend/SPO_KROT/metrics/models.py",
"snippet": "class ExcelFile(models.Model):\n file = models.FileField(\n upload_to='metrics',\n unique=True,\n blank=True, null=True,\n validators=[FileExtensionValidator(['xlsx', 'xls', 'xlsm'])],\n... | from re import split as resplit
from datefinder import find_dates
from django.db.models import CharField, DateField, Q
from django.utils.translation import gettext_lazy as _
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from openpyxl import load_workbook
from rest_framework import generics, status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from users.models import CustomUser
from .models import ExcelFile, Measurements, Operator, Report
from .serializers import (ExcelUploadSerializer, ReportDetailSerializer,
ReportListSerializer) | 2,065 |
update_metrics_example = [
{
"id": 13,
"voice_service_non_accessibility": "0.3",
"voice_service_cut_off": "0.8",
"speech_quality_on_call": "4.2",
"negative_mos_samples_ratio": "0.3",
"undelivered_messages": "2.4",
"avg_sms_delivery_time": "6.3",
"http_failure_session": "2.2",
"http_ul_mean_userdata_rate": "2488.1",
"http_dl_mean_userdata_rate": "9700.9",
"http_session_time": "10.9",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
},
{
"id": 13,
"voice_service_non_accessibility": "0.1",
"voice_service_cut_off": "0.5",
"speech_quality_on_call": "4.5",
"negative_mos_samples_ratio": "0.1",
"undelivered_messages": "2.8",
"avg_sms_delivery_time": "6.6",
"http_failure_session": "2.6",
"http_ul_mean_userdata_rate": "2488.9",
"http_dl_mean_userdata_rate": "9700.2",
"http_session_time": "10.3",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
}
]
class ReportDetailView(generics.RetrieveAPIView):
"""Возвращает информацию о конретном отчет по ID."""
lookup_field = 'pk'
|
update_metrics_example = [
{
"id": 13,
"voice_service_non_accessibility": "0.3",
"voice_service_cut_off": "0.8",
"speech_quality_on_call": "4.2",
"negative_mos_samples_ratio": "0.3",
"undelivered_messages": "2.4",
"avg_sms_delivery_time": "6.3",
"http_failure_session": "2.2",
"http_ul_mean_userdata_rate": "2488.1",
"http_dl_mean_userdata_rate": "9700.9",
"http_session_time": "10.9",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
},
{
"id": 13,
"voice_service_non_accessibility": "0.1",
"voice_service_cut_off": "0.5",
"speech_quality_on_call": "4.5",
"negative_mos_samples_ratio": "0.1",
"undelivered_messages": "2.8",
"avg_sms_delivery_time": "6.6",
"http_failure_session": "2.6",
"http_ul_mean_userdata_rate": "2488.9",
"http_dl_mean_userdata_rate": "9700.2",
"http_session_time": "10.3",
"number_of_test_voice_connections": 7818,
"number_of_voice_sequences": 147909,
"voice_connections_with_low_intelligibility": 374,
"number_of_sms_messages": 500,
"number_of_connections_attempts_http": 1729,
"number_of_test_sessions_http": 2204
}
]
class ReportDetailView(generics.RetrieveAPIView):
"""Возвращает информацию о конретном отчет по ID."""
lookup_field = 'pk' | queryset = Report.objects.all() | 3 | 2023-11-09 12:55:04+00:00 | 4k |
lz1oceani/LLM-As-Hierarchical-Policy | hlm/utils/answer_utils.py | [
{
"identifier": "filter_stripped_lines",
"path": "hlm/utils/text_utils.py",
"snippet": "def filter_stripped_lines(lines):\n return [_.strip() for _ in lines if len(_.strip()) > 0]"
},
{
"identifier": "unique_texts",
"path": "hlm/utils/text_utils.py",
"snippet": "def unique_texts(texts... | import numpy as np, time, re, signal, math, os, warnings
from numbers import Number
from sympy import Symbol, Eq, simplify, solve
from sympy.parsing.latex import parse_latex
from math import *
from .text_utils import filter_stripped_lines, unique_texts, all_matched_pos
from .math_answer_utils import normalize_numbers, unwrap_latex_env, clean_up_latex_answer
from .metric_utils import compare_items | 1,794 | NO_ANSWER_TEMPLATE = [
"we can(not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we do( not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we can(not|n't) (determine|find)",
"there (are|is) no (solutions|answer|answers)" "the answer(.*?)is unknown",
"Finally,(.*?)to find the answer.$",
]
def get_answer_type(dataset_name):
dataset_name = dataset_name.lower()
num_names = ["gsm"]
latex_names = ["math"]
check = lambda name, keywords: any([_ in name for _ in keywords])
if check(dataset_name, num_names):
return "number"
elif check(dataset_name, latex_names):
return "latex"
else:
raise NotImplementedError
def get_re_templates(answer_type, choices=None):
templates = {
"number": ["(-?\(\d+\/\d+\)\/\d+|-?\d+\/\d+)", "(-?\d[\d,\. ]*)"],
"latex": [],
}
return templates.get(answer_type, None)
def extract_all_numbers(text):
templates = get_re_templates("number", None)
for template in templates:
nums = re.findall(template, text)
if len(nums) > 0:
return nums
return []
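# Hedged sketch (illustrative strings): the fraction template is tried first,
# then the plain-number template, whose character class also captures trailing
# ",", "." and spaces.
assert extract_all_numbers("the answer is 3/4") == ["3/4"]
assert extract_all_numbers("costs 1,200 dollars") == ["1,200 "]  # note trailing space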
def extract_all_expressions(text):
if "$" in text:
text = text.replace("$\$", "$")
num = text.count("$")
if num % 2 == 0:
return list(re.findall(r"\$([^\$]*)\$", text))
else:
return []
pairs = [[r"\[", r"\]"], [r"\\begin\{align\}", r"\\end\{align\}"], [r"\\begin\{align\*\}", r"\\end\{align\*\}"]]
ret = []
for start, end in pairs:
sign = re.search(start, text) is not None and re.search(end, text) is not None
if sign:
ret += re.findall(rf"{start}([^{start}{end}]*){end}", text)
return ret
def extract_text_answer(text, answer_type=None, final_answer=None):
templates = get_re_templates(answer_type, None)
split_words = ["Therefore", ", so", "is"]
def remove_equal(nums):
if answer_type == "number" or "=" not in final_answer:
tmp = []
for num in nums:
if "=" in num:
num = num.split("=")[-1].strip()
if "\equiv" in num:
num = re.split(r"\\equiv", num)[-1].strip()
tmp.append(num)
nums = tmp
return nums
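# Hedged worked example of remove_equal (illustrative): ["x = 42"] -> ["42"],
# and a congruence like ["n \\equiv 2"] -> ["2"]; stripping is skipped only
# when the reference answer is non-numeric and itself contains "=".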
if "\\boxed" in text:
text = unwrap_latex_env(text, "boxed", is_single=True)
text = unwrap_latex_env(text, "textsf", is_single=False)
return remove_equal(clean_up_latex_answer(text))[0]
check = lambda _: ("$" in _ or "\[" in _) and answer_type == "latex"
clean_up_fn = clean_up_latex_answer if answer_type == "latex" else normalize_numbers
nums = []
for pos in all_matched_pos(split_words, text)[::-1]:
extract_fn = extract_all_expressions if check(text[pos:]) else extract_all_numbers
nums = extract_fn(text[pos:])
if len(nums) > 0:
break
if len(nums) == 0:
extract_fn = extract_all_expressions if check(text) else extract_all_numbers
nums = extract_fn(text)
if len(nums) >= 1:
nums = remove_equal(nums)
for num in nums:
if compare_items(num, final_answer, answer_type if answer_type == "number" else "text"): # About %1 in GSM
return clean_up_fn(num)
ret = nums[0]
return clean_up_fn(ret)
else:
return None
def extract_answer_from_sentence(sentence):
ret = sentence
for pattern in ANSWER_SPLIT_PATTERNS:
indices = list(re.finditer(pattern, sentence, flags=re.IGNORECASE))
if len(indices) > 0:
tmp = sentence[indices[-1].start() :]
if len(tmp) < len(ret):
ret = tmp
return ret
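# Hedged sketch (illustrative sentence): the shortest tail starting at the
# last match across all split patterns is kept.
assert extract_answer_from_sentence("I think the answer is 12") == "answer is 12"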
def extract_answers(responses, answer_type=None, max_num_lines=3, final_answer=None, **kwargs):
if isinstance(responses, list):
return [extract_answers(_, answer_type, max_num_lines, final_answer, **kwargs) for _ in responses]
sentences = re.split(r"\n", responses) # Split text by new line
sentences = [re.sub(r"^#?\d+\. ", "", _) for _ in sentences] # remove starting #1, #2, ...
sentences = [_ for _ in sentences if not _.strip("#").lower().startswith("reference:")] # remove reference lines in Natural Program
|
warnings.simplefilter("ignore", SyntaxWarning)
class TimeoutException(Exception):
pass
ANSWER_SPLIT_PATTERNS = [
"answer is:?",
"answer:",
"answer to (?:the|this) question is",
# last letters
"concatenated letters are",
"concatenate the letters -",
"The answer of ",
]
NEGATIVE_PATTERNS = [
"is not needed to answer the question",
]
ANSWER_PREFIX = [
"answer: ",
"Therefore, there will be ",
"Therefore, \w+ have ",
"Therefore, \w+ and \w+ have ",
"Therefore, \w+ has ",
"Therefore,(.*?)is ",
"there (are|is) ",
"answer to(.*?)is ",
"answer to(.*?)will be ",
"answer to(.*?)would be ",
"answer to(.*?)becomes ",
"Therefore,(.*?)will be ",
"Therefore,(.*?)would be ",
"Therefore,(.*?)cost ",
"Therefore,(.*?)costs ",
"Therefore,(.*?)a total of ",
"There will be ",
"Therefore, ",
"[A-Z]\w+ will have ",
"[A-Z]\w+ have ",
"[A-Z]\w+ has ",
"\w+ still has ",
"^[A-Z]\w+ \w+ ",
" is ",
]
NUMBER_FIX_MAP = {
" zero ": " 0 ",
" no ": " 0 ",
" a ": " 1 ",
" one ": " 1 ",
" two ": " 2 ",
" three ": " 3 ",
" four ": " 4 ",
" five ": " 5 ",
" six ": " 6 ",
" seven ": " 7 ",
" eight ": " 8 ",
" nine ": " 9 ",
" ten ": " 10 ",
"\u2013": "-",
"hundred": "*100",
"thousand": "*1000",
"million": "*(10**6)",
"billion": "*(10**9)",
"trillion": "*(10**12)",
}
NO_ANSWER_TEMPLATE = [
"we can(not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we do( not|n't)(?:.*)answer(?:.*)(?:the|this) question",
"we can(not|n't) (determine|find)",
"there (are|is) no (solutions|answer|answers)" "the answer(.*?)is unknown",
"Finally,(.*?)to find the answer.$",
]
def get_answer_type(dataset_name):
dataset_name = dataset_name.lower()
num_names = ["gsm"]
latex_names = ["math"]
check = lambda name, keywords: any([_ in name for _ in keywords])
if check(dataset_name, num_names):
return "number"
elif check(dataset_name, latex_names):
return "latex"
else:
raise NotImplementedError
def get_re_templates(answer_type, choices=None):
templates = {
"number": ["(-?\(\d+\/\d+\)\/\d+|-?\d+\/\d+)", "(-?\d[\d,\. ]*)"],
"latex": [],
}
return templates.get(answer_type, None)
def extract_all_numbers(text):
templates = get_re_templates("number", None)
for template in templates:
nums = re.findall(template, text)
if len(nums) > 0:
return nums
return []
def extract_all_expressions(text):
if "$" in text:
text = text.replace("$\$", "$")
num = text.count("$")
if num % 2 == 0:
return list(re.findall(r"\$([^\$]*)\$", text))
else:
return []
pairs = [[r"\[", r"\]"], [r"\\begin\{align\}", r"\\end\{align\}"], [r"\\begin\{align\*\}", r"\\end\{align\*\}"]]
ret = []
for start, end in pairs:
sign = re.search(start, text) is not None and re.search(end, text) is not None
if sign:
ret += re.findall(rf"{start}([^{start}{end}]*){end}", text)
return ret
def extract_text_answer(text, answer_type=None, final_answer=None):
templates = get_re_templates(answer_type, None)
split_words = ["Therefore", ", so", "is"]
def remove_equal(nums):
if answer_type == "number" or "=" not in final_answer:
tmp = []
for num in nums:
if "=" in num:
num = num.split("=")[-1].strip()
if "\equiv" in num:
num = re.split(r"\\equiv", num)[-1].strip()
tmp.append(num)
nums = tmp
return nums
if "\\boxed" in text:
text = unwrap_latex_env(text, "boxed", is_single=True)
text = unwrap_latex_env(text, "textsf", is_single=False)
return remove_equal(clean_up_latex_answer(text))[0]
check = lambda _: ("$" in _ or "\[" in _) and answer_type == "latex"
clean_up_fn = clean_up_latex_answer if answer_type == "latex" else normalize_numbers
nums = []
for pos in all_matched_pos(split_words, text)[::-1]:
extract_fn = extract_all_expressions if check(text[pos:]) else extract_all_numbers
nums = extract_fn(text[pos:])
if len(nums) > 0:
break
if len(nums) == 0:
extract_fn = extract_all_expressions if check(text) else extract_all_numbers
nums = extract_fn(text)
if len(nums) >= 1:
nums = remove_equal(nums)
for num in nums:
if compare_items(num, final_answer, answer_type if answer_type == "number" else "text"): # About %1 in GSM
return clean_up_fn(num)
ret = nums[0]
return clean_up_fn(ret)
else:
return None
def extract_answer_from_sentence(sentence):
ret = sentence
for pattern in ANSWER_SPLIT_PATTERNS:
indices = list(re.finditer(pattern, sentence, flags=re.IGNORECASE))
if len(indices) > 0:
tmp = sentence[indices[-1].start() :]
if len(tmp) < len(ret):
ret = tmp
return ret
def extract_answers(responses, answer_type=None, max_num_lines=3, final_answer=None, **kwargs):
if isinstance(responses, list):
return [extract_answers(_, answer_type, max_num_lines, final_answer, **kwargs) for _ in responses]
sentences = re.split(r"\n", responses) # Split text by new line
sentences = [re.sub(r"^#?\d+\. ", "", _) for _ in sentences] # remove starting #1, #2, ...
sentences = [_ for _ in sentences if not _.strip("#").lower().startswith("reference:")] # remove reference lines in Natural Program | sentences = filter_stripped_lines(sentences) | 0 | 2023-11-01 17:15:42+00:00 | 4k |
mitre/arlin | tests/test_analysis/test_visualization/test_visualization.py | [
{
"identifier": "COLORS",
"path": "arlin/analysis/visualization/colors.py",
"snippet": "COLORS = [\n base[\"b\"],\n tableau[\"tab:orange\"],\n base[\"g\"],\n base[\"r\"],\n base[\"c\"],\n base[\"m\"],\n base[\"y\"],\n base[\"k\"],\n tableau[\"tab:blue\"],\n tableau[\"tab:gr... | import os
import numpy as np
import pytest
from matplotlib.patches import Patch
from arlin.analysis.visualization import (
COLORS,
GraphData,
graph_individual_data,
graph_multiple_data,
)
from arlin.analysis.visualization.visualization import _find_subplot_dims | 2,481 |
@pytest.fixture
def graph_data():
x = np.array([0, 1, 2, 3, 4])
y = np.array([2, 4, 6, 8, 10])
title = "Test"
colors = COLORS[0:5]
handles = [Patch(color=COLORS[i], label=str(i)) for i in range(5)]
labels = [f"Test {i}" for i in range(5)]
leg_title = "Test Groups"
legend = {"handles": handles, "labels": labels, "title": leg_title}
cmap = "viridis"
error_bars = [0.5, 0.5, 0.5, 0.5, 0.5]
xlabel = "Time Steps"
ylabel = "Values"
showall = True
|
@pytest.fixture
def graph_data():
x = np.array([0, 1, 2, 3, 4])
y = np.array([2, 4, 6, 8, 10])
title = "Test"
colors = COLORS[0:5]
handles = [Patch(color=COLORS[i], label=str(i)) for i in range(5)]
labels = [f"Test {i}" for i in range(5)]
leg_title = "Test Groups"
legend = {"handles": handles, "labels": labels, "title": leg_title}
cmap = "viridis"
error_bars = [0.5, 0.5, 0.5, 0.5, 0.5]
xlabel = "Time Steps"
ylabel = "Values"
showall = True
| graphdata = GraphData( | 1 | 2023-11-08 13:57:45+00:00 | 4k |
Giftify-Bot/Giftify-Bot | cogs/donations/donation_category.py | [
{
"identifier": "Giftify",
"path": "bot.py",
"snippet": "class Giftify(GiftifyHelper, commands.AutoShardedBot):\r\n user: discord.ClientUser\r\n\r\n colour: int = 0xCB3045\r\n __version_info__ = \"1.1.4\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n log_handler: LogHandler... | import datetime
import discord
from discord import app_commands
from discord.app_commands import Range, Transform
from discord.ext import commands
from bot import Giftify
from models.donation_settings import GuildDonationConfig
from utils.transformers import DonationCategoryTransformer
from utils.tree import Interaction | 3,494 |
class DonationCategory(commands.GroupCog):
"""Cog for creating/deleting donation category."""
bot: Giftify
category_command = app_commands.Group(
name="category",
description="Commands for creating or deleting donation categories.",
guild_only=True,
)
@category_command.command(name="create")
@app_commands.describe(
category="The unique name of the donation category.",
symbol="The symbol to represent the category.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def donation_category_create(
self,
interaction: Interaction,
category: Range[str, 3, 50],
symbol: Range[str, 1, 1] = "$",
) -> None:
"""The command to create a new donation category."""
await interaction.response.defer()
assert interaction.guild is not None
config = self.bot.get_donation_config(interaction.guild, category)
if config:
return await interaction.client.send(
interaction,
f"The donation category of name {category} already exists!",
"warn",
)
if len(self.bot.get_guild_donation_categories(interaction.guild)) >= 25:
return await interaction.client.send(
interaction,
"You cannot create more than `25` donation categories.",
"warn",
)
config = await GuildDonationConfig.create(
interaction.guild.id, category, self.bot, symbol=symbol
)
self.bot.donation_configs.append(config)
await interaction.client.send(
interaction,
f"Successfully created the donation category of name {category} and symbol {symbol}!",
)
@category_command.command(name="delete")
@app_commands.describe(category="The unique name of the donation category.")
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 25, key=lambda i: (i.guild, i.user.id))
async def donation_category_delete(
self,
interaction: Interaction,
|
class DonationCategory(commands.GroupCog):
"""Cog for creating/deleting donation category."""
bot: Giftify
category_command = app_commands.Group(
name="category",
description="Commands for creating or deleting donation categories.",
guild_only=True,
)
@category_command.command(name="create")
@app_commands.describe(
category="The unique name of the donation category.",
symbol="The symbol to represent the category.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def donation_category_create(
self,
interaction: Interaction,
category: Range[str, 3, 50],
symbol: Range[str, 1, 1] = "$",
) -> None:
"""The command to create a new donation category."""
await interaction.response.defer()
assert interaction.guild is not None
config = self.bot.get_donation_config(interaction.guild, category)
if config:
return await interaction.client.send(
interaction,
f"The donation category of name {category} already exists!",
"warn",
)
if len(self.bot.get_guild_donation_categories(interaction.guild)) >= 25:
return await interaction.client.send(
interaction,
"You cannot create more than `25` donation categories.",
"warn",
)
config = await GuildDonationConfig.create(
interaction.guild.id, category, self.bot, symbol=symbol
)
self.bot.donation_configs.append(config)
await interaction.client.send(
interaction,
f"Successfully created the donation category of name {category} and symbol {symbol}!",
)
@category_command.command(name="delete")
@app_commands.describe(category="The unique name of the donation category.")
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 25, key=lambda i: (i.guild, i.user.id))
async def donation_category_delete(
self,
interaction: Interaction, | category: Transform[GuildDonationConfig, DonationCategoryTransformer], | 2 | 2023-11-09 15:00:15+00:00 | 4k |
Zjy0401/CoCoFormer | utilities/run_model.py | [
{
"identifier": "get_device",
"path": "utilities/device.py",
"snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE"
},
{
"identifier": "get_lr",
"path": "utilities/lr_scheduling.py... | import torch
import time
import numpy as np
import tqdm
import torch.nn as nn
from .constants import *
from utilities.device import get_device
from .lr_scheduling import get_lr
from dataset.jsf import *
from utilities.argument_funcs import parse_train_args, parse_eval_args
from thop import profile | 2,362 | # torch.set_printoptions(profile="full")
# train_epoch
def params(dataloader, model, model_disc):
args = parse_train_args()
model.eval()
for batch_num, batch in enumerate(dataloader):
flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),
batch[0][0][1].cuda(args.gpu[0]),
batch[0][0][2].cuda(args.gpu[0]))
)
print('flops:', flops, 'params:', params)
break
def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,
lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):
args = parse_train_args()
out = -1
start_epoch = 5
model.train()
model_disc.train()
for batch_num, batch in enumerate(dataloader):
time_before = time.time()
opt.zero_grad()
opt_disc.zero_grad()
x = batch[0]
tgt = batch[1]
for i in range(len(batch[0])):
if args.gpu[0] != -1:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cuda(device=args.gpu[0])
if isinstance(x[i], torch.Tensor):
x[i] = x[i].cuda(device=args.gpu[0])
if isinstance(tgt[i], list):
for j in range(len(tgt[i])):
tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])
if isinstance(tgt[i], torch.Tensor):
tgt[i] = tgt[i].cuda(device=args.gpu[0])
else:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cpu()
tgt[i][j] = tgt[i][j].cpu()
tgt = tgt[0][0]
tgt = tgt.flatten()
with torch.no_grad():
y1 = model.module(x[1][0], x[1][1], x[1][2])
y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)
loss1 = loss.forward(y1, tgt)
y2 = model.module(x[0][0], x[0][1], x[0][2])
# discriminator model loss:
if args.gpu[0] != -1:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])
else:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)
softmax = nn.Softmax(dim=-1)
d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), fake_disc_label)
d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)
loss3 = d_fake_loss + d_real_loss
# y3 = model(x[2])
# train for only CT
# y = model(x)
y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)
loss2 = loss.forward(y2, tgt)
# tgt = tgt.flatten()
# add scheduled sampling
# out = loss.forward(y, tgt)
# out = loss3
out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3
out.backward()
opt.step()
opt_disc.step()
if lr_scheduler is not None:
lr_scheduler.step()
if lr_disc_scheduler is not None:
lr_disc_scheduler.step()
time_after = time.time()
time_took = time_after - time_before
if (batch_num + 1) % print_modulus == 0:
| # torch.set_printoptions(profile="full")
# train_epoch
def params(dataloader, model, model_disc):
args = parse_train_args()
model.eval()
for batch_num, batch in enumerate(dataloader):
flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),
batch[0][0][1].cuda(args.gpu[0]),
batch[0][0][2].cuda(args.gpu[0]))
)
print('flops:', flops, 'params:', params)
break
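    # (annotator note, not in the source) thop.profile runs one forward pass with
    # counting hooks and reports multiply-accumulate counts (printed here as "flops")
    # plus the parameter count; it is called on model.module because the model is
    # wrapped (e.g. in DataParallel) when GPUs are configured via args.gpu.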
def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,
lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):
args = parse_train_args()
out = -1
start_epoch = 5
model.train()
model_disc.train()
for batch_num, batch in enumerate(dataloader):
time_before = time.time()
opt.zero_grad()
opt_disc.zero_grad()
x = batch[0]
tgt = batch[1]
for i in range(len(batch[0])):
if args.gpu[0] != -1:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cuda(device=args.gpu[0])
if isinstance(x[i], torch.Tensor):
x[i] = x[i].cuda(device=args.gpu[0])
if isinstance(tgt[i], list):
for j in range(len(tgt[i])):
tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])
if isinstance(tgt[i], torch.Tensor):
tgt[i] = tgt[i].cuda(device=args.gpu[0])
else:
if isinstance(x[i], list):
for j in range(len(x[i])):
x[i][j] = x[i][j].cpu()
tgt[i][j] = tgt[i][j].cpu()
tgt = tgt[0][0]
tgt = tgt.flatten()
with torch.no_grad():
y1 = model.module(x[1][0], x[1][1], x[1][2])
y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)
loss1 = loss.forward(y1, tgt)
y2 = model.module(x[0][0], x[0][1], x[0][2])
# discriminator model loss:
if args.gpu[0] != -1:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])
else:
real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)
fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)
softmax = nn.Softmax(dim=-1)
d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), fake_disc_label)
d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)
loss3 = d_fake_loss + d_real_loss
# y3 = model(x[2])
# train for only CT
# y = model(x)
y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)
loss2 = loss.forward(y2, tgt)
# tgt = tgt.flatten()
# add scheduled sampling
# out = loss.forward(y, tgt)
# out = loss3
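        # (annotator note, not in the source) The total objective is a weighted sum of
        # three terms: loss1, the cross-entropy on the x[1] inputs (computed under
        # torch.no_grad(), so it adds a value but contributes no gradient), loss2, the
        # main cross-entropy on the x[0] outputs, and loss3, the discriminator's
        # real+fake loss; the weights come from args.loss.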
out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3
out.backward()
opt.step()
opt_disc.step()
if lr_scheduler is not None:
lr_scheduler.step()
if lr_disc_scheduler is not None:
lr_disc_scheduler.step()
time_after = time.time()
time_took = time_after - time_before
if (batch_num + 1) % print_modulus == 0: | print("Epoch", cur_epoch, " Batch", batch_num + 1, "/", len(dataloader), "LR:", get_lr(opt_disc), | 1 | 2023-11-01 08:33:08+00:00 | 4k |
NWPlayer123/AnimalCrossing-dtk | configure.py | [
{
"identifier": "Object",
"path": "tools/project.py",
"snippet": "class Object:\n def __init__(self, completed, name, **options):\n self.name = name\n self.completed = completed\n self.options = {\n \"add_to_all\": True,\n \"cflags\": None,\n \"mw... | import sys
import argparse
from pathlib import Path
from tools.project import (
Object,
ProjectConfig,
calculate_progress,
generate_build,
is_windows,
) | 3,021 | config.compilers_path = args.compilers
config.debug = args.debug
config.generate_map = args.map
config.sjiswrap_path = args.sjiswrap
if not is_windows():
config.wrapper = args.wrapper
# Tool versions
config.compilers_tag = "20231018"
config.dtk_tag = "v0.5.8"
config.sjiswrap_tag = "v1.1.1"
config.wibo_tag = "0.6.9"
# Project
config.config_path = Path("config") / config.version / "config.yml"
config.check_sha_path = Path("config") / config.version / "build.sha1"
config.ldflags = [
"-proc gekko",
"-fp hardware",
"-nodefaults",
"-nostdlib",
]
# Base flags, common to most GC/Wii games.
# Generally leave untouched, with overrides added below.
cflags_base = [
"-nodefaults",
"-proc gekko",
"-align powerpc",
"-enum int",
"-fp hardware",
"-Cpp_exceptions off",
# "-W all",
"-O4,p",
"-inline auto",
'-pragma "cats off"',
'-pragma "warn_notinlined off"',
"-maxerrors 1",
"-nosyspath",
"-RTTI off",
"-fp_contract on",
"-str reuse",
"-i include",
"-i libc",
"-multibyte",
f"-DVERSION={version_num}",
]
# Debug flags
if config.debug:
cflags_base.extend(["-sym on", "-DDEBUG=1"])
else:
cflags_base.append("-DNDEBUG=1")
# Metrowerks library flags
cflags_runtime = [
*cflags_base,
"-use_lmw_stmw on",
"-str reuse,pool,readonly",
"-gccinc",
"-common off",
"-inline auto",
]
# REL flags
cflags_rel = [
*cflags_base,
"-sdata 0",
"-sdata2 0",
]
config.linker_version = "GC/1.3.2r"
# Helper function for Dolphin libraries
def DolphinLib(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.2.5n",
"cflags": cflags_base,
"host": False,
"objects": objects,
}
# Helper function for REL script objects
def Rel(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.3.2r",
"cflags": cflags_rel,
"host": True,
"objects": objects,
}
Matching = True
NonMatching = False
config.warn_missing_config = False
config.warn_missing_source = False
config.libs = [
{
"lib": "Runtime.PPCEABI.H",
"mw_version": config.linker_version,
"cflags": cflags_runtime,
"host": False,
"objects": [
Object(NonMatching, "Runtime.PPCEABI.H/global_destructor_chain.c"),
Object(NonMatching, "Runtime.PPCEABI.H/__init_cpp_exceptions.cpp"),
],
},
]
if args.mode == "configure":
# Write build.ninja and objdiff.json
generate_build(config)
elif args.mode == "progress":
# Print progress and write progress.json
config.progress_each_module = args.verbose
| #!/usr/bin/env python3
###
# Generates build files for the project.
# This file also includes the project configuration,
# such as compiler flags and the object matching status.
#
# Usage:
# python3 configure.py
# ninja
#
# Append --help to see available options.
###
# Game versions
DEFAULT_VERSION = 0
VERSIONS = [
"GAFE01", # USA
]
if len(VERSIONS) > 1:
versions_str = ", ".join(VERSIONS[:-1]) + f" or {VERSIONS[-1]}"
else:
versions_str = VERSIONS[0]
parser = argparse.ArgumentParser()
parser.add_argument(
"mode",
default="configure",
help="configure or progress (default: configure)",
nargs="?",
)
parser.add_argument(
"--version",
dest="version",
default=VERSIONS[DEFAULT_VERSION],
help=f"version to build ({versions_str})",
)
parser.add_argument(
"--build-dir",
dest="build_dir",
type=Path,
default=Path("build"),
help="base build directory (default: build)",
)
parser.add_argument(
"--compilers",
dest="compilers",
type=Path,
help="path to compilers (optional)",
)
parser.add_argument(
"--map",
dest="map",
action="store_true",
help="generate map file(s)",
)
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
help="build with debug info (non-matching)",
)
if not is_windows():
parser.add_argument(
"--wrapper",
dest="wrapper",
type=Path,
help="path to wibo or wine (optional)",
)
parser.add_argument(
"--build-dtk",
dest="build_dtk",
type=Path,
help="path to decomp-toolkit source (optional)",
)
parser.add_argument(
"--sjiswrap",
dest="sjiswrap",
type=Path,
help="path to sjiswrap.exe (optional)",
)
parser.add_argument(
"--verbose",
dest="verbose",
action="store_true",
help="print verbose output",
)
args = parser.parse_args()
config = ProjectConfig()
config.version = args.version.upper()
if config.version not in VERSIONS:
sys.exit(f"Invalid version '{config.version}', expected {versions_str}")
version_num = VERSIONS.index(config.version)
# Apply arguments
config.build_dir = args.build_dir
config.build_dtk_path = args.build_dtk
config.compilers_path = args.compilers
config.debug = args.debug
config.generate_map = args.map
config.sjiswrap_path = args.sjiswrap
if not is_windows():
config.wrapper = args.wrapper
# Tool versions
config.compilers_tag = "20231018"
config.dtk_tag = "v0.5.8"
config.sjiswrap_tag = "v1.1.1"
config.wibo_tag = "0.6.9"
# Project
config.config_path = Path("config") / config.version / "config.yml"
config.check_sha_path = Path("config") / config.version / "build.sha1"
config.ldflags = [
"-proc gekko",
"-fp hardware",
"-nodefaults",
"-nostdlib",
]
# Base flags, common to most GC/Wii games.
# Generally leave untouched, with overrides added below.
cflags_base = [
"-nodefaults",
"-proc gekko",
"-align powerpc",
"-enum int",
"-fp hardware",
"-Cpp_exceptions off",
# "-W all",
"-O4,p",
"-inline auto",
'-pragma "cats off"',
'-pragma "warn_notinlined off"',
"-maxerrors 1",
"-nosyspath",
"-RTTI off",
"-fp_contract on",
"-str reuse",
"-i include",
"-i libc",
"-multibyte",
f"-DVERSION={version_num}",
]
# Debug flags
if config.debug:
cflags_base.extend(["-sym on", "-DDEBUG=1"])
else:
cflags_base.append("-DNDEBUG=1")
# Metrowerks library flags
cflags_runtime = [
*cflags_base,
"-use_lmw_stmw on",
"-str reuse,pool,readonly",
"-gccinc",
"-common off",
"-inline auto",
]
# REL flags
cflags_rel = [
*cflags_base,
"-sdata 0",
"-sdata2 0",
]
config.linker_version = "GC/1.3.2r"
# Helper function for Dolphin libraries
def DolphinLib(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.2.5n",
"cflags": cflags_base,
"host": False,
"objects": objects,
}
# Helper function for REL script objects
def Rel(lib_name, objects):
return {
"lib": lib_name,
"mw_version": "GC/1.3.2r",
"cflags": cflags_rel,
"host": True,
"objects": objects,
}
Matching = True
NonMatching = False
config.warn_missing_config = False
config.warn_missing_source = False
config.libs = [
{
"lib": "Runtime.PPCEABI.H",
"mw_version": config.linker_version,
"cflags": cflags_runtime,
"host": False,
"objects": [
Object(NonMatching, "Runtime.PPCEABI.H/global_destructor_chain.c"),
Object(NonMatching, "Runtime.PPCEABI.H/__init_cpp_exceptions.cpp"),
],
},
]
if args.mode == "configure":
# Write build.ninja and objdiff.json
generate_build(config)
elif args.mode == "progress":
# Print progress and write progress.json
config.progress_each_module = args.verbose | calculate_progress(config) | 2 | 2023-11-09 04:40:59+00:00 | 4k |
elenacliu/GraspStudio | grasp/grasp.py | [
{
"identifier": "Camera",
"path": "cameras/camera.py",
"snippet": "class Camera:\n config: CameraConfig\n\n def __init__(self, config : CameraConfig):\n self.config = config\n \n def rgb(self) -> NDArray:\n raise NotImplementedError('You should use a specified subclass!')\n\n ... | import numpy as np
import copy
import time
from dataclasses import dataclass, field
from typing import Type, Optional, List
from numpy.typing import NDArray
from cameras import CameraConfig, RealSenseCameraConfig, Camera
from motion_solver import PybulletMotionSolverConfig, MotionSolver
from config import InstantiateConfig | 1,909 | # Copyright 2023 Chang Liu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base class GraspBot for diifferent implementations of grasps."""
@dataclass
class GraspConfig(InstantiateConfig):
_target: Type = field(default_factory=lambda : Grasp)
max_gripper_width: float = 0.08
initial_camera2robot_transformation: NDArray = np.array([[-0.08920106, -0.99592763, 0.01308891, 0.33658066],
[-0.99519613, 0.08965247, 0.03933318, 0.02753368],
[-0.04034645, -0.00951747, -0.99914042, 0.6019472],
[ 0., 0. , 0. , 1. ]])
initial_joints: NDArray = np.array([-0.02159332, -0.80462398, 0.00235787, -2.16951674, 0.0373164, 1.35462832, 0.8590827])
place_joints: NDArray = np.array([1.8, -0.7855447937431189, 0.0003260311383163978, -2.3561892689822015, 0.000589521053350634, 1.5704794415504568, 0.7849731242977285])
| # Copyright 2023 Chang Liu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base class GraspBot for diifferent implementations of grasps."""
@dataclass
class GraspConfig(InstantiateConfig):
_target: Type = field(default_factory=lambda : Grasp)
max_gripper_width: float = 0.08
initial_camera2robot_transformation: NDArray = np.array([[-0.08920106, -0.99592763, 0.01308891, 0.33658066],
[-0.99519613, 0.08965247, 0.03933318, 0.02753368],
[-0.04034645, -0.00951747, -0.99914042, 0.6019472],
[ 0., 0. , 0. , 1. ]])
initial_joints: NDArray = np.array([-0.02159332, -0.80462398, 0.00235787, -2.16951674, 0.0373164, 1.35462832, 0.8590827])
place_joints: NDArray = np.array([1.8, -0.7855447937431189, 0.0003260311383163978, -2.3561892689822015, 0.000589521053350634, 1.5704794415504568, 0.7849731242977285]) | camera_config: CameraConfig = field(default_factory=lambda : RealSenseCameraConfig) | 2 | 2023-11-08 09:44:22+00:00 | 4k |
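The initial_camera2robot_transformation above is a 4x4 homogeneous transform: rotation in the upper-left 3x3 block, translation in the last column. A minimal sketch of applying it to a point expressed in the camera frame (illustrative helper, not from the source):

import numpy as np

def camera_to_robot(T_cam2robot: np.ndarray, p_cam: np.ndarray) -> np.ndarray:
    # Promote the 3D point to homogeneous coordinates, transform, drop the w term.
    p_h = np.append(p_cam, 1.0)
    return (T_cam2robot @ p_h)[:3]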
emadeldeen24/ECGTransForm | trainer.py | [
{
"identifier": "ecgTransForm",
"path": "models.py",
"snippet": "class ecgTransForm(nn.Module):\r\n def __init__(self, configs, hparams):\r\n super(ecgTransForm, self).__init__()\r\n\r\n filter_sizes = [5, 9, 11]\r\n self.conv1 = nn.Conv1d(configs.input_channels, configs.mid_chan... | import torch
import torch.nn.functional as F
import os
import collections
import numpy as np
import warnings
import sklearn.exceptions
from models import ecgTransForm
from dataloader import data_generator
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class
from utils import AverageMeter, to_device, _save_metrics, copy_Files, _plot_umap
from utils import fix_randomness, starting_logs, save_checkpoint, _calc_metrics | 3,417 |
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \
|
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \ | data_generator(self.data_path, data_type, self.hparams) | 1 | 2023-11-06 14:11:19+00:00 | 4k |
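AverageMeter is imported from utils above; its conventional implementation tracks a running average of a scalar metric. This is a sketch — the project's own version may differ:

class AverageMeter:
    """Keeps a running sum/count and exposes the mean."""
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)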
WMD-group/CrystalSpace | app.py | [
{
"identifier": "get_plotly_embedding",
"path": "visualize_app/visualize_embedding.py",
"snippet": "def get_plotly_embedding(\n df: pd.DataFrame = None,\n opacity: float = 0.2,\n **kwargs,\n) -> go.Figure:\n \"\"\"\n Plot the embedding of a dataframe with plotly.\n\n Args:\n df:... | import os
import ase
import pandas as pd
import dash_bootstrap_components as dbc
from pathlib import Path
from fire import Fire
from pymatgen.core import Structure
from dash import Dash, html, Input, Output, dcc, dash_table, no_update
from visualize_app.visualize_embedding import get_plotly_embedding
from visualize_app.visualize_structure import get_plotly_structure
from visualize_app.utils import fn_chemical_check, blank_fig | 3,080 | # set the app title
dbc.Row(
[
html.H1(
"Crystal Space for Binary Compounds 🔮",
style={
"textAlign": "center",
"color": "black",
},
),
html.Hr(),
]
),
# set selector for methods
dbc.Row(
[
# set selector for dimension reduction method
dbc.Col(
dbc.Select(
id="reduction-method-select",
options=[
{"label": "t-SNE", "value": "tsne"},
{"label": "UMAP", "value": "umap"},
{"label": "PCA", "value": "pca"},
],
value="umap",
),
width=3,
),
# set selector for embedding method
dbc.Col(
dbc.Select(
id="embedding-method-select",
options=[
{"label": "magpie", "value": "magpie"},
{"label": "mat2vec", "value": "mat2vec"},
{"label": "megnet16", "value": "megnet16"},
{"label": "oliynyk", "value": "oliynyk"},
{"label": "skipatom", "value": "skipatom"},
{"label": "random_200", "value": "random_200"},
],
value="magpie",
),
width=3,
),
],
justify="start",
),
html.Br(),
# set selector for chemical systems
dbc.Row(
[
# set selector for chemical system 1
dbc.Col(
dbc.Select(
id="chemical-system-select-1",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 1", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
# set selector for chemical system 2
dbc.Col(
dbc.Select(
id="chemical-system-select-2",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 2", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
],
justify="start",
),
dcc.Store(id="embedding-data-store", data=None),
html.Br(),
# set scatter and crystal structure
dbc.Row(
[
# set the scatter plot
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Space",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
dcc.Markdown(
id="method-name",
children="",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
dcc.Graph(
id="3d-scatter-plot",
|
PARENT_DIR = Path(os.path.dirname(__file__))
# load label data
LABEL_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_label.pkl")
LABEL_DATA["label"] = LABEL_DATA["label"].astype(str)
# load materials project data
MP_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_mp.pkl")
def main(
debug: bool = False,
host: str = "0.0.0.0",
port: int = 8050,
):
"""Visualize the embedding of binary compounds.
:param debug: Debug mode, defaults to False
:param host: host address, defaults to "0.0.0.0"
:param port: port number, defaults to 8050
"""
# initialize the app - incorporate a Dash Bootstrap theme
external_stylesheets = [dbc.themes.MINTY]
app = Dash(__name__, external_stylesheets=external_stylesheets)
# app layout
app.layout = dbc.Container(
[
# set the app title
dbc.Row(
[
html.H1(
"Crystal Space for Binary Compounds 🔮",
style={
"textAlign": "center",
"color": "black",
},
),
html.Hr(),
]
),
# set selector for methods
dbc.Row(
[
# set selector for dimension reduction method
dbc.Col(
dbc.Select(
id="reduction-method-select",
options=[
{"label": "t-SNE", "value": "tsne"},
{"label": "UMAP", "value": "umap"},
{"label": "PCA", "value": "pca"},
],
value="umap",
),
width=3,
),
# set selector for embedding method
dbc.Col(
dbc.Select(
id="embedding-method-select",
options=[
{"label": "magpie", "value": "magpie"},
{"label": "mat2vec", "value": "mat2vec"},
{"label": "megnet16", "value": "megnet16"},
{"label": "oliynyk", "value": "oliynyk"},
{"label": "skipatom", "value": "skipatom"},
{"label": "random_200", "value": "random_200"},
],
value="magpie",
),
width=3,
),
],
justify="start",
),
html.Br(),
# set selector for chemical systems
dbc.Row(
[
# set selector for chemical system 1
dbc.Col(
dbc.Select(
id="chemical-system-select-1",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 1", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
# set selector for chemical system 2
dbc.Col(
dbc.Select(
id="chemical-system-select-2",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 2", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
],
justify="start",
),
dcc.Store(id="embedding-data-store", data=None),
html.Br(),
# set scatter and crystal structure
dbc.Row(
[
# set the scatter plot
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Space",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
dcc.Markdown(
id="method-name",
children="",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
dcc.Graph(
id="3d-scatter-plot", | figure=blank_fig(), | 3 | 2023-11-07 17:10:38+00:00 | 4k |
serl-robot/serl | serl/agents/sac/sac_learner.py | [
{
"identifier": "Agent",
"path": "serl/agents/agent.py",
"snippet": "class Agent(struct.PyTreeNode):\n actor: TrainState\n rng: PRNGKey\n\n def eval_actions(self, observations: np.ndarray) -> np.ndarray:\n actions = _eval_actions(self.actor.apply_fn, self.actor.params, observations)\n ... | from functools import partial
from typing import Dict, Optional, Sequence, Tuple
from flax import struct
from flax.training.train_state import TrainState
from serl.agents.agent import Agent
from serl.agents.sac.temperature import Temperature
from serl.data.dataset import DatasetDict
from serl.distributions import TanhNormal
from serl.networks import MLP, Ensemble, StateActionValue, subsample_ensemble
import gym
import jax
import jax.numpy as jnp
import optax | 2,238 | """Implementations of algorithms for continuous control."""
class SACLearner(Agent):
critic: TrainState
target_critic: TrainState
temp: TrainState
tau: float
discount: float
target_entropy: float
num_qs: int = struct.field(pytree_node=False)
num_min_qs: Optional[int] = struct.field(
pytree_node=False
) # See M in RedQ https://arxiv.org/abs/2101.05982
backup_entropy: bool = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
temp_lr: float = 3e-4,
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
num_qs: int = 2,
num_min_qs: Optional[int] = None,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
target_entropy: Optional[float] = None,
init_temperature: float = 1.0,
backup_entropy: bool = True,
):
"""
An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
if target_entropy is None:
target_entropy = -action_dim / 2
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_def = TanhNormal(actor_base_cls, action_dim)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
)
critic_cls = partial(StateActionValue, base_cls=critic_base_cls)
critic_def = Ensemble(critic_cls, num=num_qs)
critic_params = critic_def.init(critic_key, observations, actions)["params"]
critic = TrainState.create(
apply_fn=critic_def.apply,
params=critic_params,
tx=optax.adam(learning_rate=critic_lr),
)
target_critic_def = Ensemble(critic_cls, num=num_min_qs or num_qs)
target_critic = TrainState.create(
apply_fn=target_critic_def.apply,
params=critic_params,
tx=optax.GradientTransformation(lambda _: None, lambda _: None),
)
temp_def = Temperature(init_temperature)
temp_params = temp_def.init(temp_key)["params"]
temp = TrainState.create(
apply_fn=temp_def.apply,
params=temp_params,
tx=optax.adam(learning_rate=temp_lr),
)
return cls(
rng=rng,
actor=actor,
critic=critic,
target_critic=target_critic,
temp=temp,
target_entropy=target_entropy,
tau=tau,
discount=discount,
num_qs=num_qs,
num_min_qs=num_min_qs,
backup_entropy=backup_entropy,
)
| """Implementations of algorithms for continuous control."""
class SACLearner(Agent):
critic: TrainState
target_critic: TrainState
temp: TrainState
tau: float
discount: float
target_entropy: float
num_qs: int = struct.field(pytree_node=False)
num_min_qs: Optional[int] = struct.field(
pytree_node=False
) # See M in RedQ https://arxiv.org/abs/2101.05982
backup_entropy: bool = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
temp_lr: float = 3e-4,
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
num_qs: int = 2,
num_min_qs: Optional[int] = None,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
target_entropy: Optional[float] = None,
init_temperature: float = 1.0,
backup_entropy: bool = True,
):
"""
An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
if target_entropy is None:
target_entropy = -action_dim / 2
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_def = TanhNormal(actor_base_cls, action_dim)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
)
critic_cls = partial(StateActionValue, base_cls=critic_base_cls)
critic_def = Ensemble(critic_cls, num=num_qs)
critic_params = critic_def.init(critic_key, observations, actions)["params"]
critic = TrainState.create(
apply_fn=critic_def.apply,
params=critic_params,
tx=optax.adam(learning_rate=critic_lr),
)
target_critic_def = Ensemble(critic_cls, num=num_min_qs or num_qs)
target_critic = TrainState.create(
apply_fn=target_critic_def.apply,
params=critic_params,
tx=optax.GradientTransformation(lambda _: None, lambda _: None),
)
temp_def = Temperature(init_temperature)
temp_params = temp_def.init(temp_key)["params"]
temp = TrainState.create(
apply_fn=temp_def.apply,
params=temp_params,
tx=optax.adam(learning_rate=temp_lr),
)
return cls(
rng=rng,
actor=actor,
critic=critic,
target_critic=target_critic,
temp=temp,
target_entropy=target_entropy,
tau=tau,
discount=discount,
num_qs=num_qs,
num_min_qs=num_min_qs,
backup_entropy=backup_entropy,
)
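    # (annotator note, not in the source) The target critic above is created with a
    # dummy GradientTransformation because its parameters are never stepped by an
    # optimizer; SAC instead refreshes them with a Polyak (exponential moving) average,
    #   target_params <- tau * online_params + (1 - tau) * target_params
    # which in optax is typically a call like
    #   optax.incremental_update(critic.params, target_critic.params, tau)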
| def update_actor(self, batch: DatasetDict) -> Tuple[Agent, Dict[str, float]]: | 2 | 2023-11-02 23:32:24+00:00 | 4k |
daily-demos/ai-meeting-assistant | server/call/operator.py | [
{
"identifier": "BotConfig",
"path": "server/config.py",
"snippet": "class BotConfig:\n _openai_api_key: str = None\n _openai_model_name: str = None\n _log_dir_path: str = None\n _daily_room_url: str = None\n _daily_meeting_token: str = None\n\n def __init__(self,\n ope... | import threading
import polling2
from daily import Daily
from server.config import BotConfig
from server.call.session import Session | 3,499 | """Module which keeps track of all ongoing sessions and provides
querying functionality to HTTP requesters."""
class Operator():
_sessions: list[Session]
_is_shutting_down: bool
_lock: threading.Lock
def __init__(self):
self._is_shutting_down = False
self._lock = threading.Lock()
self._sessions = []
Daily.init()
t = threading.Thread(target=self.cleanup)
t.start()
| """Module which keeps track of all ongoing sessions and provides
querying functionality to HTTP requesters."""
class Operator():
_sessions: list[Session]
_is_shutting_down: bool
_lock: threading.Lock
def __init__(self):
self._is_shutting_down = False
self._lock = threading.Lock()
self._sessions = []
Daily.init()
t = threading.Thread(target=self.cleanup)
t.start()
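        # (annotator note, not in the source) The cleanup worker started above runs
        # for the life of the process; this class coordinates shutdown through the
        # _is_shutting_down flag, while an alternative pattern would be
        # threading.Thread(target=self.cleanup, daemon=True) so the thread cannot
        # keep the interpreter alive on exit.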
| def create_session(self, bot_config: BotConfig) -> Session: | 0 | 2023-11-02 11:17:16+00:00 | 4k |
Kushalhk/AutoFilter | plugins/gfilters.py | [
{
"identifier": "add_gfilter",
"path": "database/gfilters_mdb.py",
"snippet": "async def add_gfilter(gfilters, text, reply_text, btn, file, alert):\n mycol = mydb[str(gfilters)]\n # mycol.create_index([('text', 'text')])\n\n data = {\n 'text':str(text),\n 'reply':str(reply_text),\... | import io
from pyrogram import filters, Client, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from database.gfilters_mdb import(
add_gfilter,
get_gfilters,
delete_gfilter,
count_gfilters
)
from database.connections_mdb import active_connection
from utils import get_file_id, gfilterparser, split_quotes
from info import ADMINS | 2,063 |
@Client.on_message(filters.command(['gfilter', 'addg']) & filters.incoming & filters.user(ADMINS))
async def addgfilter(client, message):
args = message.text.html.split(None, 1)
if len(args) < 2:
await message.reply_text("Command Incomplete :(", quote=True)
return
extracted = split_quotes(args[1])
text = extracted[0].lower()
if not message.reply_to_message and len(extracted) < 2:
await message.reply_text("Add some content to save your filter!", quote=True)
return
if (len(extracted) >= 2) and not message.reply_to_message:
reply_text, btn, alert = gfilterparser(extracted[1], text)
fileid = None
if not reply_text:
await message.reply_text("You cannot have buttons alone, give some text to go with it!", quote=True)
return
elif message.reply_to_message and message.reply_to_message.reply_markup:
try:
rm = message.reply_to_message.reply_markup
btn = rm.inline_keyboard
msg = get_file_id(message.reply_to_message)
if msg:
fileid = msg.file_id
reply_text = message.reply_to_message.caption.html
else:
reply_text = message.reply_to_message.text.html
fileid = None
alert = None
except:
reply_text = ""
btn = "[]"
fileid = None
alert = None
elif message.reply_to_message and message.reply_to_message.media:
try:
msg = get_file_id(message.reply_to_message)
fileid = msg.file_id if msg else None
reply_text, btn, alert = gfilterparser(extracted[1], text) if message.reply_to_message.sticker else gfilterparser(message.reply_to_message.caption.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
elif message.reply_to_message and message.reply_to_message.text:
try:
fileid = None
reply_text, btn, alert = gfilterparser(message.reply_to_message.text.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
else:
return
await add_gfilter('gfilters', text, reply_text, btn, fileid, alert)
await message.reply_text(
f"GFilter for `{text}` added",
quote=True,
parse_mode=enums.ParseMode.MARKDOWN
)
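# (annotator note, not in the source) Throughout addgfilter, gfilterparser returns a
# (reply_text, buttons, alert) triple parsed from the saved filter markup, and
# split_quotes presumably splits the command arguments while respecting quoted spans;
# both come from utils.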
@Client.on_message(filters.command(['viewgfilters', 'gfilters']) & filters.incoming & filters.user(ADMINS))
async def get_all_gfilters(client, message):
|
@Client.on_message(filters.command(['gfilter', 'addg']) & filters.incoming & filters.user(ADMINS))
async def addgfilter(client, message):
args = message.text.html.split(None, 1)
if len(args) < 2:
await message.reply_text("Command Incomplete :(", quote=True)
return
extracted = split_quotes(args[1])
text = extracted[0].lower()
if not message.reply_to_message and len(extracted) < 2:
await message.reply_text("Add some content to save your filter!", quote=True)
return
if (len(extracted) >= 2) and not message.reply_to_message:
reply_text, btn, alert = gfilterparser(extracted[1], text)
fileid = None
if not reply_text:
await message.reply_text("You cannot have buttons alone, give some text to go with it!", quote=True)
return
elif message.reply_to_message and message.reply_to_message.reply_markup:
try:
rm = message.reply_to_message.reply_markup
btn = rm.inline_keyboard
msg = get_file_id(message.reply_to_message)
if msg:
fileid = msg.file_id
reply_text = message.reply_to_message.caption.html
else:
reply_text = message.reply_to_message.text.html
fileid = None
alert = None
except:
reply_text = ""
btn = "[]"
fileid = None
alert = None
elif message.reply_to_message and message.reply_to_message.media:
try:
msg = get_file_id(message.reply_to_message)
fileid = msg.file_id if msg else None
reply_text, btn, alert = gfilterparser(extracted[1], text) if message.reply_to_message.sticker else gfilterparser(message.reply_to_message.caption.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
elif message.reply_to_message and message.reply_to_message.text:
try:
fileid = None
reply_text, btn, alert = gfilterparser(message.reply_to_message.text.html, text)
except:
reply_text = ""
btn = "[]"
alert = None
else:
return
await add_gfilter('gfilters', text, reply_text, btn, fileid, alert)
await message.reply_text(
f"GFilter for `{text}` added",
quote=True,
parse_mode=enums.ParseMode.MARKDOWN
)
@Client.on_message(filters.command(['viewgfilters', 'gfilters']) & filters.incoming & filters.user(ADMINS))
async def get_all_gfilters(client, message): | texts = await get_gfilters('gfilters') | 1 | 2023-11-03 12:21:26+00:00 | 4k |
tiendatnguyen-vision/Orbit-symmetrize | RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py | [
{
"identifier": "torch_dtype",
"path": "RotatedMNIST/LPS/emlp-pytorch/emlp_pytorch/reps/utils.py",
"snippet": "def torch_dtype(dtype):\n \"\"\" Convert a string representation of a torch dtype to the actual\n torch dtype. \"\"\"\n if isinstance(dtype, torch.dtype):\n return dtype\n if... | import warnings
import torch
from torch import nn
from .utils import torch_dtype, dtype_cast, torch_device, device_cast, get_dtype | 2,797 |
def _matvec(self, v):
return self.A._rmatvec(v)
def _rmatvec(self, v):
return self.A._matvec(v)
def _matmat(self, V):
return self.A._rmatmat(V)
def _rmatmat(self, V):
return self.A._matmat(V)
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _TransposedLinearOperator(LinearOperator):
"""Transposition of arbitrary linear operator"""
def __init__(self, A):
super().__init__()
self.A = A
self.init(dtype=A.dtype, shape=(A.size(1), A.size(0)), device=A.device)
def _matvec(self, v):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatvec(torch.conj(v)))
def _rmatvec(self, v):
return torch.conj(self.A._matvec(torch.conj(v)))
def _matmat(self, V):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatmat(torch.conj(V)))
def _rmatmat(self, V):
return torch.conj(self.A._matmat(torch.conj(V)))
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _SumLinearOperator(LinearOperator):
""" Sum of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape != B.shape:
raise ValueError(f'cannot add {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), A.shape, A.device)
def _matvec(self, v):
return self.A.matvec(v) + self.B.matvec(v)
def _rmatvec(self, v):
return self.A.rmatvec(v) + self.B.rmatvec(v)
def _rmatmat(self, V):
return self.A.rmatmat(V) + self.B.rmatmat(V)
def _matmat(self, V):
return self.A.matmat(V) + self.B.matmat(V)
def _adjoint(self):
return self.A.H() + self.B.H()
def invt(self):
""" Inverse transpose this linear operator. """
return self.A.invt() + self.B.invt()
def to(self, device):
self.A = self.A.to(device)
self.B = self.B.to(device)
self.device = self.A.device
return self
class _ProductLinearOperator(LinearOperator):
""" Product of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.size(1) != B.size(0):
raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), (A.size(0), B.size(1)), A.device)
def _matvec(self, v):
return self.A.matvec(self.B.matvec(v))
def _rmatvec(self, v):
return self.B.rmatvec(self.A.rmatvec(v))
def _rmatmat(self, V):
return self.B.rmatmat(self.A.rmatmat(V))
def _matmat(self, V):
return self.A.matmat(self.B.matmat(V))
def _adjoint(self):
return self.B.H() * self.A.H()
def invt(self):
return self.A.invt()*self.B.invt()
def to_dense(self):
A = self.A.to_dense() if isinstance(self.A, LinearOperator) else self.A
B = self.B.to_dense() if isinstance(self.B, LinearOperator) else self.B
| # pylint: disable=W0212:protected-access
""" Abstract linear algebra library.
This module defines a class hierarchy that implements a kind of "lazy"
matrix representation, called the ``LinearOperator``. It can be used to do
linear algebra with extremely large sparse or structured matrices, without
representing those explicitly in memory. Such matrices can be added,
multiplied, transposed, etc.
As a motivating example, suppose you want have a matrix where almost all of
the elements have the value one. The standard sparse matrix representation
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
able to represent such matrices efficiently. First, we need a compact way to
represent an all-ones matrix::
>>> import torch
>>> class Ones(LinearOperator):
... def __init__(self, shape):
...         super().__init__()
...         self.init(dtype=None, shape=shape, device=None)
...     def _matvec(self, v):
...         return v.sum().repeat(self.size(0))
Instances of this class emulate ``torch.ones(shape)``, but using a constant
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
how this linear operator multiplies with (operates on) a vector. We can now
add this operator to a sparse matrix that stores only offsets from one::
>>> offsets = torch.tensor([[1, 0, 2], [0, -1, 0], [0, 0, 3]]).to_sparse()
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
>>> A.dot(torch.tensor([1, 2, 3]))
tensor([13, 4, 15])
The result is the same as that given by its dense, explicitly-stored
counterpart::
>>> (torch.ones(A.shape, A.dtype) + offsets.to_dense()).dot(torch.tensor([1, 2, 3]))
tensor([13, 4, 15])
Several algorithms in the ``torch.sparse`` library are able to operate on
``LinearOperator`` instances.
"""
def isscalar(x):
""" Is x a scalar? """
return isinstance(x, (int, float, complex))
def isintlike(x):
""" Is x an integer-like object? """
return isinstance(x, int)
def isshape(x, nonneg=False):
"""Is x a valid 2-tuple of dimensions?
If nonneg, also checks that the dimensions are non-negative.
"""
try:
# Assume it's a tuple of matrix dimensions (M, N)
(M, N) = x
except Exception:
return False
else:
if (isscalar(M) and isscalar(N)) or (isintlike(M) and isintlike(N)):
if not nonneg or (M >= 0 and N >= 0):
return True
return False
class LinearOperator(nn.Module):
""" Common interface for performing matrix vector products
Many iterative methods (e.g. cg, gmres) do not need to know the
individual entries of a matrix to solve a linear system A*x=b.
Such solvers only require the computation of matrix vector
products, A*v where v is a dense vector. This class serves as
an abstract interface between iterative solvers and matrix-like
objects.
To construct a concrete LinearOperator, either pass appropriate
callables to the constructor of this class, or subclass it.
A subclass must implement either one of the methods ``_matvec``
and ``_matmat``, and the attributes/properties ``shape`` (pair of
integers) and ``dtype`` (may be None). It may call the ``__init__``
on this class to have these attributes validated. Implementing
``_matvec`` automatically implements ``_matmat`` (using a naive
algorithm) and vice-versa.
Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
to implement the Hermitian adjoint (conjugate transpose). As with
``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
``_adjoint`` implements the other automatically. Implementing
``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
backwards compatibility.
Parameters
----------
shape : tuple
Matrix dimensions (M, N).
matvec : callable f(v)
Returns returns A * v.
rmatvec : callable f(v)
Returns A^H * v, where A^H is the conjugate transpose of A.
matmat : callable f(V)
Returns A * V, where V is a dense matrix with dimensions (N, K).
dtype : dtype
Data type of the matrix.
rmatmat : callable f(V)
Returns A^H * V, where V is a dense matrix with dimensions (M, K).
Attributes
----------
args : tuple
For linear operators describing products etc. of other linear
operators, the operands of the binary operation.
ndim : int
Number of dimensions (this is always 2)
See Also
--------
aslinearoperator : Construct LinearOperators
Notes
-----
The user-defined matvec() function must properly handle the case
where v has shape (N,) as well as the (N,1) case. The shape of
the return type is handled internally by LinearOperator.
LinearOperator instances can also be multiplied, added with each
other and exponentiated, all lazily: the result of these operations
is always a new, composite LinearOperator, that defers linear
operations to the original operators and combines the results.
More details regarding how to subclass a LinearOperator and several
examples of concrete LinearOperator instances can be found in the
external project `PyLops <https://pylops.readthedocs.io>`_.
Examples
--------
>>> def mv(v):
... return torch.tensor([2*v[0], 3*v[1]])
...
>>> A = LinearOperator((2,2), matvec=mv)
>>> A
    <2x2 _CustomLinearOperator with dtype=torch.float32>
>>> A.matvec(torch.ones(2))
tensor([ 2., 3.])
>>> A * torch.ones(2)
tensor([ 2., 3.])
"""
def __new__(cls, *args, **kwargs):
if cls is LinearOperator:
# Operate as _CustomLinearOperator factory.
return super(LinearOperator, cls).__new__(_CustomLinearOperator)
obj = super(LinearOperator, cls).__new__(cls)
if (type(obj)._matvec == LinearOperator._matvec
and type(obj)._matmat == LinearOperator._matmat):
warnings.warn("LinearOperator subclass should implement"
" at least one of _matvec and _matmat.",
category=RuntimeWarning, stacklevel=2)
return obj
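    # (annotator note) Because of this factory, LinearOperator(...) called directly
    # builds a _CustomLinearOperator from the supplied callables, while subclasses are
    # constructed normally; the warning above fires when a subclass overrides neither
    # _matvec nor _matmat.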
def __init__(self):
super().__init__()
self.ndim = 2
self.dtype = None
self.shape = None
self.device = None
def init(self, dtype, shape, device):
""" Initialize this LinearOperator.
To be called by subclasses. ``dtype`` may be None; ``shape`` should
be convertible to a length-2 tuple.
Called from subclasses at the end of the __init__ routine.
"""
if dtype is None:
dtype = torch.float # force float 32
else:
if not isinstance(dtype, torch.dtype):
dtype = torch_dtype(dtype)
shape = tuple(shape)
if not isshape(shape):
raise ValueError(f"invalid shape {(shape,)} (must be 2-d)")
self.dtype = dtype
self.shape = torch.Size(shape)
self.device = torch_device(device)
def size(self, dim=None):
""" Return the size of this LinearOperator.
This is a synonym for ``shape``.
"""
return self.shape if dim is None else self.shape[dim]
def _matmat(self, V):
""" Default matrix-matrix multiplication handler.
Falls back on the user-defined _matvec method, so defining that will
define matrix multiplication (though in a very suboptimal way).
"""
return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])
def _matvec(self, v):
""" Default matrix-vector multiplication handler.
If self is a linear operator of shape (M, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (M,) or (M, 1) ndarray.
This default implementation falls back on _matmat, so defining that
will define matrix-vector multiplication as well.
"""
return self.matmat(v.reshape(-1, 1))
def matvec(self, v):
""" Matrix-vector multiplication.
Performs the operation y=A*v where A is an MxN linear
operator and v is a column vector or 1-d array.
Parameters
----------
v : {matrix, ndarray}
An array with shape (N,) or (N,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (M,) or (M,1) depending
            on the type and shape of the v argument.
Notes
-----
This matvec wraps the user-specified matvec routine or overridden
_matvec method to ensure that y has the correct shape and type.
"""
M, N = self.shape
if v.shape != (N,) and v.shape != (N, 1):
raise ValueError('dimension mismatch')
y = self._matvec(v)
if v.ndim == 1:
y = y.reshape(M)
elif v.ndim == 2:
y = y.reshape(M, 1)
else:
raise ValueError('invalid shape returned by user-defined matvec()')
return y
def rmatvec(self, v):
""" Adjoint matrix-vector multiplication.
Performs the operation y = A^H * v where A is an MxN linear
operator and v is a column vector or 1-d array.
Parameters
----------
v : {matrix, ndarray}
An array with shape (M,) or (M,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (N,) or (N,1) depending
on the type and shape of the v argument.
Notes
-----
This rmatvec wraps the user-specified rmatvec routine or overridden
_rmatvec method to ensure that y has the correct shape and type.
"""
M, N = self.shape
if v.shape != (M,) and v.shape != (M, 1):
raise ValueError('dimension mismatch')
y = self._rmatvec(v)
if v.ndim == 1:
y = y.reshape(N)
elif v.ndim == 2:
y = y.reshape(N, 1)
else:
raise ValueError('invalid shape returned by user-defined rmatvec()')
return y
def _rmatvec(self, v):
""" Default implementation of _rmatvec; defers to adjoint. """
if type(self)._adjoint == LinearOperator._adjoint:
# _adjoint not overridden, prevent infinite recursion
raise NotImplementedError
return self.H().matvec(v)
def matmat(self, V):
""" Matrix-matrix multiplication.
Performs the operation y=A*V where A is an MxN linear
operator and V dense N*K matrix or ndarray.
Parameters
----------
V : {matrix, ndarray}
An array with shape (N,K).
Returns
-------
Y : {matrix, ndarray}
A matrix or ndarray with shape (M,K) depending on
the type of the V argument.
Notes
-----
This matmat wraps any user-specified matmat routine or overridden
_matmat method to ensure that y has the correct type.
"""
if V.ndim != 2:
raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')
if V.size(0) != self.size(1):
raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')
Y = self._matmat(V)
return Y
def rmatmat(self, V):
""" Adjoint matrix-matrix multiplication.
Performs the operation y = A^H * V where A is an MxN linear
operator and V is a column vector or 1-d array, or 2-d array.
The default implementation defers to the adjoint.
Parameters
----------
V : {matrix, ndarray}
A matrix or 2D array.
Returns
-------
Y : {matrix, ndarray}
A matrix or 2D array depending on the type of the input.
Notes
-----
This rmatmat wraps the user-specified rmatmat routine.
"""
if V.ndim != 2:
raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')
if V.size(0) != self.size(0):
raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')
Y = self._rmatmat(V)
return Y
def _rmatmat(self, V):
""" Default implementation of _rmatmat defers to rmatvec or adjoint. """
if type(self)._adjoint == LinearOperator._adjoint:
return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])
return self.H().matmat(V)
def forward(self, v):
""" Matrix-vector or matrix-matrix multiplication. """
return self*v
def __mul__(self, v):
return self.dot(v)
def dot(self, v):
""" Matrix-matrix or matrix-vector multiplication.
Parameters
----------
v : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
Av : array
            1-d or 2-d array (depending on the shape of v) that represents
            the result of applying this linear operator on v.
"""
if isinstance(v, LinearOperator):
return _ProductLinearOperator(self, v)
if torch.is_tensor(v):
if v.ndim == 0:
return _ScaledLinearOperator(self, v)
if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:
return self.matvec(v)
if v.ndim == 2:
return self.matmat(v)
raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')
def __matmul__(self, other):
if isscalar(other):
raise ValueError("Scalar operands are not allowed, use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if isscalar(other):
raise ValueError("Scalar operands are not allowed, use '*' instead")
return self.__rmul__(other)
def __rmul__(self, x):
if isscalar(x):
return _ScaledLinearOperator(self, x)
return NotImplemented
def __pow__(self, p):
if isscalar(p):
return _PowerLinearOperator(self, p)
return NotImplemented
def __add__(self, x):
if isinstance(x, LinearOperator):
return _SumLinearOperator(self, x)
if torch.is_tensor(x) and x.ndim == 2:
return _SumLinearOperator(self, Lazy(x))
return NotImplemented
def __radd__(self, x):
return self.__add__(x)
def __neg__(self):
return _ScaledLinearOperator(self, -1)
def __sub__(self, x):
return self.__add__(-x)
def __repr__(self):
M, N = self.shape
if self.dtype is None:
dtype = 'unspecified dtype'
else:
dtype = 'dtype=' + str(self.dtype)
return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'
def adjoint(self):
""" Hermitian adjoint.
Returns the Hermitian adjoint of self, aka the Hermitian
conjugate or Hermitian transpose. For a complex matrix, the
Hermitian adjoint is equal to the conjugate transpose.
Can be abbreviated self.H instead of self.adjoint().
Returns
-------
A_H : LinearOperator
Hermitian adjoint of self.
"""
return self._adjoint()
def H(self):
""" Hermitian adjoint. """
return self.adjoint()
def transpose(self):
""" Transpose this linear operator.
Returns a LinearOperator that represents the transpose of this one.
Can be abbreviated self.T instead of self.transpose().
"""
return self._transpose()
def t(self):
""" Transpose this linear operator. """
return self.transpose()
def _adjoint(self):
""" Default implementation of _adjoint; defers to rmatvec. """
return _AdjointLinearOperator(self)
def _transpose(self):
""" Default implementation of _transpose; defers to rmatvec + conj"""
return _TransposedLinearOperator(self)
def invt(self):
""" Default implementation of inverse transpose; defers to inv + T """
return (self ** -1).transpose()
def to_dense(self):
""" Default implementation of to_dense which produces the dense
matrix corresponding to the given lazy matrix. Defaults to
multiplying by the identity """
return self@torch.eye(self.size(-1), device=self.device)
def to(self, device):
""" Move this linear operator to a new device. """
self.device = torch.empty(0).to(device).device
return self
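# --- Illustrative example (annotator addition, not part of the original file) ------
# A minimal concrete subclass showing the intended extension pattern: call init()
# with dtype/shape/device and implement _matvec (plus optionally _adjoint). The demo
# arithmetic relies on the _ScaledLinearOperator/_SumLinearOperator helpers defined
# in this module.
class DiagonalOperator(LinearOperator):
    """ Lazy (N, N) diagonal matrix that stores only its diagonal. """
    def __init__(self, diag):
        super().__init__()
        self.diag = diag
        self.init(dtype=diag.dtype, shape=(diag.numel(), diag.numel()),
                  device=diag.device)

    def _matvec(self, v):
        # Broadcasting covers both (N,) and (N, 1) inputs.
        return self.diag.reshape(-1, 1) * v if v.ndim == 2 else self.diag * v

    def _adjoint(self):
        return DiagonalOperator(torch.conj(self.diag))

# Example:
#     D = DiagonalOperator(torch.tensor([1.0, 2.0, 3.0]))
#     (2 * D + D).matvec(torch.ones(3))   # tensor([3., 6., 9.])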
class _CustomLinearOperator(LinearOperator):
"""Linear operator defined in terms of user-specified operations."""
def __init__(self, shape, matvec, rmatvec=None, matmat=None,
dtype=None, device=None, rmatmat=None):
super().__init__()
self.__matvec_impl = matvec
self.__rmatvec_impl = rmatvec
self.__rmatmat_impl = rmatmat
self.__matmat_impl = matmat
self.init(dtype, shape, device)
def _matmat(self, V):
if self.__matmat_impl is not None:
return self.__matmat_impl(V)
return super()._matmat(V)
def _matvec(self, v):
return self.__matvec_impl(v)
def _rmatvec(self, v):
func = self.__rmatvec_impl
if func is None:
raise NotImplementedError("rmatvec is not defined")
return self.__rmatvec_impl(v)
def _rmatmat(self, V):
if self.__rmatmat_impl is not None:
return self.__rmatmat_impl(V)
return super()._rmatmat(V)
def _adjoint(self):
return _CustomLinearOperator(shape=(self.size(1), self.size(0)),
matvec=self.__rmatvec_impl,
rmatvec=self.__matvec_impl,
matmat=self.__rmatmat_impl,
rmatmat=self.__matmat_impl,
dtype=self.dtype,
device=self.device)
class _AdjointLinearOperator(LinearOperator):
"""Adjoint of arbitrary linear operator"""
def __init__(self, A):
super().__init__()
self.A = A
self.init(dtype=A.dtype, shape=(A.size(1), A.size(0)), device=A.device)
def _matvec(self, v):
return self.A._rmatvec(v)
def _rmatvec(self, v):
return self.A._matvec(v)
def _matmat(self, V):
return self.A._rmatmat(V)
def _rmatmat(self, V):
return self.A._matmat(V)
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _TransposedLinearOperator(LinearOperator):
"""Transposition of arbitrary linear operator"""
def __init__(self, A):
super().__init__()
self.A = A
self.init(dtype=A.dtype, shape=(A.size(1), A.size(0)), device=A.device)
def _matvec(self, v):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatvec(torch.conj(v)))
def _rmatvec(self, v):
return torch.conj(self.A._matvec(torch.conj(v)))
def _matmat(self, V):
# torch.conj works also on sparse matrices
return torch.conj(self.A._rmatmat(torch.conj(V)))
def _rmatmat(self, V):
return torch.conj(self.A._matmat(torch.conj(V)))
def to(self, device):
self.A = self.A.to(device)
self.device = self.A.device
return self
class _SumLinearOperator(LinearOperator):
""" Sum of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands must be LinearOperator instances')
if A.shape != B.shape:
raise ValueError(f'cannot add {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), A.shape, A.device)
def _matvec(self, v):
return self.A.matvec(v) + self.B.matvec(v)
def _rmatvec(self, v):
return self.A.rmatvec(v) + self.B.rmatvec(v)
def _rmatmat(self, V):
return self.A.rmatmat(V) + self.B.rmatmat(V)
def _matmat(self, V):
return self.A.matmat(V) + self.B.matmat(V)
def _adjoint(self):
return self.A.H() + self.B.H()
def invt(self):
""" Inverse transpose this linear operator. """
return self.A.invt() + self.B.invt()
def to(self, device):
self.A = self.A.to(device)
self.B = self.B.to(device)
self.device = self.A.device
return self
class _ProductLinearOperator(LinearOperator):
""" Product of two Linear Operators """
def __init__(self, A, B):
super().__init__()
if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator):
raise ValueError('both operands must be LinearOperator instances')
if A.size(1) != B.size(0):
raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
self.A = A
self.B = B
self.init(get_dtype([A, B]), (A.size(0), B.size(1)), A.device)
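# (A B) x = A (B x); the adjoint reverses the factors ((A B)^H = B^H A^H), and the inverse transpose factors as A^-T B^-T.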
def _matvec(self, v):
return self.A.matvec(self.B.matvec(v))
def _rmatvec(self, v):
return self.B.rmatvec(self.A.rmatvec(v))
def _rmatmat(self, V):
return self.B.rmatmat(self.A.rmatmat(V))
def _matmat(self, V):
return self.A.matmat(self.B.matmat(V))
def _adjoint(self):
return self.B.H() * self.A.H()
def invt(self):
return self.A.invt() * self.B.invt()
def to_dense(self):
A = self.A.to_dense() if isinstance(self.A, LinearOperator) else self.A
B = self.B.to_dense() if isinstance(self.B, LinearOperator) else self.B | A, B = device_cast(A, B) | 3 | 2023-11-01 07:19:02+00:00 | 4k |
AnonCatalyst/Scavenger | scavenger.py | [
{
"identifier": "get_system_info",
"path": "src/inf.py",
"snippet": "def get_system_info(target_text_widget=None):\n if target_text_widget is None:\n return \"Error: target_text_widget not provided.\"\n\n # System information\n system_info_text = \"System Information:\\n\"\n system_in... | import sys
import re
import json
import httpx
import urllib3
import urllib.parse
import asyncio
import serpapi
import time
import psutil
import subprocess
import os
import requests
import logging
import warnings
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QTabWidget, QPlainTextEdit
from PyQt5.QtCore import Qt
from PyQt5 import QtGui
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import QStackedWidget, QStackedLayout, QSizePolicy
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QSpacerItem, QSizePolicy
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QPushButton, QHBoxLayout
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWebEngineWidgets import QWebEngineSettings
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QLineEdit
from src.inf import get_system_info
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QMainWindow, QDesktopWidget
from src.usr import check_user_in_urls
from requests.exceptions import RequestException, ConnectionError, TooManyRedirects, SSLError
from colorama import Fore
from datetime import datetime
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton, QGraphicsBlurEffect
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QFrame
from PyQt5.QtGui import QColor
from PyQt5.QtCore import QTimer, QPropertyAnimation, QEasingCurve, QPoint
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QPushButton, QStackedWidget, QLabel, QDesktopWidget, QMainWindow, QApplication
from PyQt5.QtGui import QPixmap, QPalette, QBrush, QImage, QDesktopServices
from PyQt5.QtWidgets import QMenu, QAction
from PyQt5.QtWidgets import QGridLayout | 3,235 | # Add widgets to the layouts
results_layout.addWidget(self.result_text)
errors_layout.addWidget(self.error_text)
log_layout.addWidget(self.log_text)
# Set the background color for the text boxes in all tabs
for text_edit in [self.result_text, self.error_text, self.log_text]:
text_edit.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
# Add layouts to the corresponding tabs
results_tab.setLayout(results_layout)
errors_tab.setLayout(errors_layout)
log_tab.setLayout(log_layout)
# Add the tab widget to the main layout
layout.addWidget(tabs)
self.setLayout(layout)
for widget in [self.username_input, self.result_text, self.error_text, self.log_text]:
widget.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
def run_user_search(self):
target_username = self.username_input.text()
if not target_username:
QMessageBox.warning(self, "Warning", "Please enter a target username.")
return
# Create an instance of the username search thread and pass the target_username and url_list
url_list = self.load_urls_from_file()
self.search_thread = UserSearchThread(target_username, url_list)
self.search_thread.search_result.connect(self.display_username_search_result)
self.search_thread.error.connect(self.display_error)
self.search_thread.log.connect(self.display_log)
# Start the search thread
self.search_thread.start()
self.display_username_search_result("Searching for user in URLs...")
def display_username_search_result(self, result):
self.result_text.append(result)
def display_error(self, error):
self.error_text.append(error)
def display_log(self, log):
self.log_text.append(log)
def load_urls_from_file(self):
try:
with open("src/urls.txt", "r") as f:
return [x.strip() for x in f.readlines()]
except FileNotFoundError:
QMessageBox.warning(self, "Warning", "URLs file (src/urls.txt) not found.")
return []
class HomeWindow(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
main_layout = QVBoxLayout()
# Create a QHBoxLayout for the widgets on the right side (image, name, and bio)
right_layout = QHBoxLayout()
# Create a QLabel for displaying the image
image_label = QLabel(self)
pixmap = QPixmap('img/discord.jpg')  # Replace 'img/discord.jpg' with the actual path to your image
pixmap = pixmap.scaledToWidth(100) # Set the desired width
image_label.setPixmap(pixmap)
image_label.setAlignment(Qt.AlignCenter) # Center the image
# Create a QVBoxLayout for the right side (name and bio)
text_layout = QVBoxLayout()
# Create a QLabel for displaying the name
name_label = QLabel('Scavenger Osint GUI')
name_label.setAlignment(Qt.AlignCenter) # Center the text
# Create a QTextEdit for displaying the bio
bio_box = QTextEdit()
bio_box.setReadOnly(True)
bio_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
# Read content from bio.txt file and set it to the bio box
try:
with open('src/bio.txt', 'r') as file:
bio_text = file.read()
bio_box.setPlainText(bio_text)
except FileNotFoundError:
bio_box.setPlainText("Bio file not found.")
# Add name and bio widgets to the text layout
text_layout.addWidget(name_label)
text_layout.addWidget(bio_box)
# Add image and text layout to the right layout
right_layout.addWidget(image_label)
right_layout.addLayout(text_layout)
# Add the right layout to the main layout
main_layout.addLayout(right_layout)
# Create a scrollable box for displaying system information
info_box = QPlainTextEdit()
info_box.setReadOnly(True)
info_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
# Add the info box to the main layout
main_layout.addWidget(info_box)
self.setLayout(main_layout)
# Get and display system information
|
# Add these lines before the class definitions where the warnings occur
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Configure the logging module
logging.basicConfig(filename='src/maigret.log', level=logging.INFO, format='%(asctime)s [%(levelname)s]: %(message)s')
logger = logging.getLogger(__name__)
os.system("clear")
# Initialize UserAgent object
user_agent = UserAgent()
# Define headers with a fake user agent
headers = {
'User-Agent': user_agent.random,
'Accept-Language': 'en-US,en;q=0.5',
# Add any other headers you may need
}
# Set up the 'header' variable
header = headers
# Disable urllib3 warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Load social platform patterns from a JSON file
with open("src/social_platforms.json", "r") as json_file:
social_platforms = json.load(json_file)
found_social_profiles = set()
found_forum_pages = set()
class GoogleSearchError(Exception):
pass
class MaigretSearchThread(QThread):
maigret_finished = pyqtSignal(str)
log_message = pyqtSignal(str)
def __init__(self, username):
super().__init__()
self.username = username
self.start_time = None
def run(self):
self.start_time = datetime.now()
# Log the start of the Maigret process
self.log_message.emit(f"Maigret process started for username: {self.username}")
try:
# Run the Maigret command with the inputted username
command = f"python3 src/maigret/maigret.py {self.username} -a"
result = os.popen(command).read()
# Log the end of the Maigret process
self.log_message.emit(f"Maigret process ended for username: {self.username}")
# Log the duration of the Maigret process
end_time = datetime.now()
duration = end_time - self.start_time
self.log_message.emit(f"Maigret process took {duration}")
self.maigret_finished.emit(result)
except Exception as e:
error_message = f"Error in MaigretSearchThread: {str(e)}"
self.log_message.emit(error_message)
self.maigret_finished.emit(error_message)
class MaigretSearchGUI(QWidget):
def __init__(self):
super().__init__()
self.username_input = QLineEdit()
self.maigret_result_text = QTextEdit()
self.log_text = QTextEdit()
self.maigret_thread = None # Initialize maigret_thread as None
self.maigret_timer = QTimer()
self.maigret_timer.timeout.connect(self.update_maigret_status)
# Set the interval to 15 seconds (15000 milliseconds)
self.maigret_timer.start(15000)
self.init_ui()
def init_ui(self):
layout = QVBoxLayout()
tab_widget = QTabWidget()
# Create tabs
maigret_tab = QWidget()
log_tab = QWidget()
tab_widget.addTab(maigret_tab, "Maigret Results")
tab_widget.addTab(log_tab, "Logs")
# Layouts for each tab
maigret_layout = QVBoxLayout(maigret_tab)
log_layout = QVBoxLayout(log_tab)
# Maigret tab content
label_username = QLabel("Enter target username:")
maigret_layout.addWidget(label_username)
maigret_layout.addWidget(self.username_input)
search_button = QPushButton("- ᴄʟɪᴄᴋ ʜᴇʀᴇ ᴛᴏ ꜱᴛᴀʀᴛ -")
search_button.clicked.connect(self.run_maigret_search)
maigret_layout.addWidget(search_button)
maigret_layout.addWidget(self.maigret_result_text)
# Log tab content
log_layout.addWidget(self.log_text)
# Set the background color and border style for the input boxes and result box
for widget in [self.username_input, self.maigret_result_text, self.log_text]:
widget.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
layout.addWidget(tab_widget)
self.setLayout(layout)
def run_maigret_search(self):
username = self.username_input.text()
if not username:
QMessageBox.warning(self, "Warning", "Please enter a username.")
return
# Create an instance of the Maigret search thread and pass the username
self.maigret_thread = MaigretSearchThread(username)
self.maigret_thread.maigret_finished.connect(self.display_maigret_results)
self.maigret_thread.log_message.connect(self.display_log)
# Start the Maigret search thread
self.maigret_thread.start()
# Start the timer to update the Maigret status in the log every 15 seconds
self.maigret_timer.start()
self.display_maigret_results("""Searching with Maigret...
~~~~~~~~~~~~~~~~~~~~~~~~~
This can take a while depending on your network speed;
the estimated wait time is around 5 to 7 minutes.""")
print("""
[Scavenger-Osint-GUI] User Interaction: (Maigret Usersearch) Started...
- Estimated wait time is about 5 to 7 minutes!""")
def update_maigret_status(self):
if self.maigret_thread and self.maigret_thread.isRunning():
# Calculate the duration and notify the user
current_time = datetime.now()
duration = current_time - self.maigret_thread.start_time
self.display_log(f"Maigret is still running. Please wait. Duration: {duration}")
else:
# If the thread is not running, stop the timer
self.maigret_timer.stop()
def display_maigret_results(self, result):
# Display the result in the Maigret results tab
self.maigret_result_text.setPlainText(result)
def display_log(self, log_message):
# Display log messages in the "Logs" tab
self.log_text.append(log_message)
def closeEvent(self, event):
# Save the Maigret results when the window is closed
maigret_results = self.maigret_result_text.toPlainText()
with open("reports/maigret_results.txt", "w") as f:
f.write(maigret_results)
event.accept()
def showEvent(self, event):
# Load the saved Maigret results when the window is shown
try:
with open("reports/maigret_results.txt", "r") as f:
maigret_results = f.read()
self.maigret_result_text.setPlainText(maigret_results)
except FileNotFoundError:
pass
event.accept()
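# Presumably clears any stale reports directory from an earlier run; note this runs at import time, before the GUI starts.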
os.system("rm -rf reports")
class UserSearchThread(QThread):
# Add error signal
search_result = pyqtSignal(str)
error = pyqtSignal(str)
log = pyqtSignal(str)
def __init__(self, username, url_list):
super().__init__()
self.username = username
self.url_list = url_list
def run(self):
for url in self.url_list:
url = urllib.parse.urljoin(url, self.username)
try:
s = requests.Session()
s.headers.update(headers)
response = s.get(url, allow_redirects=False, timeout=5)
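# Heuristic: a 200 response whose body echoes the username is treated as a probable profile hit.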
if response.status_code == 200 and self.username.lower() in response.text.lower():
result = f"• {self.username} | [✓] URL: {url} {response.status_code}"
# Emit the search result through the signal
self.search_result.emit(result)
except (ConnectionError, TooManyRedirects, RequestException, SSLError, TimeoutError) as e:
# Emit the error through the signal
self.error.emit(f"Error during search for user in {url}: {str(e)}")
except Exception as e:
# Emit the error through the signal
self.error.emit(f"Unexpected error during search for user in {url}: {str(e)}")
finally:
# Emit log message
self.log.emit(f"Search for user in {url} completed.")
class UserSearchGUI(QWidget):
def __init__(self):
super().__init__()
self.username_input = QLineEdit()
self.result_text = QTextEdit()
self.error_text = QTextEdit()
self.log_text = QTextEdit()
self.search_thread = None # Initialize search_thread as None
self.init_ui()
def init_ui(self):
layout = QVBoxLayout()
label_username = QLabel("Enter target username:")
layout.addWidget(label_username)
layout.addWidget(self.username_input)
search_button = QPushButton("- ᴄʟɪᴄᴋ ʜᴇʀᴇ ᴛᴏ ꜱᴛᴀʀᴛ -")
search_button.clicked.connect(self.run_user_search)
layout.addWidget(search_button)
# Create a tab widget
tabs = QTabWidget()
# Create tabs for results, errors, and logging
results_tab = QWidget()
errors_tab = QWidget()
log_tab = QWidget()
# Add the tabs to the tab widget
tabs.addTab(results_tab, "Results")
tabs.addTab(errors_tab, "Errors")
tabs.addTab(log_tab, "Logging")
# Set layouts for tabs
results_layout = QVBoxLayout(results_tab)
errors_layout = QVBoxLayout(errors_tab)
log_layout = QVBoxLayout(log_tab)
# Add widgets to the layouts
results_layout.addWidget(self.result_text)
errors_layout.addWidget(self.error_text)
log_layout.addWidget(self.log_text)
# Set the background color for the text boxes in all tabs
for text_edit in [self.result_text, self.error_text, self.log_text]:
text_edit.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
# Add layouts to the corresponding tabs
results_tab.setLayout(results_layout)
errors_tab.setLayout(errors_layout)
log_tab.setLayout(log_layout)
# Add the tab widget to the main layout
layout.addWidget(tabs)
self.setLayout(layout)
for widget in [self.username_input, self.result_text, self.error_text, self.log_text]:
widget.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
def run_user_search(self):
target_username = self.username_input.text()
if not target_username:
QMessageBox.warning(self, "Warning", "Please enter a target username.")
return
# Create an instance of the username search thread and pass the target_username and url_list
url_list = self.load_urls_from_file()
self.search_thread = UserSearchThread(target_username, url_list)
self.search_thread.search_result.connect(self.display_username_search_result)
self.search_thread.error.connect(self.display_error)
self.search_thread.log.connect(self.display_log)
# Start the search thread
self.search_thread.start()
self.display_username_search_result("Searching for user in URLs...")
def display_username_search_result(self, result):
self.result_text.append(result)
def display_error(self, error):
self.error_text.append(error)
def display_log(self, log):
self.log_text.append(log)
def load_urls_from_file(self):
try:
with open("src/urls.txt", "r") as f:
return [x.strip() for x in f.readlines()]
except FileNotFoundError:
QMessageBox.warning(self, "Warning", "URLs file (src/urls.txt) not found.")
return []
class HomeWindow(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
main_layout = QVBoxLayout()
# Create a QHBoxLayout for the widgets on the right side (image, name, and bio)
right_layout = QHBoxLayout()
# Create a QLabel for displaying the image
image_label = QLabel(self)
pixmap = QPixmap('img/discord.jpg')  # Replace 'img/discord.jpg' with the actual path to your image
pixmap = pixmap.scaledToWidth(100) # Set the desired width
image_label.setPixmap(pixmap)
image_label.setAlignment(Qt.AlignCenter) # Center the image
# Create a QVBoxLayout for the right side (name and bio)
text_layout = QVBoxLayout()
# Create a QLabel for displaying the name
name_label = QLabel('Scavenger Osint GUI')
name_label.setAlignment(Qt.AlignCenter) # Center the text
# Create a QTextEdit for displaying the bio
bio_box = QTextEdit()
bio_box.setReadOnly(True)
bio_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
# Read content from bio.txt file and set it to the bio box
try:
with open('src/bio.txt', 'r') as file:
bio_text = file.read()
bio_box.setPlainText(bio_text)
except FileNotFoundError:
bio_box.setPlainText("Bio file not found.")
# Add name and bio widgets to the text layout
text_layout.addWidget(name_label)
text_layout.addWidget(bio_box)
# Add image and text layout to the right layout
right_layout.addWidget(image_label)
right_layout.addLayout(text_layout)
# Add the right layout to the main layout
main_layout.addLayout(right_layout)
# Create a scrollable box for displaying system information
info_box = QPlainTextEdit()
info_box.setReadOnly(True)
info_box.setStyleSheet("background-color: #303030; color: white; border: 1px solid cyan;")
# Add the info box to the main layout
main_layout.addWidget(info_box)
self.setLayout(main_layout)
# Get and display system information | get_system_info(info_box) | 0 | 2023-11-02 06:46:11+00:00 | 4k |
xenxxxx/BitPay-Crypto-Signal-Trading-Bot | tests/test_configuration.py | [
{
"identifier": "CURRENT_TEST_STRATEGY",
"path": "tests/conftest.py",
"snippet": "CURRENT_TEST_STRATEGY = 'StrategyTestV3'"
},
{
"identifier": "log_has",
"path": "tests/conftest.py",
"snippet": "def log_has(line, logs):\n \"\"\"Check if line is found on some caplog's message.\"\"\"\n ... | import json
import warnings
import pytest
from copy import deepcopy
from pathlib import Path
from unittest.mock import MagicMock
from jsonschema import ValidationError
from freqtrade.commands import Arguments
from freqtrade.configuration import Configuration, validate_config_consistency
from freqtrade.configuration.config_validation import validate_config_schema
from freqtrade.configuration.deprecated_settings import (check_conflicting_settings,
process_deprecated_setting,
process_removed_setting,
process_temporary_deprecated_settings)
from freqtrade.configuration.environment_vars import flat_vars_to_nested_dict
from freqtrade.configuration.load_config import (load_config_file, load_file, load_from_files,
log_config_error_range)
from freqtrade.constants import DEFAULT_DB_DRYRUN_URL, DEFAULT_DB_PROD_URL, ENV_VAR_PREFIX
from freqtrade.enums import RunMode
from freqtrade.exceptions import OperationalException
from tests.conftest import (CURRENT_TEST_STRATEGY, log_has, log_has_re,
patched_configuration_load_config_file) | 2,864 |
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
with pytest.raises(OperationalException,
match='Market exit orders require exit_pricing.price_side = "other".'):
validate_config_consistency(conf)
# Validate the inverted case
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
conf['order_types']['entry'] = 'market'
conf['exit_pricing']['price_side'] = 'bid'
conf['entry_pricing']['price_side'] = 'ask'
validate_config_consistency(conf)
def test_validate_tsl(default_conf):
default_conf['stoploss'] = 0.0
with pytest.raises(OperationalException, match='The config stoploss needs to be different '
'from 0 to avoid problems with sell orders.'):
validate_config_consistency(default_conf)
default_conf['stoploss'] = -0.10
default_conf['trailing_stop'] = True
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0
default_conf['trailing_only_offset_is_reached'] = True
with pytest.raises(OperationalException,
match=r'The config trailing_only_offset_is_reached needs '
'trailing_stop_positive_offset to be more than 0 in your config.'):
validate_config_consistency(default_conf)
default_conf['trailing_stop_positive_offset'] = 0.01
default_conf['trailing_stop_positive'] = 0.015
with pytest.raises(OperationalException,
match=r'The config trailing_stop_positive_offset needs '
'to be greater than trailing_stop_positive in your config.'):
validate_config_consistency(default_conf)
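# With trailing_stop_positive_offset (0.015) strictly greater than trailing_stop_positive (0.01), validation passes.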
default_conf['trailing_stop_positive'] = 0.01
default_conf['trailing_stop_positive_offset'] = 0.015
validate_config_consistency(default_conf)
# 0 trailing stop positive - results in "Order would trigger immediately"
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0.02
default_conf['trailing_only_offset_is_reached'] = False
with pytest.raises(OperationalException,
match='The config trailing_stop_positive needs to be different from 0 '
'to avoid problems with sell orders'):
validate_config_consistency(default_conf)
def test_validate_edge2(edge_conf):
edge_conf.update({
"use_exit_signal": True,
})
# Passes test
validate_config_consistency(edge_conf)
edge_conf.update({
"use_exit_signal": False,
})
with pytest.raises(OperationalException, match="Edge requires `use_exit_signal` to be True, "
"otherwise no sells will happen."):
validate_config_consistency(edge_conf)
def test_validate_whitelist(default_conf):
default_conf['runmode'] = RunMode.DRY_RUN
# Test regular case - has whitelist and uses StaticPairlist
validate_config_consistency(default_conf)
conf = deepcopy(default_conf)
del conf['exchange']['pair_whitelist']
# Test error case
with pytest.raises(OperationalException,
match="StaticPairList requires pair_whitelist to be set."):
validate_config_consistency(conf)
conf = deepcopy(default_conf)
conf.update({"pairlists": [{
"method": "VolumePairList",
}]})
# Dynamic whitelist should not care about pair_whitelist
validate_config_consistency(conf)
del conf['exchange']['pair_whitelist']
validate_config_consistency(conf)
@pytest.mark.parametrize('protconf,expected', [
([], None),
([{"method": "StoplossGuard", "lookback_period": 2000, "stop_duration_candles": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "lookback_period": 2000,
"stop_duration": 10}], r'Protections must specify either `lookback_period`.*'),
([{"method": "StoplossGuard", "lookback_period": 20, "stop_duration": 10,
"stop_duration_candles": 10}], r'Protections must specify either `stop_duration`.*'),
])
def test_validate_protections(default_conf, protconf, expected):
conf = deepcopy(default_conf)
conf['protections'] = protconf
if expected:
with pytest.raises(OperationalException, match=expected):
validate_config_consistency(conf)
else:
validate_config_consistency(conf)
def test_validate_ask_orderbook(default_conf, caplog) -> None:
conf = deepcopy(default_conf)
conf['exit_pricing']['use_order_book'] = True
conf['exit_pricing']['order_book_min'] = 2
conf['exit_pricing']['order_book_max'] = 2
validate_config_consistency(conf)
| # pragma pylint: disable=missing-docstring, protected-access, invalid-name
@pytest.fixture(scope="function")
def all_conf():
config_file = Path(__file__).parents[1] / "config_examples/config_full.example.json"
conf = load_config_file(str(config_file))
return conf
def test_load_config_missing_attributes(default_conf) -> None:
conf = deepcopy(default_conf)
conf.pop('exchange')
with pytest.raises(ValidationError, match=r".*'exchange' is a required property.*"):
validate_config_schema(conf)
conf = deepcopy(default_conf)
conf.pop('stake_currency')
conf['runmode'] = RunMode.DRY_RUN
with pytest.raises(ValidationError, match=r".*'stake_currency' is a required property.*"):
validate_config_schema(conf)
def test_load_config_incorrect_stake_amount(default_conf) -> None:
default_conf['stake_amount'] = 'fake'
with pytest.raises(ValidationError, match=r".*'fake' does not match 'unlimited'.*"):
validate_config_schema(default_conf)
def test_load_config_file(default_conf, mocker, caplog) -> None:
del default_conf['user_data_dir']
default_conf['datadir'] = str(default_conf['datadir'])
file_mock = mocker.patch('freqtrade.configuration.load_config.Path.open', mocker.mock_open(
read_data=json.dumps(default_conf)
))
validated_conf = load_config_file('somefile')
assert file_mock.call_count == 1
assert validated_conf.items() >= default_conf.items()
def test_load_config_file_error(default_conf, mocker, caplog) -> None:
del default_conf['user_data_dir']
default_conf['datadir'] = str(default_conf['datadir'])
filedata = json.dumps(default_conf).replace(
'"stake_amount": 0.001,', '"stake_amount": .001,')
mocker.patch('freqtrade.configuration.load_config.Path.open',
mocker.mock_open(read_data=filedata))
mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
with pytest.raises(OperationalException, match=r".*Please verify the following segment.*"):
load_config_file('somefile')
def test_load_config_file_error_range(default_conf, mocker, caplog) -> None:
del default_conf['user_data_dir']
default_conf['datadir'] = str(default_conf['datadir'])
filedata = json.dumps(default_conf).replace(
'"stake_amount": 0.001,', '"stake_amount": .001,')
mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
x = log_config_error_range('somefile', 'Parse error at offset 64: Invalid value.')
assert isinstance(x, str)
assert (x == '{"max_open_trades": 1, "stake_currency": "BTC", '
'"stake_amount": .001, "fiat_display_currency": "USD", '
'"timeframe": "5m", "dry_run": true, "cance')
filedata = json.dumps(default_conf, indent=2).replace(
'"stake_amount": 0.001,', '"stake_amount": .001,')
mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
x = log_config_error_range('somefile', 'Parse error at offset 4: Invalid value.')
assert isinstance(x, str)
assert (x == ' "max_open_trades": 1,\n "stake_currency": "BTC",\n'
' "stake_amount": .001,')
x = log_config_error_range('-', '')
assert x == ''
def test_load_file_error(tmpdir):
testpath = Path(tmpdir) / 'config.json'
with pytest.raises(OperationalException, match=r"File .* not found!"):
load_file(testpath)
def test__args_to_config(caplog):
arg_list = ['trade', '--strategy-path', 'TestTest']
args = Arguments(arg_list).get_parsed_arg()
configuration = Configuration(args)
config = {}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# No warnings ...
configuration._args_to_config(config, argname="strategy_path", logstring="DeadBeef")
assert len(w) == 0
assert log_has("DeadBeef", caplog)
assert config['strategy_path'] == "TestTest"
configuration = Configuration(args)
config = {}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# Deprecation warnings!
configuration._args_to_config(config, argname="strategy_path", logstring="DeadBeef",
deprecated_msg="Going away soon!")
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "DEPRECATED: Going away soon!" in str(w[-1].message)
assert log_has("DeadBeef", caplog)
assert config['strategy_path'] == "TestTest"
def test_load_config_max_open_trades_zero(default_conf, mocker, caplog) -> None:
default_conf['max_open_trades'] = 0
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf['max_open_trades'] == 0
assert 'internals' in validated_conf
def test_load_config_combine_dicts(default_conf, mocker, caplog) -> None:
conf1 = deepcopy(default_conf)
conf2 = deepcopy(default_conf)
del conf1['exchange']['key']
del conf1['exchange']['secret']
del conf2['exchange']['name']
conf2['exchange']['pair_whitelist'] += ['NANO/BTC']
config_files = [conf1, conf2]
configsmock = MagicMock(side_effect=config_files)
mocker.patch(
'freqtrade.configuration.load_config.load_config_file',
configsmock
)
arg_list = ['trade', '-c', 'test_conf.json', '--config', 'test2_conf.json', ]
args = Arguments(arg_list).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
exchange_conf = default_conf['exchange']
assert validated_conf['exchange']['name'] == exchange_conf['name']
assert validated_conf['exchange']['key'] == exchange_conf['key']
assert validated_conf['exchange']['secret'] == exchange_conf['secret']
assert validated_conf['exchange']['pair_whitelist'] != conf1['exchange']['pair_whitelist']
assert validated_conf['exchange']['pair_whitelist'] == conf2['exchange']['pair_whitelist']
assert 'internals' in validated_conf
def test_from_config(default_conf, mocker, caplog) -> None:
conf1 = deepcopy(default_conf)
conf2 = deepcopy(default_conf)
del conf1['exchange']['key']
del conf1['exchange']['secret']
del conf2['exchange']['name']
conf2['exchange']['pair_whitelist'] += ['NANO/BTC']
conf2['fiat_display_currency'] = "EUR"
config_files = [conf1, conf2]
mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x)
configsmock = MagicMock(side_effect=config_files)
mocker.patch('freqtrade.configuration.load_config.load_config_file', configsmock)
validated_conf = Configuration.from_files(['test_conf.json', 'test2_conf.json'])
exchange_conf = default_conf['exchange']
assert validated_conf['exchange']['name'] == exchange_conf['name']
assert validated_conf['exchange']['key'] == exchange_conf['key']
assert validated_conf['exchange']['secret'] == exchange_conf['secret']
assert validated_conf['exchange']['pair_whitelist'] != conf1['exchange']['pair_whitelist']
assert validated_conf['exchange']['pair_whitelist'] == conf2['exchange']['pair_whitelist']
assert validated_conf['fiat_display_currency'] == "EUR"
assert 'internals' in validated_conf
assert isinstance(validated_conf['user_data_dir'], Path)
def test_from_recursive_files(testdatadir) -> None:
files = testdatadir / "testconfigs/testconfig.json"
conf = Configuration.from_files([files])
assert conf
# Exchange comes from "the first config"
assert conf['exchange']
# Pricing comes from the 2nd config
assert conf['entry_pricing']
assert conf['entry_pricing']['price_side'] == "same"
assert conf['exit_pricing']
# The other key comes from pricing2, which is imported by pricing.json.
# pricing.json is a level higher, therefore wins.
assert conf['exit_pricing']['price_side'] == "same"
assert len(conf['config_files']) == 4
assert 'testconfig.json' in conf['config_files'][0]
assert 'test_pricing_conf.json' in conf['config_files'][1]
assert 'test_base_config.json' in conf['config_files'][2]
assert 'test_pricing2_conf.json' in conf['config_files'][3]
files = testdatadir / "testconfigs/recursive.json"
with pytest.raises(OperationalException, match="Config loop detected."):
load_from_files([files])
def test_print_config(default_conf, mocker, caplog) -> None:
conf1 = deepcopy(default_conf)
# Delete non-json elements from default_conf
del conf1['user_data_dir']
conf1['datadir'] = str(conf1['datadir'])
config_files = [conf1]
configsmock = MagicMock(side_effect=config_files)
mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x)
mocker.patch('freqtrade.configuration.configuration.load_from_files', configsmock)
validated_conf = Configuration.from_files(['test_conf.json'])
assert isinstance(validated_conf['user_data_dir'], Path)
assert "user_data_dir" in validated_conf
assert "original_config" in validated_conf
assert isinstance(json.dumps(validated_conf['original_config']), str)
def test_load_config_max_open_trades_minus_one(default_conf, mocker, caplog) -> None:
default_conf['max_open_trades'] = -1
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf['max_open_trades'] > 999999999
assert validated_conf['max_open_trades'] == float('inf')
assert "runmode" in validated_conf
assert validated_conf['runmode'] == RunMode.DRY_RUN
def test_load_config_file_exception(mocker) -> None:
mocker.patch(
'freqtrade.configuration.configuration.Path.open',
MagicMock(side_effect=FileNotFoundError('File not found'))
)
with pytest.raises(OperationalException, match=r'.*Config file "somefile" not found!*'):
load_config_file('somefile')
def test_load_config(default_conf, mocker) -> None:
del default_conf['strategy_path']
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('strategy_path') is None
assert 'edge' not in validated_conf
def test_load_config_with_params(default_conf, mocker) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path',
'--db-url', 'sqlite:///someurl',
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('strategy') == 'TestStrategy'
assert validated_conf.get('strategy_path') == '/some/path'
assert validated_conf.get('db_url') == 'sqlite:///someurl'
# Test conf provided db_url prod
conf = default_conf.copy()
conf["dry_run"] = False
conf["db_url"] = "sqlite:///path/to/db.sqlite"
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == "sqlite:///path/to/db.sqlite"
# Test conf provided db_url dry_run
conf = default_conf.copy()
conf["dry_run"] = True
conf["db_url"] = "sqlite:///path/to/db.sqlite"
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == "sqlite:///path/to/db.sqlite"
# Test args provided db_url prod
conf = default_conf.copy()
conf["dry_run"] = False
del conf["db_url"]
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == DEFAULT_DB_PROD_URL
assert "runmode" in validated_conf
assert validated_conf['runmode'] == RunMode.LIVE
# Test args provided db_url dry_run
conf = default_conf.copy()
conf["dry_run"] = True
conf["db_url"] = DEFAULT_DB_PROD_URL
patched_configuration_load_config_file(mocker, conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--strategy-path', '/some/path'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('db_url') == DEFAULT_DB_DRYRUN_URL
@pytest.mark.parametrize("config_value,expected,arglist", [
(True, True, ['trade', '--dry-run']), # Leave config untouched
(False, True, ['trade', '--dry-run']), # Override config untouched
(False, False, ['trade']), # Leave config untouched
(True, True, ['trade']), # Leave config untouched
])
def test_load_dry_run(default_conf, mocker, config_value, expected, arglist) -> None:
default_conf['dry_run'] = config_value
patched_configuration_load_config_file(mocker, default_conf)
configuration = Configuration(Arguments(arglist).get_parsed_arg())
validated_conf = configuration.load_config()
assert validated_conf['dry_run'] is expected
assert validated_conf['runmode'] == (RunMode.DRY_RUN if expected else RunMode.LIVE)
def test_load_custom_strategy(default_conf, mocker) -> None:
default_conf.update({
'strategy': 'CustomStrategy',
'strategy_path': '/tmp/strategies',
})
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('strategy') == 'CustomStrategy'
assert validated_conf.get('strategy_path') == '/tmp/strategies'
def test_show_info(default_conf, mocker, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'trade',
'--strategy', 'TestStrategy',
'--db-url', 'sqlite:///tmp/testdb',
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
configuration.get_config()
assert log_has('Using DB: "sqlite:///tmp/testdb"', caplog)
assert log_has('Dry run is enabled', caplog)
def test_setup_configuration_without_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'backtesting',
'--config', 'config.json',
'--strategy', CURRENT_TEST_STRATEGY,
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
config = configuration.get_config()
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert 'user_data_dir' in config
assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
assert 'timeframe' in config
assert not log_has('Parameter -i/--timeframe detected ...', caplog)
assert 'position_stacking' not in config
assert not log_has('Parameter --enable-position-stacking detected ...', caplog)
assert 'timerange' not in config
def test_setup_configuration_with_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
mocker.patch(
'freqtrade.configuration.configuration.create_datadir',
lambda c, x: x
)
mocker.patch(
'freqtrade.configuration.configuration.create_userdata_dir',
lambda x, *args, **kwargs: Path(x)
)
arglist = [
'backtesting',
'--config', 'config.json',
'--strategy', CURRENT_TEST_STRATEGY,
'--datadir', '/foo/bar',
'--userdir', "/tmp/freqtrade",
'--timeframe', '1m',
'--enable-position-stacking',
'--disable-max-market-positions',
'--timerange', ':100',
'--export', 'trades',
'--stake-amount', 'unlimited'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
config = configuration.get_config()
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert log_has('Using data directory: {} ...'.format("/foo/bar"), caplog)
assert log_has('Using user-data directory: {} ...'.format(Path("/tmp/freqtrade")), caplog)
assert 'user_data_dir' in config
assert 'timeframe' in config
assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
caplog)
assert 'position_stacking' in config
assert log_has('Parameter --enable-position-stacking detected ...', caplog)
assert 'use_max_market_positions' in config
assert log_has('Parameter --disable-max-market-positions detected ...', caplog)
assert log_has('max_open_trades set to unlimited ...', caplog)
assert 'timerange' in config
assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog)
assert 'export' in config
assert log_has('Parameter --export detected: {} ...'.format(config['export']), caplog)
assert 'stake_amount' in config
assert config['stake_amount'] == 'unlimited'
def test_setup_configuration_with_stratlist(mocker, default_conf, caplog) -> None:
"""
Test setup_configuration() function
"""
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'backtesting',
'--config', 'config.json',
'--timeframe', '1m',
'--export', 'trades',
'--strategy-list',
CURRENT_TEST_STRATEGY,
'TestStrategy'
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args, RunMode.BACKTEST)
config = configuration.get_config()
assert config['runmode'] == RunMode.BACKTEST
assert 'max_open_trades' in config
assert 'stake_currency' in config
assert 'stake_amount' in config
assert 'exchange' in config
assert 'pair_whitelist' in config['exchange']
assert 'datadir' in config
assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
assert 'timeframe' in config
assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
caplog)
assert 'strategy_list' in config
assert log_has('Using strategy list of 2 strategies', caplog)
assert 'position_stacking' not in config
assert 'use_max_market_positions' not in config
assert 'timerange' not in config
assert 'export' in config
assert log_has('Parameter --export detected: {} ...'.format(config['export']), caplog)
def test_hyperopt_with_arguments(mocker, default_conf, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
arglist = [
'hyperopt',
'--epochs', '10',
'--spaces', 'all',
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args, RunMode.HYPEROPT)
config = configuration.get_config()
assert 'epochs' in config
assert int(config['epochs']) == 10
assert log_has('Parameter --epochs detected ... Will run Hyperopt with for 10 epochs ...',
caplog)
assert 'spaces' in config
assert config['spaces'] == ['all']
assert log_has("Parameter -s/--spaces detected: ['all']", caplog)
assert "runmode" in config
assert config['runmode'] == RunMode.HYPEROPT
def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None:
patched_configuration_load_config_file(mocker, default_conf)
# Prevent setting loggers
mocker.patch('freqtrade.loggers.set_loggers', MagicMock)
arglist = ['trade', '-vvv']
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('verbosity') == 3
assert log_has('Verbosity set to 3', caplog)
def test_set_logfile(default_conf, mocker, tmpdir):
patched_configuration_load_config_file(mocker, default_conf)
f = Path(tmpdir / "test_file.log")
assert not f.is_file()
arglist = [
'trade', '--logfile', str(f),
]
args = Arguments(arglist).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf['logfile'] == str(f)
assert f.is_file()
try:
f.unlink()
except Exception:
pass
def test_load_config_warn_forcebuy(default_conf, mocker, caplog) -> None:
default_conf['force_entry_enable'] = True
patched_configuration_load_config_file(mocker, default_conf)
args = Arguments(['trade']).get_parsed_arg()
configuration = Configuration(args)
validated_conf = configuration.load_config()
assert validated_conf.get('force_entry_enable')
assert log_has('`force_entry_enable` RPC message enabled.', caplog)
def test_validate_default_conf(default_conf) -> None:
# Validate via our validator - we allow setting defaults!
validate_config_schema(default_conf)
def test_validate_max_open_trades(default_conf):
default_conf['max_open_trades'] = float('inf')
default_conf['stake_amount'] = 'unlimited'
with pytest.raises(OperationalException, match='`max_open_trades` and `stake_amount` '
'cannot both be unlimited.'):
validate_config_consistency(default_conf)
def test_validate_price_side(default_conf):
default_conf['order_types'] = {
"entry": "limit",
"exit": "limit",
"stoploss": "limit",
"stoploss_on_exchange": False,
}
# Default should pass
validate_config_consistency(default_conf)
conf = deepcopy(default_conf)
conf['order_types']['entry'] = 'market'
with pytest.raises(OperationalException,
match='Market entry orders require entry_pricing.price_side = "other".'):
validate_config_consistency(conf)
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
with pytest.raises(OperationalException,
match='Market exit orders require exit_pricing.price_side = "other".'):
validate_config_consistency(conf)
# Validate the inverted case
conf = deepcopy(default_conf)
conf['order_types']['exit'] = 'market'
conf['order_types']['entry'] = 'market'
conf['exit_pricing']['price_side'] = 'bid'
conf['entry_pricing']['price_side'] = 'ask'
validate_config_consistency(conf)
def test_validate_tsl(default_conf):
default_conf['stoploss'] = 0.0
with pytest.raises(OperationalException, match='The config stoploss needs to be different '
'from 0 to avoid problems with sell orders.'):
validate_config_consistency(default_conf)
default_conf['stoploss'] = -0.10
default_conf['trailing_stop'] = True
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0
default_conf['trailing_only_offset_is_reached'] = True
with pytest.raises(OperationalException,
match=r'The config trailing_only_offset_is_reached needs '
'trailing_stop_positive_offset to be more than 0 in your config.'):
validate_config_consistency(default_conf)
default_conf['trailing_stop_positive_offset'] = 0.01
default_conf['trailing_stop_positive'] = 0.015
with pytest.raises(OperationalException,
match=r'The config trailing_stop_positive_offset needs '
'to be greater than trailing_stop_positive in your config.'):
validate_config_consistency(default_conf)
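# With trailing_stop_positive_offset (0.015) strictly greater than trailing_stop_positive (0.01), validation passes.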
default_conf['trailing_stop_positive'] = 0.01
default_conf['trailing_stop_positive_offset'] = 0.015
validate_config_consistency(default_conf)
# 0 trailing stop positive - results in "Order would trigger immediately"
default_conf['trailing_stop_positive'] = 0
default_conf['trailing_stop_positive_offset'] = 0.02
default_conf['trailing_only_offset_is_reached'] = False
with pytest.raises(OperationalException,
match='The config trailing_stop_positive needs to be different from 0 '
'to avoid problems with sell orders'):
validate_config_consistency(default_conf)
def test_validate_edge2(edge_conf):
edge_conf.update({
"use_exit_signal": True,
})
# Passes test
validate_config_consistency(edge_conf)
edge_conf.update({
"use_exit_signal": False,
})
with pytest.raises(OperationalException, match="Edge requires `use_exit_signal` to be True, "
"otherwise no sells will happen."):
validate_config_consistency(edge_conf)
def test_validate_whitelist(default_conf):
default_conf['runmode'] = RunMode.DRY_RUN
# Test regular case - has whitelist and uses StaticPairlist
validate_config_consistency(default_conf)
conf = deepcopy(default_conf)
del conf['exchange']['pair_whitelist']
# Test error case
with pytest.raises(OperationalException,
match="StaticPairList requires pair_whitelist to be set."):
validate_config_consistency(conf)
conf = deepcopy(default_conf)
conf.update({"pairlists": [{
"method": "VolumePairList",
}]})
# Dynamic whitelist should not care about pair_whitelist
validate_config_consistency(conf)
del conf['exchange']['pair_whitelist']
validate_config_consistency(conf)
@pytest.mark.parametrize('protconf,expected', [
([], None),
([{"method": "StoplossGuard", "lookback_period": 2000, "stop_duration_candles": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}], None),
([{"method": "StoplossGuard", "lookback_period_candles": 20, "lookback_period": 2000,
"stop_duration": 10}], r'Protections must specify either `lookback_period`.*'),
([{"method": "StoplossGuard", "lookback_period": 20, "stop_duration": 10,
"stop_duration_candles": 10}], r'Protections must specify either `stop_duration`.*'),
])
def test_validate_protections(default_conf, protconf, expected):
conf = deepcopy(default_conf)
conf['protections'] = protconf
if expected:
with pytest.raises(OperationalException, match=expected):
validate_config_consistency(conf)
else:
validate_config_consistency(conf)
def test_validate_ask_orderbook(default_conf, caplog) -> None:
conf = deepcopy(default_conf)
conf['exit_pricing']['use_order_book'] = True
conf['exit_pricing']['order_book_min'] = 2
conf['exit_pricing']['order_book_max'] = 2
validate_config_consistency(conf) | assert log_has_re(r"DEPRECATED: Please use `order_book_top` instead of.*", caplog) | 2 | 2023-11-07 18:46:03+00:00 | 4k |
thedataninja1786/shallowgrad | examples/mnist.py | [
{
"identifier": "nn",
"path": "shallowgrad/nn.py",
"snippet": "class nn:\n class loss:\n instances = [] \n @staticmethod\n def backward_prop(g_loss): # g of loss \n delta = np.copy(g_loss)\n for i in reversed(range(len(nn.loss.instances))):\n delta = nn.loss.instances[i].backw... | from shallowgrad.nn import nn
from optimizers.optimizers import Adam
import gzip
import numpy as np | 2,375 |
def read_file(fp):
with open(fp, "rb") as f:
dat = f.read()
return np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
X_train = read_file(r"datasets\b0cdab8e37ae7c1c5560ee858afaac1d")[0x10:]
Y_train = read_file(r"datasets\d4fdde61aca9f72d5fe2315410bb46a5")[8:]
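# MNIST IDX files carry a header before the payload: 16 bytes (0x10) for images, 8 bytes for labels, hence the offsets above.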
X_train = X_train.reshape((-1,784))
Y = Y_train.reshape(-1,1)
X = np.array(X_train / 255)
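# A three-layer MLP: 784 -> 2500 -> 1000 -> 10 (one logit per digit class), trained with the cross-entropy loss below.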
l1 = nn.Linear(784,2500,activation='ReLU',bias=True)
l2 = nn.Linear(2500,1000,activation='ReLU',bias=True)
l3 = nn.Linear(1000,10,bias=True)
loss = nn.CrossEntropyLoss()
|
def read_file(fp):
with open(fp, "rb") as f:
dat = f.read()
return np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
X_train = read_file(r"datasets\b0cdab8e37ae7c1c5560ee858afaac1d")[0x10:]
Y_train = read_file(r"datasets\d4fdde61aca9f72d5fe2315410bb46a5")[8:]
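# MNIST IDX files carry a header before the payload: 16 bytes (0x10) for images, 8 bytes for labels, hence the offsets above.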
X_train = X_train.reshape((-1,784))
Y = Y_train.reshape(-1,1)
X = np.array(X_train / 255)
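# A three-layer MLP: 784 -> 2500 -> 1000 -> 10 (one logit per digit class), trained with the cross-entropy loss below.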
l1 = nn.Linear(784,2500,activation='ReLU',bias=True)
l2 = nn.Linear(2500,1000,activation='ReLU',bias=True)
l3 = nn.Linear(1000,10,bias=True)
loss = nn.CrossEntropyLoss() | optim = Adam(layers=[l1,l2,l3],lr=3e-4) | 1 | 2023-11-07 18:13:43+00:00 | 4k |
ssajedi/SAiF-GPT | app.py | [
{
"identifier": "extract_pdf_text",
"path": "utils.py",
"snippet": "def extract_pdf_text(file):\n \"\"\"\n Extracts text paragraphs from a PDF file.\n \"\"\"\n pdf_reader = PyPDF2.PdfReader(file)\n pdf_dict={}\n for ip in range(len(pdf_reader.pages)):\n pdf_dict[ip] = pdf_reader... | import streamlit as st
import openai
import streamlit as st
from utils import extract_pdf_text
from text_effects import highlight_phrases_in_paragraph
from ner import Anonymizer | 2,978 |
st.set_page_config(page_title="🔒 SAiF-GPT", page_icon="🤫",layout="wide")
st.title("SAiF-GPT")
system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""
# add a selectbox to the sidebar
ent_types_select = st.sidebar.multiselect("Entity list", ["LOC", "PER","ORG",'EMAIL','PHONE'], ["LOC", "PER","ORG"])
# add a clear button to the sidebar
if st.sidebar.button("Clear"):
st.session_state.chat_hist = []
st.session_state.messages = []
st.session_state.anonymizer = None
st.session_state.ref_doc = None
# add an upload PDF button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)
if uploaded_file is not None:
_,chunks = extract_pdf_text(uploaded_file)
# st.write(chunks)
with open("hack_secret.txt") as f:
# st.write("Using OpenAI API key:", f.read())
openai.api_key = f.read()
# Building a front end with streamlit
# ref: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.chat_hist = []
for message in st.session_state.chat_hist:
with st.chat_message(message["role"]):
st.markdown(message["content"], unsafe_allow_html=True)
if prompt := st.chat_input("What is up?"):
if len(st.session_state.chat_hist)==0:
# ref_doc = "\n".join(chunks)
# ref_doc = chunks[0]
ref_doc = "\n".join(chunks)
# ref_doc = """ExxonMobil Infrastructure Development Proposal Executive Summary: This comprehensive proposal envisions the construction of ExxonMobil's new operational hub, designed to bolster its strategic expansion and operational excellence within the energy sector. Introduction: We propose to construct a state-of-the-art facility that reflects ExxonMobil's commitment to innovation, sustainability, and global leadership in energy. The project will span a meticulously selected 35,000-square-foot site in Houston, Texas, with the potential to become a landmark of industrial prowess and architectural ingenuity. Project Team: Leading the project will be Chief Project Engineer, Thomas Booker, with over two decades of experience in industrial construction. Architectural design will be spearheaded by Ava Clarke, whose portfolio includes several LEED-certified buildings across Dallas. Our environmental engineering efforts will be led by Dylan Rhodes in Austin, ensuring adherence to the most stringent ecological standards. Site and Structure: The facility will be located in the heart of Houston’s Energy Corridor, taking advantage of the area's rich infrastructure and proximity to ExxonMobil’s main operations. Geotechnical assessments and site preparation will be undertaken by San Antonio-based expert, Nora Quintana. The building's framework, designed for resilience and adaptability, will be overseen by structural engineer Alex Johnson from Fort Worth. Sustainability and Environment: Sus##tainability Coordinator, Rachel Santos from Corpus Christi, will implement cutting-edge green technologies, including a state-of-the-art HVAC system designed by El Paso's mechanical engineer, Omar Fernandez. Rainwater harvesting and waste management systems will be developed in collaboration with environmental specialists from Galveston Email address: test@gmail.com 123-456-7890"""
# llm_prompt = augment_prompt(prompt,chunks[0])
anmz = Anonymizer()
safe_prompt = anmz.anonymize(prompt,ent_types_select)
safe_doc = anmz.anonymize(ref_doc,ent_types_select)
st.session_state.anonymizer = anmz
st.session_state.ref_doc = ref_doc
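# Wrap the anonymized query in *** and the anonymized document in triple backticks, matching the delimiters declared in the system prompt.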
llm_prompt = f"***{safe_prompt}***+```{safe_doc}```"
st.write(safe_prompt)
else:
safe_prompt = st.session_state.anonymizer.anonymize(prompt,ent_types_select)
llm_prompt = safe_prompt
st.write(llm_prompt)
st.session_state.messages.append({"role": "user", "content": llm_prompt})
st.session_state.chat_hist.append({'role':'user', 'content':prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# Restore the original entities in the model's response and collect them for highlighting
decoded_message = st.session_state.anonymizer.deanonymize(full_response)
phrases_to_highlight = {}
ent_data = st.session_state.anonymizer.deanonymization_map
st.session_state.phrases_to_highlight = phrases_to_highlight
# get the values of the dictionary and save them as a list
ent_data = list(ent_data.values())
for ent in ent_data:
phrases_to_highlight[ent] = None
# st.write(phrases_to_highlight)
|
st.set_page_config(page_title="🔒 SAiF-GPT", page_icon="🤫",layout="wide")
st.title("SAiF-GPT")
system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""
# add a selectbox to the sidebar
ent_types_select = st.sidebar.multiselect("Entity list", ["LOC", "PER","ORG",'EMAIL','PHONE'], ["LOC", "PER","ORG"])
# add a clear button to the sidebar
if st.sidebar.button("Clear"):
st.session_state.chat_hist = []
st.session_state.messages = []
st.session_state.anonymizer = None
st.session_state.ref_doc = None
# add an upload PDF button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)
if uploaded_file is not None:
_,chunks = extract_pdf_text(uploaded_file)
# st.write(chunks)
with open("hack_secret.txt") as f:
# st.write("Using OpenAI API key:", f.read())
openai.api_key = f.read()
# Building a front end with streamlit
# ref: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.chat_hist = []
for message in st.session_state.chat_hist:
with st.chat_message(message["role"]):
st.markdown(message["content"], unsafe_allow_html=True)
if prompt := st.chat_input("What is up?"):
if len(st.session_state.chat_hist)==0:
# ref_doc = "\n".join(chunks)
# ref_doc = chunks[0]
ref_doc = "\n".join(chunks)
# ref_doc = """ExxonMobil Infrastructure Development Proposal Executive Summary: This comprehensive proposal envisions the construction of ExxonMobil's new operational hub, designed to bolster its strategic expansion and operational excellence within the energy sector. Introduction: We propose to construct a state-of-the-art facility that reflects ExxonMobil's commitment to innovation, sustainability, and global leadership in energy. The project will span a meticulously selected 35,000-square-foot site in Houston, Texas, with the potential to become a landmark of industrial prowess and architectural ingenuity. Project Team: Leading the project will be Chief Project Engineer, Thomas Booker, with over two decades of experience in industrial construction. Architectural design will be spearheaded by Ava Clarke, whose portfolio includes several LEED-certified buildings across Dallas. Our environmental engineering efforts will be led by Dylan Rhodes in Austin, ensuring adherence to the most stringent ecological standards. Site and Structure: The facility will be located in the heart of Houston’s Energy Corridor, taking advantage of the area's rich infrastructure and proximity to ExxonMobil’s main operations. Geotechnical assessments and site preparation will be undertaken by San Antonio-based expert, Nora Quintana. The building's framework, designed for resilience and adaptability, will be overseen by structural engineer Alex Johnson from Fort Worth. Sustainability and Environment: Sus##tainability Coordinator, Rachel Santos from Corpus Christi, will implement cutting-edge green technologies, including a state-of-the-art HVAC system designed by El Paso's mechanical engineer, Omar Fernandez. Rainwater harvesting and waste management systems will be developed in collaboration with environmental specialists from Galveston Email address: test@gmail.com 123-456-7890"""
# llm_prompt = augment_prompt(prompt,chunks[0])
anmz = Anonymizer()
safe_prompt = anmz.anonymize(prompt,ent_types_select)
safe_doc = anmz.anonymize(ref_doc,ent_types_select)
st.session_state.anonymizer = anmz
st.session_state.ref_doc = ref_doc
llm_prompt = f"***{safe_prompt}***+```{safe_doc}```"
st.write(safe_prompt)
else:
safe_prompt = st.session_state.anonymizer.anonymize(prompt,ent_types_select)
llm_prompt = safe_prompt
st.write(llm_prompt)
st.session_state.messages.append({"role": "user", "content": llm_prompt})
st.session_state.chat_hist.append({'role':'user', 'content':prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# entities
decoded_message = st.session_state.anonymizer.deanonymize(full_response)
phrases_to_highlight = {}
ent_data = st.session_state.anonymizer.deanonymization_map
st.session_state.phrases_to_highlight = phrases_to_highlight
# get values of dictionary and save as list
ent_data = list(ent_data.values())
for ent in ent_data:
phrases_to_highlight[ent] = None
# st.write(phrases_to_highlight) | highlighted_Text = highlight_phrases_in_paragraph(decoded_message,phrases_to_highlight) | 1 | 2023-11-04 18:14:49+00:00 | 4k |
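The gold completion above calls highlight_phrases_in_paragraph, which is never defined in this snippet. Below is a minimal sketch of what such a helper might look like, assuming it wraps each phrase in a <mark> tag so that st.markdown(..., unsafe_allow_html=True) renders the highlight; the signature and the tag choice are assumptions, not the repo's actual implementation.

import re

def highlight_phrases_in_paragraph(paragraph: str, phrases_to_highlight: dict) -> str:
    # wrap each known phrase in a <mark> tag (hypothetical rendering choice)
    for phrase in phrases_to_highlight:
        if not phrase:
            continue
        pattern = re.compile(re.escape(str(phrase)), flags=re.IGNORECASE)
        paragraph = pattern.sub(lambda m: f"<mark>{m.group(0)}</mark>", paragraph)
    return paragraph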
awslabs/optimizing-multitask-training-through-dynamic-pipelines | dynapipe/schedule_opt/ofob_schedule.py | [
{
"identifier": "DEBUG_PRINT_EXECUTORS",
"path": "dynapipe/schedule_opt/schedule_common.py",
"snippet": "DEBUG_REPLACE_OP_NAMES_WITH_INSTRS = False\nDEBUG_REPLACE_OP_NAMES_WITH_SCH_STATS = False\nDEBUG_PRINT_EXECUTORS = []\n COMP_THREAD = 0\n COMM_THREAD = 1\ndef _is_last_fw_or_bw_stage(\n flat... | import logging
from collections import defaultdict
from typing import Dict, List, Optional, Tuple
from .schedule_common import (
DEBUG_PRINT_EXECUTORS,
ExecutorIndex,
ScheduleExecutor,
ScheduleOperation,
SchedulerMinibatchSpec,
)
from .wait_free_schedule import WaitFreeExecutor, WaitFreeScheduler | 2,827 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
class OFOBExecutor(WaitFreeExecutor):
def __init__(
self,
executor_id: int,
thread_id: int,
n_orig_layers: int,
assigned_stages: List[Tuple[int, float, bool]],
n_executors: int,
is_comm_stage: bool = False,
include_memory_stats: bool = True,
parent_executor: Optional[ScheduleExecutor] = None,
logger: Optional[logging.Logger] = None,
) -> None:
super().__init__(
executor_id,
thread_id,
n_orig_layers,
assigned_stages,
is_comm_stage,
include_memory_stats,
parent_executor,
logger,
)
if not self.is_comm_stage:
assert len(self.fw_stages) == len(
self.bw_stages
), "Mismatched number of forward and backward layers"
self.is_executing = False
self.next_op = (0, 0, True) # (microbatch, chunk_id, is_forward)
self.n_executors = n_executors
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
self._increment_next_op_fn = None
self._try_execute_fn = None
def register_increment_next_op_fn(self, fn):
self._increment_next_op_fn = fn
def register_try_execute_fn(self, fn):
self._try_execute_fn = fn
def reset(self):
super().reset()
self.available_queue = []
self.next_op = (0, 0, True)
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
def set_n_microbatches(self, n_microbatches):
self.n_microbatches = n_microbatches
def add_operation(self, op: ScheduleOperation):
if op.is_forward:
assert (
op.flattened_stage in self.fw_stages
), "Operation {} not in executor".format(op)
else:
assert (
op.flattened_stage in self.bw_stages
), "Operation {} not in executor".format(op)
self.available_queue.append(op)
def _increment_next_op(self):
assert self._increment_next_op_fn is not None
return self._increment_next_op_fn(self)
def try_execute(self, current_time):
assert self._try_execute_fn is not None
if (
self.executed_fw_ops == 0
and self.is_comm_stage
and len(self.fw_stages) == 0
):
# no fw layers assigned, skip once
self.executed_fw_ops = 1
self._increment_next_op()
return self._try_execute_fn(self, current_time)
def finish_execute(self):
self.is_executing = False
def debug_print(self, *args):
# overrides parent debug_print
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
class OFOBExecutor(WaitFreeExecutor):
def __init__(
self,
executor_id: int,
thread_id: int,
n_orig_layers: int,
assigned_stages: List[Tuple[int, float, bool]],
n_executors: int,
is_comm_stage: bool = False,
include_memory_stats: bool = True,
parent_executor: Optional[ScheduleExecutor] = None,
logger: Optional[logging.Logger] = None,
) -> None:
super().__init__(
executor_id,
thread_id,
n_orig_layers,
assigned_stages,
is_comm_stage,
include_memory_stats,
parent_executor,
logger,
)
if not self.is_comm_stage:
assert len(self.fw_stages) == len(
self.bw_stages
), "Mismatched number of forward and backward layers"
self.is_executing = False
self.next_op = (0, 0, True) # (microbatch, chunk_id, is_forward)
self.n_executors = n_executors
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
self._increment_next_op_fn = None
self._try_execute_fn = None
def register_increment_next_op_fn(self, fn):
self._increment_next_op_fn = fn
def register_try_execute_fn(self, fn):
self._try_execute_fn = fn
def reset(self):
super().reset()
self.available_queue = []
self.next_op = (0, 0, True)
self.n_microbatches = None
self.executed_fw_ops = 0
self.executed_bw_ops = 0
def set_n_microbatches(self, n_microbatches):
self.n_microbatches = n_microbatches
def add_operation(self, op: ScheduleOperation):
if op.is_forward:
assert (
op.flattened_stage in self.fw_stages
), "Operation {} not in executor".format(op)
else:
assert (
op.flattened_stage in self.bw_stages
), "Operation {} not in executor".format(op)
self.available_queue.append(op)
def _increment_next_op(self):
assert self._increment_next_op_fn is not None
return self._increment_next_op_fn(self)
def try_execute(self, current_time):
assert self._try_execute_fn is not None
if (
self.executed_fw_ops == 0
and self.is_comm_stage
and len(self.fw_stages) == 0
):
# no fw layers assigned, skip once
self.executed_fw_ops = 1
self._increment_next_op()
return self._try_execute_fn(self, current_time)
def finish_execute(self):
self.is_executing = False
def debug_print(self, *args):
# overrides parent debug_print | if self.executor_id in DEBUG_PRINT_EXECUTORS and self.logger: | 0 | 2023-11-08 07:58:20+00:00 | 4k |
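The executor above delegates its scheduling policy to registered callbacks (_increment_next_op_fn, _try_execute_fn), whose bodies are not shown. The sketch below shows one plausible increment callback for a plain one-forward-one-backward (1F1B) schedule; the warmup formula and the way the counters are used are assumptions based on the fields visible in the class, not the repo's actual logic.

def increment_next_op_1f1b(executor):
    # number of warmup forward passes before strict fw/bw alternation begins
    warmup = executor.n_executors - executor.executor_id - 1
    _, chunk_id, is_forward = executor.next_op
    if is_forward:
        executor.executed_fw_ops += 1
    else:
        executor.executed_bw_ops += 1
    fw_done = executor.executed_fw_ops >= executor.n_microbatches
    in_warmup = executor.executed_fw_ops < warmup + 1
    if not fw_done and (in_warmup or not is_forward):
        # schedule the next forward microbatch
        executor.next_op = (executor.executed_fw_ops, chunk_id, True)
    else:
        # schedule the next backward microbatch
        executor.next_op = (executor.executed_bw_ops, chunk_id, False)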
lich0821/ShaDiaoRobot | actions/chitchat/seq2seq.py | [
{
"identifier": "Data",
"path": "actions/chitchat/data_processing.py",
"snippet": "class Data(object):\n def __init__(self, config) -> None:\n self.config = config\n self.seq_path = config[\"data_path\"] + config[\"dataset\"] + \".data\"\n self.conv_path = config[\"data_path\"] +... | import logging
import os
import sys
import jieba
import tensorflow as tf
from tqdm import tqdm
from .data_processing import Data, add_flag | 3,353 |
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
# initialize batch_sz, dec_units, and the embedding, gru, fc, and attention layers
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.dec_units)
def call(self, y, hidden, enc_output):
# first compute attention over enc_output and the decoder's hidden state, producing a context vector
context_vector, attention_weights = self.attention(hidden, enc_output)
# embed the decoder input
y = self.embedding(y)
# concatenate the context vector with the decoder input embedding and feed the result into the GRU
y = tf.concat([tf.expand_dims(context_vector, 1), y], axis=-1)
output, state = self.gru(y)
# reshape the GRU output and pass it through the fully connected layer to get the final logits
output = tf.reshape(output, (-1, output.shape[2]))
y = self.fc(output)
return y, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
class Seq2Seq(object):
def __init__(self, config) -> None:
self.config = config
vacab_size_in = config['vacab_size_in']
vacab_size_out = config['vacab_size_out']
embedding_dim = config['embedding_dim']
self.units = config['layer_size']
self.batch_size = config['batch_size']
self.encoder = Encoder(vacab_size_in, embedding_dim, self.units, self.batch_size)
self.decoder = Decoder(vacab_size_out, embedding_dim, self.units, self.batch_size)
self.optimizer = tf.keras.optimizers.Adam()
# self.optimizer = tf.keras.optimizers.legacy.Adam()
self.checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, encoder=self.encoder, decoder=self.decoder)
self.ckpt_dir = self.config["model_data"]
logging.basicConfig(level=logging.INFO)
self.LOG = logging.getLogger("Seq2Seq")
if tf.io.gfile.listdir(self.ckpt_dir):
self.LOG.info("正在加载模型...")
self.checkpoint.restore(tf.train.latest_checkpoint(self.ckpt_dir))
data = Data(config)
self.dataset, self.tokenizer_in, self.tokenizer_out = data.load()
self.steps_per_epoch = data.steps_per_epoch
def loss_function(self, real, pred):
# define the loss function
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# mask out the start/padding token so it does not interfere with the loss
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype) # cast the bool mask to numeric
loss_ *= mask
return tf.reduce_mean(loss_)
@tf.function
def training_step(self, inp, targ, targ_lang, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = self.encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['bos']] * self.batch_size, 1)
for t in range(1, targ.shape[1]):
predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
loss += self.loss_function(targ[:, t], predictions)
dec_input = tf.expand_dims(targ[:, t], 1)
step_loss = (loss / int(targ.shape[1]))
variables = self.encoder.trainable_variables + self.decoder.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return step_loss
def train(self):
# define the training function
# read data from the training corpus and convert it using the pre-built word2number vocabulary
enc_hidden = self.encoder.initialize_hidden_state()
writer = tf.summary.create_file_writer(self.config["log_dir"])
self.LOG.info(f"Data directory: {self.config['data_path']}")
self.LOG.info(f"Training steps per epoch: {self.steps_per_epoch}")
epoch = 0
train_epoch = self.config["epochs"]
while epoch < train_epoch:
total_loss = 0
# run one epoch of training; the number of steps is steps_per_epoch
iter_data = tqdm(self.dataset.take(self.steps_per_epoch))
for batch, (inp, targ) in enumerate(iter_data):
batch_loss = self.training_step(inp, targ, self.tokenizer_out, enc_hidden)
total_loss += batch_loss
iter_data.set_postfix_str(f"batch_loss: {batch_loss:.4f}")
step_loss = total_loss / self.steps_per_epoch
self.LOG.info(f"Epoch: {epoch+1}/{train_epoch} Loss: {total_loss:.4f} 平均每步 loss {step_loss:.4f}")
# 将本epoch训练的模型进行保存,更新模型文件
self.checkpoint.save(file_prefix=os.path.join(self.ckpt_dir, "ckpt"))
sys.stdout.flush()
epoch = epoch + 1
with writer.as_default():
tf.summary.scalar("loss", step_loss, step=epoch)
def predict(self, sentence):
# define the prediction function, which predicts the reply that follows the given input
# preprocess the input sentence and add the start/end markers
max_length = self.config["max_length"]
sentence = " ".join(jieba.cut(sentence))
| # -*- coding: utf-8 -*-
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3" # Disable Tensorflow debug message
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, enable=True)
class Encoder(tf.keras.Model):
# define the Encoder class
# initializer: set up the default parameters
def __init__(self, vocab_size, embedding_dim, enc_units, batch_size):
super(Encoder, self).__init__()
self.enc_units = enc_units
self.batch_size = batch_size
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
# define the call function that implements the forward computation
def call(self, x, hidden):
x_emb = self.embedding(x)
output, state = self.gru(x_emb, initial_state=hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_size, self.enc_units))
class BahdanauAttention(tf.keras.Model):
# define the BahdanauAttention class; Bahdanau attention is one of the most commonly used attention mechanisms
def __init__(self, units):
super(BahdanauAttention, self).__init__()
# initialize the attention network
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# add a time dimension to query so it can be combined linearly with values
hidden_with_time_axis = tf.expand_dims(query, 1)
# combine values and hidden_with_time_axis linearly, apply a tanh non-linearity, and output a one-dimensional score
score = self.V(tf.nn.tanh(
self.W1(values) + self.W2(hidden_with_time_axis)))
# use softmax to convert the scores into a probability distribution
attention_weights = tf.nn.softmax(score, axis=1)
# multiply the weights with values (the encoder output) to get the context_vector
context_vector = attention_weights * values
# sum the weighted values along the time axis to obtain the final context_vector
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
# initialize batch_sz, dec_units, and the embedding, gru, fc, and attention layers
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.dec_units)
def call(self, y, hidden, enc_output):
# first compute attention over enc_output and the decoder's hidden state, producing a context vector
context_vector, attention_weights = self.attention(hidden, enc_output)
# embed the decoder input
y = self.embedding(y)
# concatenate the context vector with the decoder input embedding and feed the result into the GRU
y = tf.concat([tf.expand_dims(context_vector, 1), y], axis=-1)
output, state = self.gru(y)
# reshape the GRU output and pass it through the fully connected layer to get the final logits
output = tf.reshape(output, (-1, output.shape[2]))
y = self.fc(output)
return y, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
class Seq2Seq(object):
def __init__(self, config) -> None:
self.config = config
vacab_size_in = config['vacab_size_in']
vacab_size_out = config['vacab_size_out']
embedding_dim = config['embedding_dim']
self.units = config['layer_size']
self.batch_size = config['batch_size']
self.encoder = Encoder(vacab_size_in, embedding_dim, self.units, self.batch_size)
self.decoder = Decoder(vacab_size_out, embedding_dim, self.units, self.batch_size)
self.optimizer = tf.keras.optimizers.Adam()
# self.optimizer = tf.keras.optimizers.legacy.Adam()
self.checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, encoder=self.encoder, decoder=self.decoder)
self.ckpt_dir = self.config["model_data"]
logging.basicConfig(level=logging.INFO)
self.LOG = logging.getLogger("Seq2Seq")
if tf.io.gfile.listdir(self.ckpt_dir):
self.LOG.info("正在加载模型...")
self.checkpoint.restore(tf.train.latest_checkpoint(self.ckpt_dir))
data = Data(config)
self.dataset, self.tokenizer_in, self.tokenizer_out = data.load()
self.steps_per_epoch = data.steps_per_epoch
def loss_function(self, real, pred):
# define the loss function
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# mask out the start/padding token so it does not interfere with the loss
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype) # cast the bool mask to numeric
loss_ *= mask
return tf.reduce_mean(loss_)
@tf.function
def training_step(self, inp, targ, targ_lang, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = self.encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['bos']] * self.batch_size, 1)
for t in range(1, targ.shape[1]):
predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output)
loss += self.loss_function(targ[:, t], predictions)
dec_input = tf.expand_dims(targ[:, t], 1)
step_loss = (loss / int(targ.shape[1]))
variables = self.encoder.trainable_variables + self.decoder.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return step_loss
def train(self):
# define the training function
# read data from the training corpus and convert it using the pre-built word2number vocabulary
enc_hidden = self.encoder.initialize_hidden_state()
writer = tf.summary.create_file_writer(self.config["log_dir"])
self.LOG.info(f"Data directory: {self.config['data_path']}")
self.LOG.info(f"Training steps per epoch: {self.steps_per_epoch}")
epoch = 0
train_epoch = self.config["epochs"]
while epoch < train_epoch:
total_loss = 0
# run one epoch of training; the number of steps is steps_per_epoch
iter_data = tqdm(self.dataset.take(self.steps_per_epoch))
for batch, (inp, targ) in enumerate(iter_data):
batch_loss = self.training_step(inp, targ, self.tokenizer_out, enc_hidden)
total_loss += batch_loss
iter_data.set_postfix_str(f"batch_loss: {batch_loss:.4f}")
step_loss = total_loss / self.steps_per_epoch
self.LOG.info(f"Epoch: {epoch+1}/{train_epoch} Loss: {total_loss:.4f} 平均每步 loss {step_loss:.4f}")
# 将本epoch训练的模型进行保存,更新模型文件
self.checkpoint.save(file_prefix=os.path.join(self.ckpt_dir, "ckpt"))
sys.stdout.flush()
epoch = epoch + 1
with writer.as_default():
tf.summary.scalar("loss", step_loss, step=epoch)
def predict(self, sentence):
# define the prediction function, which predicts the reply that follows the given input
# preprocess the input sentence and add the start/end markers
max_length = self.config["max_length"]
sentence = " ".join(jieba.cut(sentence)) | sentence = add_flag(sentence) | 1 | 2023-11-05 12:56:38+00:00 | 4k |
ryanchen01/sing-box-utils | gen_config.py | [
{
"identifier": "surge2singbox",
"path": "surge2singbox.py",
"snippet": "def surge2singbox(surge_config_path):\n singbox_rules = []\n\n with open(surge_config_path, 'r', encoding='utf-8') as f:\n surge_config = f.readlines()\n reg = r'^\\[(.+)\\]$'\n sections = {}\n for linenum, li... | import yaml
import json
import sys
import regex as re
import requests
import argparse
import pathlib
from surge2singbox import surge2singbox
from clash2singbox import clash2singbox | 2,324 |
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate singbox config from clash config proxies and surge config rules")
parser.add_argument("clash_config_path", type=pathlib.Path, help="clash config path")
parser.add_argument("surge_config_path", type=pathlib.Path, help="surge config path")
parser.add_argument("-o", "--output", help="output file name")
parser.add_argument("-us", "--us", action="store_true", help="include US policy")
parser.add_argument("-hk", "--hk", action="store_true", help="include Hong Kong policy")
parser.add_argument("-sg", "--sg", action="store_true", help="include Singapore policy")
parser.add_argument("-jp", "--jp", action="store_true", help="include Japan policy")
parser.add_argument("-tw", "--tw", action="store_true", help="include Taiwan policy")
args = parser.parse_args()
if args.output:
outname = args.output
else:
outname = "singbox.json"
clash_config_path = args.clash_config_path
surge_config_path = args.surge_config_path
bUS = args.us
bHK = args.hk
bSG = args.sg
bJP = args.jp
bTW = args.tw
|
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate singbox config from clash config proxies and surge config rules")
parser.add_argument("clash_config_path", type=pathlib.Path, help="clash config path")
parser.add_argument("surge_config_path", type=pathlib.Path, help="surge config path")
parser.add_argument("-o", "--output", help="output file name")
parser.add_argument("-us", "--us", action="store_true", help="include US policy")
parser.add_argument("-hk", "--hk", action="store_true", help="include Hong Kong policy")
parser.add_argument("-sg", "--sg", action="store_true", help="include Singapore policy")
parser.add_argument("-jp", "--jp", action="store_true", help="include Japan policy")
parser.add_argument("-tw", "--tw", action="store_true", help="include Taiwan policy")
args = parser.parse_args()
if args.output:
outname = args.output
else:
outname = "singbox.json"
clash_config_path = args.clash_config_path
surge_config_path = args.surge_config_path
bUS = args.us
bHK = args.hk
bSG = args.sg
bJP = args.jp
bTW = args.tw
| rule_config, extras = surge2singbox(surge_config_path) | 0 | 2023-11-05 12:35:50+00:00 | 4k |
apple/ml-reed | reed/models/self_supervised_consistency_model.py | [
{
"identifier": "EnvironmentContrastiveBatch",
"path": "reed/data/environment_transition_dataset.py",
"snippet": "class EnvironmentContrastiveBatch:\n \"\"\"\n A batch of triplets where two states/observations are given and one is an augmented version of the other.\n\n The augmentation may be a... | import typing as t
import attr
import torch
import torch.nn as nn
from pathlib import Path
from collections import OrderedDict
from reed.data.environment_transition_dataset import EnvironmentContrastiveBatch
from reed.models.image_encoder import get_image_encoder
from reed.models.self_predictive_representations_model import StateActionSelfPredictiveRepresentationsNetworkEnsemble | 2,845 | image_encoder_architecture: str = "pixl2r",
consistency_comparison_dim: int = 32,
consistency_projection_size: int = 128,
consistency_comparison_hidden_size: int = 256,
consistency_architecture: str = "mosaic",
with_consistency_prediction_head: bool = True,
image_hidden_num_channels: int = 32,
num_layers: int = 3):
"""
Learns embeddings such that the representations of an image and an augmented image are consistent with
one another in the latent space.
Args:
state_size: dimensionality of the states
out_size: the size of the output
state_embed_size: the size of the state's embedding
hidden_size: the size of the hidden layer(s)
ssl_state_encoder_mimics_reward_model: whether the state encoder mimics the reward model's
architecture
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
consistency_comparison_dim: the number of dimensions to use when comparing the predicted augmented state
representation and the actual augmented state representation
consistency_projection_size: the number of hidden units the state representations are projected to
consistency_comparison_hidden_size: the number of dimensions to use when comparing the predicted
augmented state representation and the actual augmented state
representation
consistency_architecture: (default = "mosaic") controls the architecture used to predict the augmented
state representation and then to project the current and augmented state
representations before comparing. The name of the architecture references
the source paper. The options are "simsiam" and "mosaic"
with_consistency_prediction_head: (default = True) whether to include a prediction head to
predict the target representation. When we train with SimCLR we do not
use the prediction head
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the
image encoder
num_layers: the number of hidden layers
"""
super(ImageStateConsistencyNetwork, self).__init__()
assert image_encoder_architecture in {"pixl2r", "drqv2"}
assert consistency_architecture in {"simsiam", "mosaic"}
# track the dimensionality of the input, the output, and the hidden dimensions
self._state_size = state_size
self._out_size = out_size
self._hidden_size = hidden_size
self._num_layers = num_layers
self._image_encoder_architecture = image_encoder_architecture
self._image_hidden_num_channels = image_hidden_num_channels
self._state_embed_size = state_embed_size
self._ssl_state_encoder_mimics_reward_model = ssl_state_encoder_mimics_reward_model
self._consistency_projection_size = consistency_projection_size
self._consistency_comparison_dim = consistency_comparison_dim
self._consistency_comparison_hidden_size = consistency_comparison_hidden_size
self._consistency_architecture = consistency_architecture
self._with_consistency_prediction_head = with_consistency_prediction_head
self._build()
def _build_consistency_comparison_architecture(self) -> t.Tuple[nn.Module, nn.Module]:
"""
Builds the network architecture used to project the current and augmented state representations and then predict
the augmented state representation from the current state representation.
"""
predictor = None
if self._consistency_architecture == "simsiam":
# architecture from the SimSiam code base
# project the predicted and true augmented state representation
projector = nn.Linear(256, self._consistency_projection_size)
# build a 2-layer consistency predictor following:
# https://github.com/facebookresearch/simsiam/blob/a7bc1772896d0dad0806c51f0bb6f3b16d290468/simsiam/builder.py#L39
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.Linear(self._consistency_projection_size,
self._consistency_comparison_hidden_size,
bias=False),
nn.BatchNorm1d(self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True), # hidden layer
nn.Linear(self._consistency_comparison_hidden_size,
self._consistency_projection_size)) # output layer
elif self._consistency_architecture == "mosaic":
# project the predicted and true augmented state representation
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L63
projector = nn.Sequential(
# Rearrange('B T d H W -> (B T) d H W'),
nn.BatchNorm1d(self._state_embed_size), nn.ReLU(inplace=True),
# Rearrange('BT d H W -> BT (d H W)'),
nn.Linear(self._state_embed_size, self._consistency_comparison_hidden_size), nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size)
)
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L118
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.ReLU(inplace=True),
nn.Linear(self._consistency_projection_size, self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size))
else:
raise NotImplementedError(f"{self._consistency_architecture} is not an implemented consistency "
f"comparison architecture.")
return projector, predictor
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
state convolution encoder
augmented state predictor
augmented state projector
"""
# the observations are first encoded with a CNN and then projected to an embedding
# space where they are combined with the action embedding
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
class ImageStateConsistencyNetwork(nn.Module):
def __init__(self,
state_size: t.List[int],
out_size: int = 1,
state_embed_size: int = 256,
hidden_size: int = 256,
ssl_state_encoder_mimics_reward_model: bool = True,
image_encoder_architecture: str = "pixl2r",
consistency_comparison_dim: int = 32,
consistency_projection_size: int = 128,
consistency_comparison_hidden_size: int = 256,
consistency_architecture: str = "mosaic",
with_consistency_prediction_head: bool = True,
image_hidden_num_channels: int = 32,
num_layers: int = 3):
"""
Learns embeddings such that the representations of an image and an augmented image are consistent with
one another in the latent space.
Args:
state_size: dimensionality of the states
out_size: the size of the output
state_embed_size: the size of the state's embedding
hidden_size: the size of the hidden layer(s)
ssl_state_encoder_mimics_reward_model: whether the state encoder mimics the reward model's
architecture
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
consistency_comparison_dim: the number of dimensions to use when comparing the predicted augmented state
representation and the actual augmented state representation
consistency_projection_size: the number of hidden units the state representations are projected to
consistency_comparison_hidden_size: the number of dimensions to use when comparing the predicted
augmented state representation and the actual augmented state
representation
consistency_architecture: (default = "mosaic") controls the architecture used to predict the augmented
state representation and then to project the current and augmented state
representations before comparing. The name of the architecture references
the source paper. The options are "simsiam" and "mosaic"
with_consistency_prediction_head: (default = True) whether to include a prediction head to
predict the target representation. When we train with SimCLR we do not
use the prediction head
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the
image encoder
num_layers: the number of hidden layers
"""
super(ImageStateConsistencyNetwork, self).__init__()
assert image_encoder_architecture in {"pixl2r", "drqv2"}
assert consistency_architecture in {"simsiam", "mosaic"}
# track the dimensionality of the input, the output, and the hidden dimensions
self._state_size = state_size
self._out_size = out_size
self._hidden_size = hidden_size
self._num_layers = num_layers
self._image_encoder_architecture = image_encoder_architecture
self._image_hidden_num_channels = image_hidden_num_channels
self._state_embed_size = state_embed_size
self._ssl_state_encoder_mimics_reward_model = ssl_state_encoder_mimics_reward_model
self._consistency_projection_size = consistency_projection_size
self._consistency_comparison_dim = consistency_comparison_dim
self._consistency_comparison_hidden_size = consistency_comparison_hidden_size
self._consistency_architecture = consistency_architecture
self._with_consistency_prediction_head = with_consistency_prediction_head
self._build()
def _build_consistency_comparison_architecture(self) -> t.Tuple[nn.Module, nn.Module]:
"""
Builds the network architecture used to project the current and augmented state representations and then predict
the augmented state representation from the current state representation.
"""
predictor = None
if self._consistency_architecture == "simsiam":
# architecture from the SimSiam code base
# project the predicted and true augmented state representation
projector = nn.Linear(256, self._consistency_projection_size)
# build a 2-layer consistency predictor following:
# https://github.com/facebookresearch/simsiam/blob/a7bc1772896d0dad0806c51f0bb6f3b16d290468/simsiam/builder.py#L39
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.Linear(self._consistency_projection_size,
self._consistency_comparison_hidden_size,
bias=False),
nn.BatchNorm1d(self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True), # hidden layer
nn.Linear(self._consistency_comparison_hidden_size,
self._consistency_projection_size)) # output layer
elif self._consistency_architecture == "mosaic":
# project the predicted and true augmented state representation
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L63
projector = nn.Sequential(
# Rearrange('B T d H W -> (B T) d H W'),
nn.BatchNorm1d(self._state_embed_size), nn.ReLU(inplace=True),
# Rearrange('BT d H W -> BT (d H W)'),
nn.Linear(self._state_embed_size, self._consistency_comparison_hidden_size), nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size)
)
# from: https://github.com/rll-research/mosaic/blob/561814b40d33f853aeb93f1113a301508fd45274/mosaic/models/rep_modules.py#L118
if self._with_consistency_prediction_head:
predictor = nn.Sequential(
nn.ReLU(inplace=True),
nn.Linear(self._consistency_projection_size, self._consistency_comparison_hidden_size),
nn.ReLU(inplace=True),
nn.Linear(self._consistency_comparison_hidden_size, self._consistency_projection_size),
nn.LayerNorm(self._consistency_projection_size))
else:
raise NotImplementedError(f"{self._consistency_architecture} is not an implemented consistency "
f"comparison architecture.")
return projector, predictor
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
state convolution encoder
augmented state predictor
augmented state projector
"""
# the observations are first encoded with a CNN and then projected to an embedding
# space where they are combined with the action embedding | self._state_conv_encoder = get_image_encoder( | 1 | 2023-11-06 23:14:20+00:00 | 4k |
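Projector/predictor pairs like the SimSiam-style heads built above are typically trained with a negative cosine similarity loss and a stop-gradient on the target branch. A hedged sketch of that loss follows, under the assumption that this model is trained with the standard SimSiam recipe; the repo's actual loss is not shown in the snippet.

import torch.nn.functional as F

def simsiam_consistency_loss(predicted: torch.Tensor, projected_target: torch.Tensor) -> torch.Tensor:
    # stop gradient through the target branch, as in SimSiam
    projected_target = projected_target.detach()
    # negative cosine similarity, averaged over the batch
    return -F.cosine_similarity(predicted, projected_target, dim=-1).mean()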
ApolloAuto/apollo-model-yolox | yolox/models/darknet.py | [
{
"identifier": "BaseConv",
"path": "yolox/models/network_blocks.py",
"snippet": "class BaseConv(nn.Module):\n \"\"\"A Conv2d -> Batchnorm -> silu/leaky relu block\"\"\"\n\n def __init__(\n self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n ):\n s... | from torch import nn
from .network_blocks import BaseConv, CSPLayer, DWConv, Focus, ResLayer, SPPBottleneck | 2,178 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
class Darknet(nn.Module):
# number of blocks from dark2 to dark5.
depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}
def __init__(
self,
depth,
in_channels=3,
stem_out_channels=32,
out_features=("dark3", "dark4", "dark5"),
):
"""
Args:
depth (int): depth of darknet used in model, usually use [21, 53] for this param.
in_channels (int): number of input channels, for example, use 3 for RGB image.
stem_out_channels (int): number of output channels of darknet stem.
It decides channels of darknet layer2 to layer5.
out_features (Tuple[str]): desired output layer name.
"""
super().__init__()
assert out_features, "please provide output features of Darknet"
self.out_features = out_features
self.stem = nn.Sequential(
BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"),
*self.make_group_layer(stem_out_channels, num_blocks=1, stride=2),
)
in_channels = stem_out_channels * 2 # 64
num_blocks = Darknet.depth2blocks[depth]
# create darknet with `stem_out_channels` and `num_blocks` layers.
# to make model structure more clear, we don't use `for` statement in python.
self.dark2 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[0], stride=2)
)
in_channels *= 2 # 128
self.dark3 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[1], stride=2)
)
in_channels *= 2 # 256
self.dark4 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[2], stride=2)
)
in_channels *= 2 # 512
self.dark5 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[3], stride=2),
*self.make_spp_block([in_channels, in_channels * 2], in_channels * 2),
)
def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1):
"starts with conv layer then has `num_blocks` `ResLayer`"
return [
BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"),
*[(ResLayer(in_channels * 2)) for _ in range(num_blocks)],
]
def make_spp_block(self, filters_list, in_filters):
m = nn.Sequential(
*[
BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"),
BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"),
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) Megvii Inc. All rights reserved.
class Darknet(nn.Module):
# number of blocks from dark2 to dark5.
depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}
def __init__(
self,
depth,
in_channels=3,
stem_out_channels=32,
out_features=("dark3", "dark4", "dark5"),
):
"""
Args:
depth (int): depth of darknet used in model, usually use [21, 53] for this param.
in_channels (int): number of input channels, for example, use 3 for RGB image.
stem_out_channels (int): number of output channels of darknet stem.
It decides channels of darknet layer2 to layer5.
out_features (Tuple[str]): desired output layer name.
"""
super().__init__()
assert out_features, "please provide output features of Darknet"
self.out_features = out_features
self.stem = nn.Sequential(
BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"),
*self.make_group_layer(stem_out_channels, num_blocks=1, stride=2),
)
in_channels = stem_out_channels * 2 # 64
num_blocks = Darknet.depth2blocks[depth]
# create darknet with `stem_out_channels` and `num_blocks` layers.
# to make model structure more clear, we don't use `for` statement in python.
self.dark2 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[0], stride=2)
)
in_channels *= 2 # 128
self.dark3 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[1], stride=2)
)
in_channels *= 2 # 256
self.dark4 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[2], stride=2)
)
in_channels *= 2 # 512
self.dark5 = nn.Sequential(
*self.make_group_layer(in_channels, num_blocks[3], stride=2),
*self.make_spp_block([in_channels, in_channels * 2], in_channels * 2),
)
def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1):
"starts with conv layer then has `num_blocks` `ResLayer`"
return [
BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"),
*[(ResLayer(in_channels * 2)) for _ in range(num_blocks)],
]
def make_spp_block(self, filters_list, in_filters):
m = nn.Sequential(
*[
BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"),
BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), | SPPBottleneck( | 5 | 2023-11-08 07:07:24+00:00 | 4k |
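A quick usage sketch for the backbone above: instantiate Darknet-53 and inspect the multi-scale features. The forward method is not included in the snippet, so the assumption that it returns a dict keyed by out_features is inferred from the out_features argument.

import torch

model = Darknet(depth=53).eval()
x = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    feats = model(x)  # assumed: dict with keys "dark3", "dark4", "dark5"
for name, f in feats.items():
    print(name, tuple(f.shape))
# expected strides 8/16/32: dark3 -> (1, 256, 32, 32), dark4 -> (1, 512, 16, 16), dark5 -> (1, 1024, 8, 8)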
indiefan/king_smith | custom_components/king_smith/number.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/king_smith/const.py",
"snippet": "DOMAIN = \"king_smith\""
},
{
"identifier": "WalkingPadCoordinator",
"path": "custom_components/king_smith/coordinator.py",
"snippet": "class WalkingPadCoordinator(DataUpdateCoordinator[None]):\n \"... | from datetime import timedelta
from homeassistant.components.number import (
NumberEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import WalkingPadCoordinator
from .entity import WalkingPadEntity
from .walking_pad import WalkingPadApi | 2,274 |
"""Walking Pad Number Entities."""
SCAN_INTERVAL = timedelta(seconds=5)
KPH_TO_MPH = 0.621371
MIN_VALUE = 0.0
MAX_VALUE = 4.0
STEP = 0.1
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entity."""
name = config_entry.data.get(CONF_NAME) or DOMAIN
data = hass.data[DOMAIN][config_entry.entry_id]
entity = WalkingPadSpeed(name, data["device"], data["coordinator"])
async_add_entities([entity])
|
"""Walking Pad Number Entities."""
SCAN_INTERVAL = timedelta(seconds=5)
KPH_TO_MPH = 0.621371
MIN_VALUE = 0.0
MAX_VALUE = 4.0
STEP = 0.1
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entity."""
name = config_entry.data.get(CONF_NAME) or DOMAIN
data = hass.data[DOMAIN][config_entry.entry_id]
entity = WalkingPadSpeed(name, data["device"], data["coordinator"])
async_add_entities([entity])
| class WalkingPadSpeed(WalkingPadEntity, NumberEntity): | 2 | 2023-11-03 20:45:03+00:00 | 4k |
ndiamant/spice | tests/test_conditional_histogram.py | [
{
"identifier": "select_bins",
"path": "spice/conditional_histogram.py",
"snippet": "def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:\n return unique_quantile(y, n_bins, first_bin_zero=False)"
},
{
"identifier": "discretize",
"path": "spice/conditional_histogram.py",
"sn... | import torch
from spice.conditional_histogram import (
select_bins, discretize, ConditionalHist, integrate_categorical_below_threshold, find_hpd_cutoffs,
) | 1,832 |
def test_select_bins():
y = torch.linspace(0, 1, 100)
bins = select_bins(y, n_bins=5)
binned = discretize(y, bins)
_, counts = torch.unique(binned, return_counts=True)
# make sure bins equally divide the data
assert len(set(counts.numpy())) == 1
def test_discretize():
n_bins = 5
bins = torch.linspace(0, 1, n_bins)
y = torch.tensor([
[0, 0.3, 0.9],
[0.05, 0.31, 0.91],
])
assert (discretize(y, bins) == torch.tensor([
[0, 2, 4],
[1, 2, 4],
])).all()
def test_conditional_hist():
d = 5
bsz = 2
n_bins = 7
|
def test_select_bins():
y = torch.linspace(0, 1, 100)
bins = select_bins(y, n_bins=5)
binned = discretize(y, bins)
_, counts = torch.unique(binned, return_counts=True)
# make sure bins equally divide the data
assert len(set(counts.numpy())) == 1
def test_discretize():
n_bins = 5
bins = torch.linspace(0, 1, n_bins)
y = torch.tensor([
[0, 0.3, 0.9],
[0.05, 0.31, 0.91],
])
assert (discretize(y, bins) == torch.tensor([
[0, 2, 4],
[1, 2, 4],
])).all()
def test_conditional_hist():
d = 5
bsz = 2
n_bins = 7 | m = ConditionalHist(d, 3, max_iter=10, bins=torch.linspace(0, 1, n_bins), y_min=0) | 2 | 2023-11-01 18:04:29+00:00 | 4k |
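Per the context snippet above, select_bins wraps unique_quantile, whose body is not shown. A hedged sketch of a compatible implementation follows; the exact handling of first_bin_zero is an assumption.

def unique_quantile(y: torch.Tensor, n_bins: int, first_bin_zero: bool = True) -> torch.Tensor:
    # evenly spaced quantile levels; optionally skip the 0-quantile so the
    # first bin edge is not pinned to the minimum of y
    start = 0.0 if first_bin_zero else 1.0 / n_bins
    levels = torch.linspace(start, 1.0, n_bins)
    # deduplicate in case y has ties at several quantile levels
    return torch.quantile(y, levels).unique()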
4darsh-Dev/AyurVichar | home/views.py | [
{
"identifier": "PrakrutiResult",
"path": "home/models.py",
"snippet": "class PrakrutiResult(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n vata_score = models.IntegerField()\n pitta_score = models.IntegerField()\n kapha_score = models.IntegerField()\n prakrut... | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import auth
from django.contrib.auth import authenticate,login, logout
from django.contrib import messages
from .models import PrakrutiResult
from .forms import PrakrutiForm | 2,386 | },
{
'id': 'kapha_question15',
'question': 'In comparison to others do you pass urine & stool in large quantities and do you perspire more? ',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question16',
'question': 'Do your friends complain about bad smell being emitted from mouth or body?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question17',
'question': 'Do you think you have intense sexual desire? ',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'vata_question1',
'question': 'Whether your skin remains dry throughout the year in comparison to others?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question2',
'question': 'Is your body undernourished/emaciated?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question3',
'question': 'Have you got a rough, low, broken or obstructed voice? Does your sleep last less than 6 hours per day, or can your sleep be disturbed easily?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question4',
'question': 'Do you change walking speed & style from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question5',
'question': 'Do you keep changing your food habits from time to time? ',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question6',
'question': 'Do you keep changing your walking / jogging habit from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question7',
'question': 'Do you keep your joints, eyes, eyebrows, jaw, lips, tongue, head, shoulders, hands & feet frequently moving?',
'attribute': 'vata',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'vata_question8',
'question': 'Are you considered talkative among your friends?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question9',
'question': 'Do you have prominent veins & tendons all over the body?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question10',
'question': 'Do you generally start the work assigned to you immediately?',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
{
'id': 'vata_question11',
'question': 'Do you get irritated easily? (E.g., when you do not get breakfast on time in your hostel or when the power goes off while watching a cricket match on your TV?)',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
]
if request.method == 'POST':
|
# added by adarsh
# from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request, "index.html")
def about(request):
return render(request, "about.html")
def services(request):
return render(request, "services.html" )
def contact(request):
return render(request, "contact.html")
def loginUser(request):
if request.method == 'POST':
username = request.POST.get("username")
password = request.POST.get("password")
# Matching user credentials
user = authenticate(username = username, password = password)
if user is not None:
# Django authenticates the username and password
login(request, user)
return redirect('/prakruti')
else:
# No backend authenticated the credentials
error_message = "Invalid username or password! Please try again."
messages.error(request, error_message)
# return render(request, 'login.html')
return render(request, "login.html", {"error_message" : error_message} )
return render(request, "login.html")
def registerUser(request):
if request.method == "POST":
username = request.POST.get("username")
email = request.POST.get("email")
pass1 = request.POST.get("password")
pass2 = request.POST.get("cnf-password")
# Check for username
if len(username) >10:
error_msg1 = "Username must not be more than 10 characters"
messages.error(request, error_msg1)
return render(request, "signup.html", {"error_message" : error_msg1})
# check for alphanumeric
if (not username.isalnum()):
error_msg2 = "Username must be alpha-numeric"
messages.error(request, error_msg2)
return render(request, "register.html", {"error_message": error_msg2})
# Checking for passwords match
if pass1 != pass2:
error_msg3 = "Passwords don't match!"
messages.error(request, error_msg3)
return render(request, "register.html", {"error_message" : error_msg3})
# Checking for already existing users
if (User.objects.filter(username=username).exists()):
error_msg4 = "Username already taken! Please choose different one."
messages.error(request, error_msg4)
return render(request, "register.html", {"error_message": error_msg4})
# Check for duplicated email
if (User.objects.filter(email=email).exists()):
error_msg5 = "Email already taken! Please choose different one."
messages.error(request, error_msg5)
return render(request, "register.html", {"error_message ": error_msg5})
# Creating user
myUser = User.objects.create_user(username, email, pass2)
myUser.save()
success_msg = "Your a/c has been created successfully! "
messages.success(request, success_msg)
return redirect('/prakruti')
return render(request, "register.html")
def logoutUser(request):
auth.logout(request)
return redirect("home")
def profile(request):
return render(request, "profile.html", {"prakruti_type" : prakruti_type})
# def prakruti(request):
# return render(request, "prakruti.html")
def prakriti_request(request):
questions = [
{
'id': 'kapha_question1',
'question': 'Whether your skin remains oily throughout the year in comparison to others?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question2',
'question': 'Are your body-hairs & skin shiny, even when no oil or moisturizer is used?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question3',
'question': 'Are you considered attractive among your friends?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question4',
'question': 'Do even mild or trivial injuries on your body make you upset?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question5',
'question': 'Among your family members, Is your complexion considered fairer?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question6',
'question': 'Do you think you have intense sexual desire ?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question7',
'question': 'Have got well built muscles?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question8',
'question': 'Do you change your body posture frequently? (You cannot manage yourself in a single posture for longer duration) ?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 60,
},
{
'id': 'kapha_question9',
'question': 'Do you have well-nourished normally developed body? (You are neither malnourished nor obese)',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question10',
'question': 'Are you lazy and disinterested in activities like morning walk/jogging , swimming or any type of outdoor games ?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question11',
'question': 'Are you slow in consuming food? (Even when everyone else has left the dining hall, you are still consuming the same amount of food.)',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question12',
'question': 'When you go for a morning walk or to college or office, do you walk slowly in comparison to others?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question13',
'question': 'If you are assigned any work, do you take some extra time to start it?',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question14',
'question': "Do you get imitated easily? (For example, when you don't get breaktast on time in your hostel or when the power goes off while watching a cricket match or your favorite movie on television)",
'attribute': 'kapha',
'yes_points': 0,
'no_points': 40,
},
{
'id': 'kapha_question15',
'question': 'Are you late to develop or suffer from symptoms after exposure to common causative factors? (For example, during seasonal changes, when your friends are easily caught up with flu etc., you are still healthy among them)',
'attribute': 'kapha',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'kapha_question16',
'question': 'Does your gait (style of walking) change with respect to speed or manner frequently?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'kapha_question17',
'question': 'Do you feel hungry more frequently and do you consume more food in comparison to others?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 30,
},
{
'id': 'kapha_question18',
'question': 'Do you tolerate heat easily?',
'attribute': 'kapha',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'kapha_question19',
'question': 'Do you consume liquids in more quantity and frequency in comparison to others? ',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 30,
},
{
'id': 'kapha_question20',
'question': 'Do you perspire less in comparison to others?',
'attribute': 'kapha',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'kapha_question21',
'question': 'Are sounds produced frequently in your joints on movements?',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'kapha_question22',
'question': 'Have you got a good/attractive complexion?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question23',
'question': 'Have you got sweet & pleasant voice?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'pitta_question1',
'question': 'Are you more comfortable in winter than summers?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question2',
'question': 'Among your family members, is your complexion considered fairer? ',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question3',
'question': 'Does the temperature of your oral cavity remain towards the upper limit of the normal range?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question4',
'question': 'Do you have excessive black moles, freckles etc. on your skin? Or have you noticed the frequent appearance of new black moles on your skin?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question5',
'question': 'Do you feel excessive hunger & thirst in comparison to others?',
'attribute': 'pitta',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'pitta_question6',
'question': 'Have you experienced premature graying, wrinkling of skin & early baldness?',
'attribute': 'kapha',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'kapha_question7',
'question': 'Do you have soft, scanty, brown hair on your face, body & head?',
'attribute': 'kapha',
'yes_points': 17,
'no_points': 0,
},
{
'id': 'kapha_question8',
'question': 'Do you involve yourself in risky & heroic activities requiring physical strength often?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question9',
'question': 'Do you have the ability to digest large quantities of food easily?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question10',
'question': 'Do you take large quantities of food & drink in comparison to others?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question11',
'question': 'Do you have soft, scanty, brown hair on your face, body & head?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question12',
'question': 'Do you get easily irritated for small/negligible problem in day-to-day life?',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question13',
'question': 'Do you consume food more frequently than others? (5-6 times/day)',
'attribute': 'kapha',
'yes_points': 24,
'no_points': 0,
},
{
'id': 'kapha_question14',
'question': ' Do you have soft & loose muscle bulk especially around the joints?',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question15',
'question': 'In comparison to others do you pass urine & stool in large quantities and do you perspire more? ',
'attribute': 'kapha',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'kapha_question16',
'question': 'Do your friends complain about bad smell being emitted from mouth or body?',
'attribute': 'kapha',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'kapha_question17',
'question': 'Do you think you have intense sexual desire? ',
'attribute': 'kapha',
'yes_points': 0,
'no_points': 120,
},
{
'id': 'vata_question1',
'question': 'Does your skin remain dry throughout the year in comparison to others?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question2',
'question': 'Is your body undernourished/emaciated?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question3',
'question': 'Have you got a rough, low, broken or obstructed voice? Does your sleep last less than 6 hours per day, or can your sleep be disturbed easily?',
'attribute': 'vata',
'yes_points': 30,
'no_points': 0,
},
{
'id': 'vata_question4',
'question': 'Do you change walking speed & style from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question5',
'question': 'Do you keep changing your food habits from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question6',
'question': 'Do you keep changing your walking / jogging habits from time to time?',
'attribute': 'vata',
'yes_points': 40,
'no_points': 0,
},
{
'id': 'vata_question7',
'question': 'Do you keep your joints, eyes, eyebrows, jaw, lips, tongue, head, shoulders, hands & feet frequently moving?',
'attribute': 'vata',
'yes_points': 120,
'no_points': 0,
},
{
'id': 'vata_question8',
'question': 'Are you considered talkative among your friends?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question9',
'question': 'Do you have prominent veins & tendons all over the body?',
'attribute': 'vata',
'yes_points': 60,
'no_points': 0,
},
{
'id': 'vata_question10',
'question': 'Do you generally start the work assigned to you immediately?',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
{
'id': 'vata_question11',
'question': 'Do you get irritated easily? (E.g., when you do not get breakfast on time in your hostel, or when the power goes off while watching a cricket match on your TV?)',
'attribute': 'vata',
'yes_points': 15,
'no_points': 0,
},
]
if request.method == 'POST': | form = PrakrutiForm(request.POST) | 1 | 2023-11-04 10:16:05+00:00 | 4k |
nik-sm/com-hom-emg | scripts/plots.py | [
{
"identifier": "CANONICAL_COORDS_STR",
"path": "com_hom_emg/scoring.py",
"snippet": "CANONICAL_COORDS_STR = []"
},
{
"identifier": "PROJECT_PATH",
"path": "com_hom_emg/utils.py",
"snippet": "PROJECT_PATH = Path(__file__).parent.parent"
}
] | import argparse
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from pathlib import Path
from typing import List, Optional
from loguru import logger
from com_hom_emg.scoring import CANONICAL_COORDS_STR
from com_hom_emg.utils import PROJECT_PATH | 3,421 | # We have ~85K fake doubles total, each has 1 median.
# Then we have 50 independent runs. Here we average over all 50*85K items
output_dir = figs_dir / f"{which_expt}.similarity_matrices"
output_dir.mkdir(exist_ok=True)
print(f"Table describing feature similarity, for: {which_expt}")
print()
rows = []
print("group_name, real_to_real, fake_to_fake, real_to_fake, non_matching")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
scalar_sim_values = [summarize_similarity_matrix(m) for m in similarity_matrices]
real_reals = [s[0] for s in scalar_sim_values]
fake_fakes = [s[1] for s in scalar_sim_values]
real_fakes = [s[2] for s in scalar_sim_values]
nonmatches = [s[3] for s in scalar_sim_values]
real_to_real = f"{round(np.mean(real_reals), 2)} ± {round(np.std(real_reals), 2)}"
fake_to_fake = f"{round(np.mean(fake_fakes), 2)} ± {round(np.std(fake_fakes), 2)}"
real_to_fake = f"{round(np.mean(real_fakes), 2)} ± {round(np.std(real_fakes), 2)}"
nonmatch = f"{round(np.mean(nonmatches), 2)} ± {round(np.std(nonmatches), 2)}"
string = ", ".join([str(group_name), real_to_real, fake_to_fake, real_to_fake, nonmatch])
print(string)
rows.append(
{
"group_name": str(group_name),
"real_to_real": real_to_real,
"fake_to_fake": fake_to_fake,
"real_to_fake": real_to_fake,
"non_matching": nonmatch,
}
)
print()
# Save these summary statistics to a CSV so we can emit a latex table later
table_df = pd.DataFrame(rows)
table_df.to_csv(figs_dir / f"{title}.similarity_values.{gamma}.csv", index=False)
print(f"Figures with average similarity heatmap for each group, for: {which_expt}")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
ticktext = group["ticktext"].iloc[0]
avg_similarity_matrix = np.nanmean(similarity_matrices, 0)
fig = make_similarity_heatmap_plot(avg_similarity_matrix, ticktext)
filename = f"{which_expt}.{group_name.replace('<br>', '__')}.similarity_matrix.png"
fig.write_image(output_dir / filename, scale=2)
def main(figs_dir: Path, which_test: str, which_expt: str, suffix: Optional[str], gamma: Optional[float]):
logger.info(f"Saving figures to: {figs_dir}")
if suffix is None:
title = f"{which_test}.{which_expt}"
else:
title = f"{which_test}.{which_expt}.{suffix}"
logger.info(f"Loading data for: {title}")
df = pd.read_pickle(figs_dir / f"{title}.pkl")
# Add group name for convenient grouping later
logger.info("NOTE - not including encoder arch in group names (only used basic)")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
# Unify column naming from fine-tuning and fresh-classifier experiments
col_rename_map = {}
for subset in ["single", "double", "overall"]:
for scenario in ["augmented", "lower_bound", "upper_bound", "zero_shot"]:
col_rename_map[f"test_{scenario}/{subset}_bal_acc"] = f"{scenario}.{subset}_bal_acc"
df = df.rename(columns=col_rename_map)
# Make plots
make_confusion_matrices(df, figs_dir, which_test, title)
# make_boxplots(df, figs_dir, which_test, title)
# NOTE - this part will get re-run a few times, but it is fine
# (Because it doesn't depend on fine-tune vs fresh-classifier)
# As long as this script is run once for "regular" and once for "ablation", it is enough
df = pd.read_pickle(figs_dir / f"feature_similarity.{which_expt}.{gamma}.pkl")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
make_feature_similarity_plots(df, figs_dir, which_expt, title, gamma)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--figs_dir", default="figures")
parser.add_argument("--which_test", required=True, choices=["finetune", "fresh-classifier"])
parser.add_argument("--which_expt", required=True, choices=["regular", "ablation"])
parser.add_argument("--suffix", default=None) # e.g. "lda.None" or "logr.1000"
parser.add_argument("--gamma", default=None, type=float)
args = parser.parse_args()
if args.which_test == "fresh-classifier":
if args.suffix is None:
raise ValueError("Must specify suffix for fresh-classifier test")
|
layout_template = "simple_white"
colors = px.colors.qualitative.Plotly
def plot_confusion_matrix(data: np.ndarray, title: Optional[str] = None):
def make_text(cm):
text = []
for v in cm.flatten():
text.append(f"{round(v, 2)}")
return np.array(text).reshape(cm.shape)
text = make_text(data)
# Eliminate the final row, which corresponds to actual label = "None, None"
data = data[:-1]
text = text[:-1]
ticktext = CANONICAL_COORDS_STR
fig = go.Figure()
showscale = False
margin = dict(l=20, r=0, t=0, b=20)
if title is not None:
fig.update_layout(title=title)
margin = dict(l=20, r=0, t=20, b=20)
fig.add_trace(
go.Heatmap(
z=data,
text=text,
texttemplate="%{text}",
zmin=0,
zmax=1,
colorscale="Blues",
showscale=showscale,
textfont_size=15,
)
)
fig.update_layout(
margin=margin,
xaxis=dict(
title="Predicted",
tickangle=-45,
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
constrain="domain",
),
yaxis=dict(
title="Actual",
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
autorange="reversed",
scaleanchor="x",
scaleratio=1,
constrain="domain",
),
font_size=15,
)
full_fig = fig.full_figure_for_development(warn=False)
x_lo, x_hi = full_fig.layout.xaxis.range
y_hi, y_lo = full_fig.layout.yaxis.range # NOTE - y-axis range is reversed for heatmap
box_size = (y_hi - y_lo) / data.shape[0]
# Add horizontal line between single and combo classes
n = 8 # 8 single classes above the line
x = [x_lo, x_hi]
y_value = y_hi - n * box_size
y = [y_value, y_value]
y = [y_hi - y_ + y_lo for y_ in y] # reverse coords
fig.add_trace(go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False))
# Add vertical line between single and combo classes
n = 8 # 8 single classes left of the line
x_value = x_lo + n * box_size
x = [x_value, x_value]
y = [y_hi, y_lo]
fig.add_trace(go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False))
# Add vertical line between combo classes and 'None' class
n = 24 # 24 classes left of the line
x_value = x_lo + n * box_size
x = [x_value, x_value]
y = [y_hi, y_lo]
fig.add_trace(go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False))
# Need to re-set the axis ranges after adding lines
fig.update_layout(xaxis_range=[x_lo, x_hi], yaxis_range=[y_hi, y_lo], yaxis_autorange=False)
return fig
def make_boxplots(df: pd.DataFrame, figs_dir, which_test: str, title: str):
# Make 3 large figures: one on single acc, one on double acc, and one on overall acc
# In each figure:
# The figure contains grouped boxplots.
# Each boxplot group is a particular setting of encoder, classifier, feature combine type, loss type
# Within each boxplot group, we have a box for "augmented", "upper", "lower". In the case of fine-tuning,
# we also have a box for "zero-shot"
# First, unify column naming. In fine-tune, names are "test_augmented/overall_bal_acc", etc
# In fresh-classifier, names are "augmented.overall_bal_acc", etc
# Stick to the latter.
output_dir = figs_dir / f"{title}.boxplots"
output_dir.mkdir(exist_ok=True)
for subset in ["single", "double", "overall"]:
fig = go.Figure()
names_cols = [
("augmented", f"augmented.{subset}_bal_acc"),
("upper_bound", f"upper_bound.{subset}_bal_acc"),
("lower_bound", f"lower_bound.{subset}_bal_acc"),
]
if which_test == "finetune":
names_cols.append(("zero_shot", f"zero_shot.{subset}_bal_acc"))
for i, (name, col) in enumerate(names_cols):
data = df[col]
x = df["group_name"]
kw = dict(jitter=0.5, marker_size=3, marker_color=colors[i])
trace = go.Box(y=data, x=x, name=name, **kw)
fig.add_trace(trace)
fig.update_layout(
boxmode="group",
template=layout_template,
yaxis=dict(range=[0, 1], title="Balanced Test Acc"),
xaxis_title="Classifier // Feature Combine Type // Loss Type",
legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
# boxgap=0.25, # Space between groups
# boxgroupgap=0, # Space between boxes in a group
margin=dict(l=0, r=0, t=0, b=0),
font_size=15,
)
fig.write_image(output_dir / f"{title}.{subset}.png", width=1200, height=600, scale=2)
def make_confusion_matrices(df: pd.DataFrame, figs_dir, which_test: str, title: str):
# Group by method details, and then average across folds and seeds
# Create a single confusion matrix using plot_confusion_matrix
# Save to file
output_dir = figs_dir / f"{title}.confusion_matrices"
output_dir.mkdir(exist_ok=True)
names = ["augmented", "upper_bound", "lower_bound"]
if which_test == "finetune":
names.append("zero_shot")
for group_name, group in df.groupby("group_name"):
for name in names:
col = f"{name}.confusion_matrix"
this_group_conf_mats = np.stack(group[col])
avg_conf_mat = np.nanmean(this_group_conf_mats, 0)
fig = plot_confusion_matrix(avg_conf_mat) # , title=f"{group_name} // {name}")
filename = f"{title}.{group_name.replace('<br>', '__')}.{name}.conf_mat.png"
fig.write_image(output_dir / filename, width=1000, height=1000, scale=2)
def plot_heatmap(data: np.ndarray, ticktext: List[str]):
def make_text(cm):
text = []
for v in cm.flatten():
text.append(f"{round(v, 2)}")
return np.array(text).reshape(cm.shape)
# Get lower triangular
data = np.copy(data)
data[np.triu_indices(data.shape[0], k=1)] = None
text = make_text(data)
fig = go.Figure()
fig.update_layout(
# margin=margin,
template=layout_template,
xaxis=dict(
tickangle=-45,
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
constrain="domain",
),
yaxis=dict(
tickmode="array",
ticktext=ticktext,
tickvals=list(range(len(ticktext))),
autorange="reversed",
scaleanchor="x",
scaleratio=1,
constrain="domain",
),
width=1000,
height=1000,
margin=dict(l=0, r=0, t=0, b=0),
font_size=15,
)
fig.add_trace(
go.Heatmap(z=data, text=text, texttemplate="%{text}", zmin=0, zmax=1, colorscale="Greens", showscale=False)
)
return fig
def make_similarity_heatmap_plot(similarity_matrix, ticktext):
fig = plot_heatmap(similarity_matrix, ticktext)
full_fig = fig.full_figure_for_development(warn=False)
x_lo, x_hi = full_fig.layout.xaxis.range
y_hi, y_lo = full_fig.layout.yaxis.range # NOTE - y-axis range is reversed for heatmap
n_classes = len(ticktext)
box_size = (y_hi - y_lo) / n_classes
# Add a line after the single gesture classes
def add_hline(n):
# Line from the y-axis, travling horizontall, until it hits the diagonal
x = [x_lo, x_lo + n * box_size]
# compute y-values in the normal way
y = [y_hi - n * box_size, y_hi - n * box_size]
# Then adjust y values to account for reversed axis
y = [y_hi - y_ + y_lo for y_ in y]
fig.add_trace(
go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False)
)
def add_vline(n):
# Line from the diagonal, traveling vertically down, until it hits x-axis
# after moving over n boxes, the y value of the diagonal is y_hi - n * box_size
x = [x_lo + n * box_size, x_lo + n * box_size]
# compute y-values in the normal way
y = [y_hi - n * box_size, y_lo]
# Then adjust y values to account for reversed axis
y = [y_hi - y_ + y_lo for y_ in y]
fig.add_trace(
go.Scatter(x=x, y=y, mode="lines", line=dict(color="black", dash="dot", width=4), showlegend=False)
)
# Add lines for easier interpretation
# p fig.full_figure_for_development(warn=False).layout.yaxis.range
add_hline(16)
add_vline(16)
# Need to re-set the axis ranges after adding lines
fig.update_layout(xaxis_range=[x_lo, x_hi], yaxis_range=[y_hi, y_lo], yaxis_autorange=False)
return fig
def summarize_similarity_matrix(similarity_matrix: np.ndarray):
# Extract 4 numbers of interest:
# - avg of first 16 elements of diag -> describes real-real similarity
# - avg of final 16 elements of diag -> fake-fake sim
# - avg of 16th subdiagonal -> real-fake sim
# - avg of all other below-diagonal elements -> non-matching sim
real_real_sim = np.nanmean(np.diag(similarity_matrix)[:16])
fake_fake_sim = np.nanmean(np.diag(similarity_matrix)[16:])
real_fake_sim = np.nanmean(np.diag(similarity_matrix, k=-16))
# We want to get the avg of below-diagonal entries, except for a certain subdiagonal.
# Add them all up, subtract that subdiagonal, and divide by number of items
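# (This assumes the similarity matrix is symmetric, so the strictly
# upper-triangular entries gathered below equal the below-diagonal set.)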
tril = similarity_matrix[np.triu_indices(similarity_matrix.shape[0], k=1)]
stripe = np.diag(similarity_matrix, k=-16)
nonmatch_sim = (np.nansum(tril) - np.nansum(stripe)) / (len(tril) - len(stripe))
return real_real_sim, fake_fake_sim, real_fake_sim, nonmatch_sim
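# A minimal, hypothetical usage sketch for summarize_similarity_matrix,
# assuming the 32x32 layout used in this script (indices 0-15 are real
# classes, 16-31 the corresponding fake classes):
def _example_summarize_similarity() -> tuple:
    """Synthetic input: an identity matrix has perfect self-similarity."""
    sim = np.eye(32)
    return summarize_similarity_matrix(sim)  # -> (1.0, 1.0, 0.0, 0.0)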
def make_feature_similarity_plots(df, figs_dir, which_expt, title: str, gamma: Optional[float]):
# NOTE - For each fake double, we computed the median distance to matching real doubles
# This gives us the median of ~40 distances for each point.
# We have ~85K fake doubles total, each has 1 median.
# Then we have 50 independent runs. Here we average over all 50*85K items
output_dir = figs_dir / f"{which_expt}.similarity_matrices"
output_dir.mkdir(exist_ok=True)
print(f"Table describing feature similarity, for: {which_expt}")
print()
rows = []
print("group_name, real_to_real, fake_to_fake, real_to_fake, non_matching")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
scalar_sim_values = [summarize_similarity_matrix(m) for m in similarity_matrices]
real_reals = [s[0] for s in scalar_sim_values]
fake_fakes = [s[1] for s in scalar_sim_values]
real_fakes = [s[2] for s in scalar_sim_values]
nonmatches = [s[3] for s in scalar_sim_values]
real_to_real = f"{round(np.mean(real_reals), 2)} ± {round(np.std(real_reals), 2)}"
fake_to_fake = f"{round(np.mean(fake_fakes), 2)} ± {round(np.std(fake_fakes), 2)}"
real_to_fake = f"{round(np.mean(real_fakes), 2)} ± {round(np.std(real_fakes), 2)}"
nonmatch = f"{round(np.mean(nonmatches), 2)} ± {round(np.std(nonmatches), 2)}"
string = ", ".join([str(group_name), real_to_real, fake_to_fake, real_to_fake, nonmatch])
print(string)
rows.append(
{
"group_name": str(group_name),
"real_to_real": real_to_real,
"fake_to_fake": fake_to_fake,
"real_to_fake": real_to_fake,
"non_matching": nonmatch,
}
)
print()
# Save these summary statistics to a CSV so we can emit a latex table later
table_df = pd.DataFrame(rows)
table_df.to_csv(figs_dir / f"{title}.similarity_values.{gamma}.csv", index=False)
print(f"Figures with average similarity heatmap for each group, for: {which_expt}")
for group_name, group in df.groupby("group_name"):
similarity_matrices = np.stack(group["similarity_matrix"])
ticktext = group["ticktext"].iloc[0]
avg_similarity_matrix = np.nanmean(similarity_matrices, 0)
fig = make_similarity_heatmap_plot(avg_similarity_matrix, ticktext)
filename = f"{which_expt}.{group_name.replace('<br>', '__')}.similarity_matrix.png"
fig.write_image(output_dir / filename, scale=2)
def main(figs_dir: Path, which_test: str, which_expt: str, suffix: Optional[str], gamma: Optional[float]):
logger.info(f"Saving figures to: {figs_dir}")
if suffix is None:
title = f"{which_test}.{which_expt}"
else:
title = f"{which_test}.{which_expt}.{suffix}"
logger.info(f"Loading data for: {title}")
df = pd.read_pickle(figs_dir / f"{title}.pkl")
# Add group name for convenient grouping later
logger.info("NOTE - not including encoder arch in group names (only used basic)")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
# Unify column naming from fine-tuning and fresh-classifier experiments
col_rename_map = {}
for subset in ["single", "double", "overall"]:
for scenario in ["augmented", "lower_bound", "upper_bound", "zero_shot"]:
col_rename_map[f"test_{scenario}/{subset}_bal_acc"] = f"{scenario}.{subset}_bal_acc"
df = df.rename(columns=col_rename_map)
# Make plots
make_confusion_matrices(df, figs_dir, which_test, title)
# make_boxplots(df, figs_dir, which_test, title)
# NOTE - this part will get re-run a few times, but it is fine
# (Because it doesn't depend on fine-tune vs fresh-classifier)
# As long as this script is run once for "regular" and once for "ablation", it is enough
df = pd.read_pickle(figs_dir / f"feature_similarity.{which_expt}.{gamma}.pkl")
df["group_name"] = df["clf_arch"] + "<br>" + df["feature_combine_type"] + "<br>" + df["loss_type"]
if which_expt == "ablation":
df["group_name"] = (
df["group_name"].astype(str)
+ "<br>("
+ df["linearity_loss_coeff"].astype(str)
+ ","
+ df["real_CE_loss_coeff"].astype(str)
+ ","
+ df["fake_CE_loss_coeff"].astype(str)
+ ","
+ df["data_noise_SNR"].astype(str)
+ ")"
)
make_feature_similarity_plots(df, figs_dir, which_expt, title, gamma)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--figs_dir", default="figures")
parser.add_argument("--which_test", required=True, choices=["finetune", "fresh-classifier"])
parser.add_argument("--which_expt", required=True, choices=["regular", "ablation"])
parser.add_argument("--suffix", default=None) # e.g. "lda.None" or "logr.1000"
parser.add_argument("--gamma", default=None, type=float)
args = parser.parse_args()
if args.which_test == "fresh-classifier":
if args.suffix is None:
raise ValueError("Must specify suffix for fresh-classifier test") | figs_dir = PROJECT_PATH / args.figs_dir | 1 | 2023-11-01 21:12:05+00:00 | 4k |
alengwenus/ha-sma-ev-charger | custom_components/smaev/sensor.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/smaev/const.py",
"snippet": "DOMAIN = \"smaev\""
},
{
"identifier": "SMAEV_COORDINATOR",
"path": "custom_components/smaev/const.py",
"snippet": "SMAEV_COORDINATOR = \"coordinator\""
},
{
"identifier": "SMAEV_DEVICE_INFO",
"... | from dataclasses import dataclass, field
from typing import TYPE_CHECKING
from pysmaev.const import SmaEvChargerMeasurements
from pysmaev.helpers import get_measurements_channel, get_parameters_channel
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
EntityCategory,
UnitOfElectricCurrent,
UnitOfElectricPotential,
UnitOfEnergy,
UnitOfFrequency,
UnitOfPower,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
DOMAIN,
SMAEV_COORDINATOR,
SMAEV_DEVICE_INFO,
SMAEV_MEASUREMENT,
SMAEV_PARAMETER,
SMAEV_VALUE,
)
import logging | 1,706 | key=f"grid_current_phase_l{load}",
translation_key=f"grid_current_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.A.phs{phase}",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.CURRENT,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
*(
SmaEvChargerSensorEntityDescription(
key=f"grid_voltage_phase_l{load}",
translation_key=f"grid_voltage_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.PhV.phs{phase}",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
SmaEvChargerSensorEntityDescription(
key="grid_frequency",
translation_key="grid_frequency",
type=SMAEV_MEASUREMENT,
channel="Measurement.GridMs.Hz",
native_unit_of_measurement=UnitOfFrequency.HERTZ,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.FREQUENCY,
entity_registry_enabled_default=False,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_power",
translation_key="charging_station_power",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWIn.ChaSta",
native_unit_of_measurement=UnitOfPower.WATT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_meter_reading",
translation_key="charging_station_meter_reading",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWhIn.ChaSta",
native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
device_class=SensorDeviceClass.ENERGY,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_session_status",
translation_key="charging_session_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.ChaStt",
value_mapping={
SmaEvChargerMeasurements.NOT_CONNECTED: "not_connected",
SmaEvChargerMeasurements.SLEEP_MODE: "sleep_mode",
SmaEvChargerMeasurements.ACTIVE_MODE: "active_mode",
SmaEvChargerMeasurements.STATION_LOCKED: "station_locked",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="connected_vehicle_status",
translation_key="connected_vehicle_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_status",
translation_key="charging_station_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="mac_address",
translation_key="mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.MacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
SmaEvChargerSensorEntityDescription(
key="wifi_mac_address",
translation_key="wifi_mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.WlMacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA EV Charger sensors."""
data = hass.data[DOMAIN][config_entry.entry_id]
| """Sensor platform for SMA EV Charger integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
@dataclass
class SmaEvChargerSensorEntityDescription(SensorEntityDescription):
"""Describes SMA EV Charger sensor entities."""
type: str = ""
channel: str = ""
value_mapping: dict = field(default_factory=dict)
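# Each description binds one Home Assistant sensor to an SMA EV Charger
# channel: `type` selects the measurement vs. parameter endpoint and
# `value_mapping`, when set, translates raw device enum values into
# translation keys.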
SENSOR_DESCRIPTIONS: tuple[SmaEvChargerSensorEntityDescription, ...] = (
SmaEvChargerSensorEntityDescription(
key="charging_session_energy",
translation_key="charging_session_energy",
type=SMAEV_MEASUREMENT,
channel="Measurement.ChaSess.WhIn",
native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
device_class=SensorDeviceClass.ENERGY,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="position_of_rotary_switch",
translation_key="position_of_rotary_switch",
type=SMAEV_MEASUREMENT,
channel="Measurement.Chrg.ModSw",
value_mapping={
SmaEvChargerMeasurements.SMART_CHARGING: "smart_charging",
SmaEvChargerMeasurements.BOOST_CHARGING: "boost_charging",
},
entity_registry_enabled_default=True,
),
*(
SmaEvChargerSensorEntityDescription(
key=f"grid_current_phase_l{load}",
translation_key=f"grid_current_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.A.phs{phase}",
native_unit_of_measurement=UnitOfElectricCurrent.AMPERE,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.CURRENT,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
*(
SmaEvChargerSensorEntityDescription(
key=f"grid_voltage_phase_l{load}",
translation_key=f"grid_voltage_phase_l{load}",
type=SMAEV_MEASUREMENT,
channel=f"Measurement.GridMs.PhV.phs{phase}",
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.VOLTAGE,
entity_registry_enabled_default=False,
)
for phase, load in (("A", 1), ("B", 2), ("C", 3))
),
SmaEvChargerSensorEntityDescription(
key="grid_frequency",
translation_key="grid_frequency",
type=SMAEV_MEASUREMENT,
channel="Measurement.GridMs.Hz",
native_unit_of_measurement=UnitOfFrequency.HERTZ,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.FREQUENCY,
entity_registry_enabled_default=False,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_power",
translation_key="charging_station_power",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWIn.ChaSta",
native_unit_of_measurement=UnitOfPower.WATT,
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.POWER,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_meter_reading",
translation_key="charging_station_meter_reading",
type=SMAEV_MEASUREMENT,
channel="Measurement.Metering.GridMs.TotWhIn.ChaSta",
native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
device_class=SensorDeviceClass.ENERGY,
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_session_status",
translation_key="charging_session_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.ChaStt",
value_mapping={
SmaEvChargerMeasurements.NOT_CONNECTED: "not_connected",
SmaEvChargerMeasurements.SLEEP_MODE: "sleep_mode",
SmaEvChargerMeasurements.ACTIVE_MODE: "active_mode",
SmaEvChargerMeasurements.STATION_LOCKED: "station_locked",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="connected_vehicle_status",
translation_key="connected_vehicle_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.EVeh.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="charging_station_status",
translation_key="charging_station_status",
type=SMAEV_MEASUREMENT,
channel="Measurement.Operation.Health",
value_mapping={
SmaEvChargerMeasurements.OK: "ok",
SmaEvChargerMeasurements.WARNING: "warning",
SmaEvChargerMeasurements.ALARM: "alarm",
SmaEvChargerMeasurements.OFF: "off",
},
entity_registry_enabled_default=True,
),
SmaEvChargerSensorEntityDescription(
key="mac_address",
translation_key="mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.MacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
SmaEvChargerSensorEntityDescription(
key="wifi_mac_address",
translation_key="wifi_mac_address",
type=SMAEV_PARAMETER,
channel="Parameter.Nameplate.WlMacId",
entity_registry_enabled_default=True,
entity_category=EntityCategory.DIAGNOSTIC,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up SMA EV Charger sensors."""
data = hass.data[DOMAIN][config_entry.entry_id]
| coordinator = data[SMAEV_COORDINATOR] | 1 | 2023-11-04 07:08:41+00:00 | 4k |
microsoft/promptbase | src/promptbase/bigbench/bigbench.py | [
{
"identifier": "process_cot",
"path": "src/promptbase/bigbench/bigbench_cot.py",
"snippet": "def process_cot(test_name: str, overwrite=False, api_type=\"chat\"):\n _logger.info(\"Starting process_cot\")\n if test_name == \"all\":\n subjects = BIGBENCH_SUBJECTS\n elif test_name in BIGBEN... | from .bigbench_cot import process_cot
from .bigbench_score import score
from .bigbench_answer import process_answers
from promptbase.bigbench.consts import BIGBENCH_SUBJECTS | 1,712 |
def generate(subject: str, overwrite: bool, mode="chat"):
if subject != "all" and subject not in BIGBENCH_SUBJECTS:
print(f"Invalid subject: {subject}")
return
print(f"Running BigBench generation for subject {subject}")
process_cot(subject, overwrite, mode)
process_answers(subject, overwrite, mode)
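# Example (hypothetical call): generate("all", overwrite=False) runs
# chain-of-thought generation and answer extraction for every subject in
# BIGBENCH_SUBJECTS; evaluate() then scores the collected results.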
def evaluate(mode="chat"):
|
def generate(subject: str, overwrite: bool, mode="chat"):
if subject != "all" and subject not in BIGBENCH_SUBJECTS:
print(f"Invalid subject: {subject}")
return
print(f"Running BigBench generation for subject {subject}")
process_cot(subject, overwrite, mode)
process_answers(subject, overwrite, mode)
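# Example (hypothetical call): generate("all", overwrite=False) runs
# chain-of-thought generation and answer extraction for every subject in
# BIGBENCH_SUBJECTS; evaluate() then scores the collected results.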
def evaluate(mode="chat"): | score(mode) | 1 | 2023-12-12 08:00:11+00:00 | 4k |
openai/weak-to-strong | train_weak_to_strong.py | [
{
"identifier": "get_tokenizer",
"path": "weak_to_strong/common.py",
"snippet": "def get_tokenizer(model_name: str):\n \"\"\"\n This function returns a tokenizer based on the model name.\n\n Parameters:\n model_name: The name of the model for which the tokenizer is needed.\n\n Returns:\n ... | import json
import os
import fire
import numpy as np
import torch
import weak_to_strong.logger as logger
from typing import Dict, List, Optional, Sequence, Union
from weak_to_strong.common import get_tokenizer
from weak_to_strong.datasets import (VALID_DATASETS, load_dataset,
tokenize_dataset)
from weak_to_strong.loss import logconf_loss_fn, product_loss_fn, xent_loss
from weak_to_strong.train import ModelConfig, train_and_save_model | 3,459 |
# NOTE: learning rates are not particularly tuned; they work somewhat reasonably at train batch size 32
MODEL_CONFIGS = [
ModelConfig(
name="gpt2",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-medium",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-large",
default_lr=1e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-xl",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
),
ModelConfig(
name="Qwen/Qwen-1_8B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-7B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-14B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-72B",
default_lr=1e-5,
eval_batch_size=1,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
# This model is really big, save space by using adafactor.
# Note that even then it will take up ~60GB per GPU on an 8-GPU machine.
default_optimizer="adafactor",
),
]
MODELS_DICT: Dict[str, ModelConfig] = {
model_config.name: model_config for model_config in MODEL_CONFIGS
}
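# Lookup sketch: configs are keyed by model name, e.g.
# MODELS_DICT["gpt2-large"].default_lr == 1e-5 and
# MODELS_DICT["Qwen/Qwen-72B"].default_optimizer == "adafactor".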
loss_dict = {
|
# NOTE: learning rates are not particularly tuned; they work somewhat reasonably at train batch size 32
MODEL_CONFIGS = [
ModelConfig(
name="gpt2",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-medium",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-large",
default_lr=1e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-xl",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
),
ModelConfig(
name="Qwen/Qwen-1_8B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-7B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-14B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
),
ModelConfig(
name="Qwen/Qwen-72B",
default_lr=1e-5,
eval_batch_size=1,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
},
# This model is really big, save space by using adafactor.
# Note that even then it will take up ~60GB per GPU on an 8-GPU machine.
default_optimizer="adafactor",
),
]
MODELS_DICT: Dict[str, ModelConfig] = {
model_config.name: model_config for model_config in MODEL_CONFIGS
}
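# Lookup sketch: configs are keyed by model name, e.g.
# MODELS_DICT["gpt2-large"].default_lr == 1e-5 and
# MODELS_DICT["Qwen/Qwen-72B"].default_optimizer == "adafactor".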
loss_dict = { | "logconf": logconf_loss_fn(), | 4 | 2023-12-13 23:53:13+00:00 | 4k |
linyiLYi/voice-assistant | whisper/whisper.py | [
{
"identifier": "decode",
"path": "whisper/decoding.py",
"snippet": "def decode(\n model: \"Whisper\",\n mel: mx.array,\n options: DecodingOptions = DecodingOptions(),\n **kwargs,\n) -> Union[DecodingResult, List[DecodingResult]]:\n \"\"\"\n Performs decoding of 30-second audio segment... | import base64
import gzip
import math
import mlx.core as mx
import mlx.nn as nn
import numpy as np
from dataclasses import dataclass
from typing import Union
from .decoding import decode as decode_function
from .decoding import detect_language as detect_language_function | 3,173 | x, _, _ = block(x)
x = self.ln_post(x)
return x
class TextDecoder(nn.Module):
def __init__(
self,
n_vocab: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int,
dtype: mx.Dtype = mx.float16,
):
super().__init__()
self.token_embedding = nn.Embedding(n_vocab, n_state)
self.positional_embedding = mx.zeros((n_ctx, n_state))
self.blocks = [
ResidualAttentionBlock(n_state, n_head, cross_attention=True)
for _ in range(n_layer)
]
self.ln = LayerNorm(n_state)
self._mask = nn.MultiHeadAttention.create_additive_causal_mask(n_ctx).astype(
dtype
)
def __call__(self, x, xa, kv_cache=None):
"""
x : mx.array, shape = (batch_size, <= n_ctx)
the text tokens
xa : mx.array, shape = (batch_size, n_audio_ctx, n_audio_state)
the encoded audio features to be attended on
"""
offset = kv_cache[0][0][0].shape[1] if kv_cache else 0
x = (
self.token_embedding(x)
+ self.positional_embedding[offset : offset + x.shape[-1]]
)
if kv_cache is None:
kv_cache = [None] * len(self.blocks)
cross_qk = [None] * len(self.blocks)
for e, block in enumerate(self.blocks):
x, kv_cache[e], cross_qk[e] = block(
x, xa, mask=self._mask, kv_cache=kv_cache[e]
)
x = self.ln(x)
return x @ self.token_embedding.weight.T, kv_cache, cross_qk
class Whisper(nn.Module):
def __init__(self, dims: ModelDimensions, dtype: mx.Dtype = mx.float16):
super().__init__()
self.dims = dims
self.encoder = AudioEncoder(
self.dims.n_mels,
self.dims.n_audio_ctx,
self.dims.n_audio_state,
self.dims.n_audio_head,
self.dims.n_audio_layer,
dtype,
)
self.decoder = TextDecoder(
self.dims.n_vocab,
self.dims.n_text_ctx,
self.dims.n_text_state,
self.dims.n_text_head,
self.dims.n_text_layer,
dtype,
)
# by default, use the second half of the decoder layers for time alignment;
# to use a specific set of heads, see `set_alignment_heads()` below.
all_heads = np.zeros(
(self.dims.n_text_layer, self.dims.n_text_head), dtype=bool
)
all_heads[self.dims.n_text_layer // 2 :] = True
self.alignment_heads = mx.array(np.asarray(all_heads.nonzero()).T)
def set_alignment_heads(self, dump: Union[bytes, np.ndarray]):
if isinstance(dump, np.ndarray):
self.alignment_heads = mx.array(dump)
elif isinstance(dump, bytes):
array = np.frombuffer(
gzip.decompress(base64.b85decode(dump)), dtype=bool
).copy()
mask = array.reshape(self.dims.n_text_layer, self.dims.n_text_head)
self.alignment_heads = mx.array(np.asarray(mask.nonzero()).T)
else:
raise ValueError(
f"Invalid type for `dump`: {type(dump)}. Expected a np.ndarray or base85-encoded bytes containing"
" alignment_head information"
)
def embed_audio(self, mel):
return self.encoder(mel)
def logits(self, tokens, audio_features):
return self.decoder(tokens, audio_features)[0]
def forward_with_cross_qk(self, mel, tokens):
logits, _, cross_qk = self.decoder(tokens, self.encoder(mel))
return logits, cross_qk
def __call__(self, mel, tokens):
return self.decoder(tokens, self.encoder(mel))[0]
@property
def is_multilingual(self):
return self.dims.n_vocab >= 51865
@property
def num_languages(self):
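# Vocabulary-layout constants: e.g. a 51865-token multilingual vocab gives
# 51865 - 51765 - 1 = 99 languages (an assumption based on the published
# Whisper tokenizers; 51866 -> 100 for large-v3).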
return self.dims.n_vocab - 51765 - int(self.is_multilingual)
detect_language = detect_language_function
| # Copyright © 2023 Apple Inc.
@dataclass
class ModelDimensions:
n_mels: int
n_audio_ctx: int
n_audio_state: int
n_audio_head: int
n_audio_layer: int
n_vocab: int
n_text_ctx: int
n_text_state: int
n_text_head: int
n_text_layer: int
def sinusoids(length, channels, max_timescale=10000):
"""Returns sinusoids for positional embedding"""
assert channels % 2 == 0
log_timescale_increment = math.log(max_timescale) / (channels // 2 - 1)
inv_timescales = mx.exp(-log_timescale_increment * mx.arange(channels // 2))
scaled_time = mx.arange(length)[:, None] * inv_timescales[None, :]
return mx.concatenate([mx.sin(scaled_time), mx.cos(scaled_time)], axis=1)
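# Shape sketch (hypothetical sizes): sinusoids(1500, 384) returns a
# (1500, 384) array whose first 192 columns are sines and last 192 are
# cosines, with per-column timescales spaced geometrically up to
# max_timescale.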
class LayerNorm(nn.LayerNorm):
def __call__(self, x: mx.array) -> mx.array:
return super().__call__(x.astype(mx.float32)).astype(x.dtype)
class MultiHeadAttention(nn.Module):
def __init__(self, n_state: int, n_head: int):
super().__init__()
self.n_head = n_head
self.query = nn.Linear(n_state, n_state)
self.key = nn.Linear(n_state, n_state, bias=False)
self.value = nn.Linear(n_state, n_state)
self.out = nn.Linear(n_state, n_state)
def __call__(
self,
x,
xa=None,
mask=None,
kv_cache=None,
):
q = self.query(x)
if xa is None:
k = self.key(x)
v = self.value(x)
if kv_cache is not None:
k = mx.concatenate([kv_cache[0], k], axis=1)
v = mx.concatenate([kv_cache[1], v], axis=1)
elif kv_cache is None:
k = self.key(xa)
v = self.value(xa)
else:
k, v = kv_cache
wv, qk = self.qkv_attention(q, k, v, mask)
return self.out(wv), (k, v), qk
def qkv_attention(self, q, k, v, mask=None):
n_batch, n_ctx, n_state = q.shape
scale = (n_state // self.n_head) ** -0.25
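# Note: the quarter-power scale is applied to both q and k, so their
# product carries the usual 1/sqrt(head_dim) attention scaling.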
q = q.reshape(*q.shape[:2], self.n_head, -1).transpose(0, 2, 1, 3) * scale
k = k.reshape(*k.shape[:2], self.n_head, -1).transpose(0, 2, 3, 1) * scale
v = v.reshape(*v.shape[:2], self.n_head, -1).transpose(0, 2, 1, 3)
qk = q @ k
if mask is not None:
qk = qk + mask[:n_ctx, :n_ctx]
qk = qk.astype(mx.float32)
w = mx.softmax(qk, axis=-1).astype(q.dtype)
out = (w @ v).transpose(0, 2, 1, 3)
out = out.reshape(n_batch, n_ctx, n_state)
return out, qk
class ResidualAttentionBlock(nn.Module):
def __init__(self, n_state: int, n_head: int, cross_attention: bool = False):
super().__init__()
self.attn = MultiHeadAttention(n_state, n_head)
self.attn_ln = LayerNorm(n_state)
self.cross_attn = (
MultiHeadAttention(n_state, n_head) if cross_attention else None
)
self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None
n_mlp = n_state * 4
self.mlp1 = nn.Linear(n_state, n_mlp)
self.mlp2 = nn.Linear(n_mlp, n_state)
self.mlp_ln = LayerNorm(n_state)
def __call__(self, x, xa=None, mask=None, kv_cache=None):
kv, cross_kv = kv_cache if kv_cache else (None, None)
y, kv, _ = self.attn(self.attn_ln(x), mask=mask, kv_cache=kv)
x += y
cross_qk = None
if self.cross_attn:
y, cross_kv, cross_qk = self.cross_attn(
self.cross_attn_ln(x), xa, kv_cache=cross_kv
)
x += y
x = x + self.mlp2(nn.gelu(self.mlp1(self.mlp_ln(x))).astype(x.dtype))
return x, (kv, cross_kv), cross_qk
class AudioEncoder(nn.Module):
def __init__(
self,
n_mels: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int,
dtype: mx.Dtype = mx.float16,
):
super().__init__()
self.conv1 = nn.Conv1d(n_mels, n_state, kernel_size=3, padding=1)
self.conv2 = nn.Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1)
self._positional_embedding = sinusoids(n_ctx, n_state).astype(dtype)
self.blocks = [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)]
self.ln_post = LayerNorm(n_state)
def __call__(self, x):
x = nn.gelu(self.conv1(x)).astype(x.dtype)
x = nn.gelu(self.conv2(x)).astype(x.dtype)
assert x.shape[1:] == self._positional_embedding.shape, "incorrect audio shape"
x = x + self._positional_embedding
for block in self.blocks:
x, _, _ = block(x)
x = self.ln_post(x)
return x
class TextDecoder(nn.Module):
def __init__(
self,
n_vocab: int,
n_ctx: int,
n_state: int,
n_head: int,
n_layer: int,
dtype: mx.Dtype = mx.float16,
):
super().__init__()
self.token_embedding = nn.Embedding(n_vocab, n_state)
self.positional_embedding = mx.zeros((n_ctx, n_state))
self.blocks = [
ResidualAttentionBlock(n_state, n_head, cross_attention=True)
for _ in range(n_layer)
]
self.ln = LayerNorm(n_state)
self._mask = nn.MultiHeadAttention.create_additive_causal_mask(n_ctx).astype(
dtype
)
def __call__(self, x, xa, kv_cache=None):
"""
x : mx.array, shape = (batch_size, <= n_ctx)
the text tokens
xa : mx.array, shape = (batch_size, n_audio_ctx, n_audio_state)
the encoded audio features to be attended on
"""
offset = kv_cache[0][0][0].shape[1] if kv_cache else 0
x = (
self.token_embedding(x)
+ self.positional_embedding[offset : offset + x.shape[-1]]
)
if kv_cache is None:
kv_cache = [None] * len(self.blocks)
cross_qk = [None] * len(self.blocks)
for e, block in enumerate(self.blocks):
x, kv_cache[e], cross_qk[e] = block(
x, xa, mask=self._mask, kv_cache=kv_cache[e]
)
x = self.ln(x)
return x @ self.token_embedding.weight.T, kv_cache, cross_qk
class Whisper(nn.Module):
def __init__(self, dims: ModelDimensions, dtype: mx.Dtype = mx.float16):
super().__init__()
self.dims = dims
self.encoder = AudioEncoder(
self.dims.n_mels,
self.dims.n_audio_ctx,
self.dims.n_audio_state,
self.dims.n_audio_head,
self.dims.n_audio_layer,
dtype,
)
self.decoder = TextDecoder(
self.dims.n_vocab,
self.dims.n_text_ctx,
self.dims.n_text_state,
self.dims.n_text_head,
self.dims.n_text_layer,
dtype,
)
# by default, use the second half of the decoder layers for time alignment;
# to use a specific set of heads, see `set_alignment_heads()` below.
all_heads = np.zeros(
(self.dims.n_text_layer, self.dims.n_text_head), dtype=bool
)
all_heads[self.dims.n_text_layer // 2 :] = True
self.alignment_heads = mx.array(np.asarray(all_heads.nonzero()).T)
def set_alignment_heads(self, dump: Union[bytes, np.ndarray]):
if isinstance(dump, np.ndarray):
self.alignment_heads = mx.array(dump)
elif isinstance(dump, bytes):
array = np.frombuffer(
gzip.decompress(base64.b85decode(dump)), dtype=bool
).copy()
mask = array.reshape(self.dims.n_text_layer, self.dims.n_text_head)
self.alignment_heads = mx.array(np.asarray(mask.nonzero()).T)
else:
raise ValueError(
f"Invalid type for `dump`: {type(dump)}. Expected a np.ndarray or base85-encoded bytes containing"
" alignment_head information"
)
def embed_audio(self, mel):
return self.encoder(mel)
def logits(self, tokens, audio_features):
return self.decoder(tokens, audio_features)[0]
def forward_with_cross_qk(self, mel, tokens):
logits, _, cross_qk = self.decoder(tokens, self.encoder(mel))
return logits, cross_qk
def __call__(self, mel, tokens):
return self.decoder(tokens, self.encoder(mel))[0]
@property
def is_multilingual(self):
return self.dims.n_vocab >= 51865
@property
def num_languages(self):
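# Vocabulary-layout constants: e.g. a 51865-token multilingual vocab gives
# 51865 - 51765 - 1 = 99 languages (an assumption based on the published
# Whisper tokenizers; 51866 -> 100 for large-v3).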
return self.dims.n_vocab - 51765 - int(self.is_multilingual)
detect_language = detect_language_function | decode = decode_function | 12 | 2023-12-09 13:33:46+00:00 | 4k |
SqueezeAILab/LLMCompiler | configs/movie_react/tools.py | [
{
"identifier": "Tool",
"path": "src/agents/tools.py",
"snippet": "class InvalidTool(BaseTool):\n def _run(\n self,\n requested_tool_name: str,\n available_tool_names: List[str],\n run_manager: Optional[CallbackManagerForToolRun] = None,\n ) -> str:\n async def _arun... | from src.agents.tools import Tool
from src.docstore.wikipedia import DocstoreExplorer, ReActWikipedia | 2,638 |
web_searcher = ReActWikipedia()
docstore = DocstoreExplorer(web_searcher)
tools = [
|
web_searcher = ReActWikipedia()
docstore = DocstoreExplorer(web_searcher)
tools = [ | Tool( | 0 | 2023-12-06 21:12:54+00:00 | 4k |
open-compass/MixtralKit | mixtralkit/layers/attention.py | [
{
"identifier": "ModelArgs",
"path": "mixtralkit/layers/utils.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hid... | import math
import torch
import torch.nn.functional as F
import fairscale.nn.model_parallel.initialize as fs_init
from typing import Optional, Tuple
from torch import nn
from .utils import ModelArgs, repeat_kv
from .position_embeding import apply_rotary_emb
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
RowParallelLinear,
) | 1,631 | # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (nn.Linear): Linear transformation for queries.
wk (nn.Linear): Linear transformation for keys.
wv (nn.Linear): Linear transformation for values.
wo (nn.Linear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
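# The k/v caches above are preallocated on the GPU for the full
# (max_batch_size, max_seq_len) window; forward() then only writes the
# slice for the newly decoded positions.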
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads
| # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class TorchAttention(nn.Module):
"""Multi-head attention module."""
def __init__(self, args: ModelArgs):
"""
Initialize the Attention module.
Args:
args (ModelArgs): Model configuration parameters.
Attributes:
n_kv_heads (int): Number of key and value heads.
n_local_heads (int): Number of local query heads.
n_local_kv_heads (int): Number of local key and value heads.
n_rep (int): Number of repetitions for local heads.
head_dim (int): Dimension size of each attention head.
wq (nn.Linear): Linear transformation for queries.
wk (nn.Linear): Linear transformation for keys.
wv (nn.Linear): Linear transformation for values.
wo (nn.Linear): Linear transformation for output.
cache_k (torch.Tensor): Cached keys for attention.
cache_v (torch.Tensor): Cached values for attention.
"""
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = 1
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
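# The k/v caches above are preallocated on the GPU for the full
# (max_batch_size, max_seq_len) window; forward() then only writes the
# slice for the newly decoded positions.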
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
"""
Forward pass of the attention module.
Args:
x (torch.Tensor): Input tensor.
start_pos (int): Starting position for caching.
freqs_cis (torch.Tensor): Precomputed frequency tensor.
mask (torch.Tensor, optional): Attention mask tensor.
Returns:
torch.Tensor: Output tensor after attention.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads | keys = repeat_kv(keys, self.n_rep) # (bs, cache_len + seqlen, n_local_heads, head_dim) | 1 | 2023-12-09 15:05:26+00:00 | 4k |
aymenfurter/microagents | agents/agent_evaluation.py | [
{
"identifier": "OpenAIAPIWrapper",
"path": "integrations/openaiwrapper.py",
"snippet": "class OpenAIAPIWrapper:\n \"\"\"\n A wrapper class for OpenAI's API.\n \"\"\"\n\n def __init__(self, api_key, timeout=10):\n \"\"\"\n Initializes the OpenAIAPIWrapper instance.\n\n :... | import logging
from integrations.openaiwrapper import OpenAIAPIWrapper
from prompt_management.prompts import AGENT_EVALUATION_PROMPT | 1,626 | # Basic logging setup
logger = logging.getLogger()
class AgentEvaluator:
"""
Evaluates AI agent's responses using OpenAI's GPT model.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper):
self.openai_api = openai_wrapper
def evaluate(self, input_text: str, prompt: str, output: str) -> str:
"""
Returns the evaluation agent's response (a score from 1-5).
"""
try:
| # Basic logging setup
logger = logging.getLogger()
class AgentEvaluator:
"""
Evaluates AI agent's responses using OpenAI's GPT model.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper):
self.openai_api = openai_wrapper
def evaluate(self, input_text: str, prompt: str, output: str) -> str:
"""
Returns the evaluation agent's response (a score from 1-5).
"""
try: | formatted_prompt = AGENT_EVALUATION_PROMPT.format(input=input_text, prompt=prompt, output=output) | 1 | 2023-12-11 08:17:09+00:00 | 4k |
bytedance/ImageDream | threestudio/models/guidance/controlnet_guidance.py | [
{
"identifier": "PromptProcessorOutput",
"path": "threestudio/models/prompt_processors/base.py",
"snippet": "class PromptProcessorOutput:\n text_embeddings: Float[Tensor, \"N Nf\"]\n uncond_text_embeddings: Float[Tensor, \"N Nf\"]\n text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n uncond_tex... | import os
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass
from controlnet_aux import CannyDetector, NormalBaeDetector
from diffusers import ControlNetModel, DDIMScheduler, StableDiffusionControlNetPipeline
from diffusers.utils.import_utils import is_xformers_available
from tqdm import tqdm
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseObject
from threestudio.utils.misc import C, parse_version
from threestudio.utils.typing import *
from threestudio.utils.config import ExperimentConfig, load_config
from threestudio.utils.typing import Optional | 2,573 |
@threestudio.register("stable-diffusion-controlnet-guidance")
class ControlNetGuidance(BaseObject):
@dataclass
class Config(BaseObject.Config):
cache_dir: Optional[str] = None
pretrained_model_name_or_path: str = "SG161222/Realistic_Vision_V2.0"
ddim_scheduler_name_or_path: str = "runwayml/stable-diffusion-v1-5"
control_type: str = "normal" # normal/canny
enable_memory_efficient_attention: bool = False
enable_sequential_cpu_offload: bool = False
enable_attention_slicing: bool = False
enable_channels_last_format: bool = False
guidance_scale: float = 7.5
condition_scale: float = 1.5
grad_clip: Optional[
Any
] = None # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
half_precision_weights: bool = True
min_step_percent: float = 0.02
max_step_percent: float = 0.98
diffusion_steps: int = 20
use_sds: bool = False
# Canny threshold
canny_lower_bound: int = 50
canny_upper_bound: int = 100
cfg: Config
def configure(self) -> None:
threestudio.info(f"Loading ControlNet ...")
controlnet_name_or_path: str
if self.cfg.control_type == "normal":
controlnet_name_or_path = "lllyasviel/control_v11p_sd15_normalbae"
        elif self.cfg.control_type == "canny":
            controlnet_name_or_path = "lllyasviel/control_v11p_sd15_canny"
        else:
            # Fail fast instead of leaving controlnet_name_or_path unbound.
            raise ValueError(f"Unknown control_type: {self.cfg.control_type}")
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
pipe_kwargs = {
"safety_checker": None,
"feature_extractor": None,
"requires_safety_checker": False,
"torch_dtype": self.weights_dtype,
"cache_dir": self.cfg.cache_dir,
}
controlnet = ControlNetModel.from_pretrained(
controlnet_name_or_path,
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path, controlnet=controlnet, **pipe_kwargs
).to(self.device)
self.scheduler = DDIMScheduler.from_pretrained(
self.cfg.ddim_scheduler_name_or_path,
subfolder="scheduler",
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.scheduler.set_timesteps(self.cfg.diffusion_steps)
if self.cfg.enable_memory_efficient_attention:
|
@threestudio.register("stable-diffusion-controlnet-guidance")
class ControlNetGuidance(BaseObject):
@dataclass
class Config(BaseObject.Config):
cache_dir: Optional[str] = None
pretrained_model_name_or_path: str = "SG161222/Realistic_Vision_V2.0"
ddim_scheduler_name_or_path: str = "runwayml/stable-diffusion-v1-5"
control_type: str = "normal" # normal/canny
enable_memory_efficient_attention: bool = False
enable_sequential_cpu_offload: bool = False
enable_attention_slicing: bool = False
enable_channels_last_format: bool = False
guidance_scale: float = 7.5
condition_scale: float = 1.5
grad_clip: Optional[
Any
] = None # field(default_factory=lambda: [0, 2.0, 8.0, 1000])
half_precision_weights: bool = True
min_step_percent: float = 0.02
max_step_percent: float = 0.98
diffusion_steps: int = 20
use_sds: bool = False
# Canny threshold
canny_lower_bound: int = 50
canny_upper_bound: int = 100
cfg: Config
def configure(self) -> None:
threestudio.info(f"Loading ControlNet ...")
controlnet_name_or_path: str
if self.cfg.control_type == "normal":
controlnet_name_or_path = "lllyasviel/control_v11p_sd15_normalbae"
        elif self.cfg.control_type == "canny":
            controlnet_name_or_path = "lllyasviel/control_v11p_sd15_canny"
        else:
            # Fail fast instead of leaving controlnet_name_or_path unbound.
            raise ValueError(f"Unknown control_type: {self.cfg.control_type}")
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
pipe_kwargs = {
"safety_checker": None,
"feature_extractor": None,
"requires_safety_checker": False,
"torch_dtype": self.weights_dtype,
"cache_dir": self.cfg.cache_dir,
}
controlnet = ControlNetModel.from_pretrained(
controlnet_name_or_path,
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path, controlnet=controlnet, **pipe_kwargs
).to(self.device)
self.scheduler = DDIMScheduler.from_pretrained(
self.cfg.ddim_scheduler_name_or_path,
subfolder="scheduler",
torch_dtype=self.weights_dtype,
cache_dir=self.cfg.cache_dir,
)
self.scheduler.set_timesteps(self.cfg.diffusion_steps)
if self.cfg.enable_memory_efficient_attention: | if parse_version(torch.__version__) >= parse_version("2"): | 3 | 2023-12-13 21:09:37+00:00 | 4k |
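Note: the record stops inside the enable_memory_efficient_attention branch, and its next_line checks the torch version. A plausible continuation following the common diffusers pattern; an assumption, since xformers is only needed before PyTorch 2.

import torch
from packaging.version import parse as parse_version
from diffusers.utils.import_utils import is_xformers_available

def maybe_enable_memory_efficient_attention(pipe) -> None:
    if parse_version(torch.__version__) >= parse_version("2"):
        return  # PyTorch 2 ships scaled_dot_product_attention; nothing extra needed.
    if is_xformers_available():
        pipe.enable_xformers_memory_efficient_attention()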
TencentARC/MotionCtrl | lvdm/modules/networks/openaimodel3d_next.py | [
{
"identifier": "avg_pool_nd",
"path": "lvdm/basics.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*arg... | import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from abc import abstractmethod
from functools import partial
from einops import rearrange, repeat
from lvdm.basics import (avg_pool_nd, conv_nd, linear, normalization,
zero_module)
from lvdm.common import checkpoint
from lvdm.models.utils_diffusion import timestep_embedding
from lvdm.modules.attention import SpatialTransformer, TemporalTransformer | 2,896 |
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, batch_size=None, is_imgbatch=False):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb, batch_size, is_imgbatch=is_imgbatch)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
|
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, batch_size=None, is_imgbatch=False):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb, batch_size, is_imgbatch=is_imgbatch)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context) | elif isinstance(layer, TemporalTransformer): | 8 | 2023-12-06 07:27:45+00:00 | 4k |
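Note: the next_line adds a TemporalTransformer branch to the dispatch above. A sketch of the usual completion in lvdm-style UNets, reusing the record's imports (rearrange, SpatialTransformer, TemporalTransformer); treat the exact reshape pattern as an assumption.

    def forward(self, x, emb, context=None, batch_size=None, is_imgbatch=False):
        for layer in self:
            if isinstance(layer, TimestepBlock):
                x = layer(x, emb, batch_size, is_imgbatch=is_imgbatch)
            elif isinstance(layer, SpatialTransformer):
                x = layer(x, context)
            elif isinstance(layer, TemporalTransformer):
                # Fold frames into a temporal axis, attend over time, unfold back.
                x = rearrange(x, '(b f) c h w -> b c f h w', b=batch_size)
                x = layer(x, context)
                x = rearrange(x, 'b c f h w -> (b f) c h w')
            else:
                x = layer(x)
        return x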
s-casci/tinyzero | tictactoe/two_dim/train.py | [
{
"identifier": "TicTacToe2DNetwork",
"path": "models.py",
"snippet": "class TicTacToe2DNetwork(nn.Module):\n def __init__(self, input_shape, action_space, first_linear_size=512, second_linear_size=256):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=1)\n self.conv2 = nn.Conv... | from game import TicTacToe
from datetime import datetime
from tqdm import tqdm
from models import TicTacToe2DNetwork # noqa: E402
from agents import AlphaZeroAgentTrainer # noqa: E402
import torch
import wandb
import os
import sys | 1,618 |
sys.path.append(os.getcwd())
OUT_DIR = "tictactoe/two_dim/out"
INIT_FROM_CHECKPOINT = False
SELFPLAY_GAMES = 5000
SELFPLAY_GAMES_PER_SAVE = SELFPLAY_GAMES // 4
BATCH_SIZE = 128
SEARCH_ITERATIONS = 32
MAX_REPLAY_BUFFER_SIZE = BATCH_SIZE * 4
TRAINING_EPOCHS = 5
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-1
C_PUCT = 1.8
DIRICHLET_ALPHA = 0.3 # set to None to disable
WANDB_LOG = True
WANDB_PROJECT_NAME = "tinyalphazero-tictactoe2d"
WANDB_RUN_NAME = "run" + datetime.now().strftime("%Y%m%d-%H%M%S")
if __name__ == "__main__":
game = TicTacToe()
model = TicTacToe2DNetwork(game.observation_shape, game.action_space)
optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
|
sys.path.append(os.getcwd())
OUT_DIR = "tictactoe/two_dim/out"
INIT_FROM_CHECKPOINT = False
SELFPLAY_GAMES = 5000
SELFPLAY_GAMES_PER_SAVE = SELFPLAY_GAMES // 4
BATCH_SIZE = 128
SEARCH_ITERATIONS = 32
MAX_REPLAY_BUFFER_SIZE = BATCH_SIZE * 4
TRAINING_EPOCHS = 5
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-1
C_PUCT = 1.8
DIRICHLET_ALPHA = 0.3 # set to None to disable
WANDB_LOG = True
WANDB_PROJECT_NAME = "tinyalphazero-tictactoe2d"
WANDB_RUN_NAME = "run" + datetime.now().strftime("%Y%m%d-%H%M%S")
if __name__ == "__main__":
game = TicTacToe()
model = TicTacToe2DNetwork(game.observation_shape, game.action_space)
optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
| agent = AlphaZeroAgentTrainer(model, optimizer, MAX_REPLAY_BUFFER_SIZE) | 1 | 2023-12-14 11:36:50+00:00 | 4k |
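Note: AlphaZeroAgentTrainer's interface is not shown in this record, so the loop below is purely illustrative; selfplay, train, and save are hypothetical method names wired to the constants defined above.

    for game_idx in tqdm(range(SELFPLAY_GAMES)):
        agent.selfplay(game, SEARCH_ITERATIONS, C_PUCT, DIRICHLET_ALPHA)  # hypothetical
        agent.train(BATCH_SIZE, TRAINING_EPOCHS)  # hypothetical
        if (game_idx + 1) % SELFPLAY_GAMES_PER_SAVE == 0:
            agent.save(OUT_DIR)  # hypothetical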
facebookresearch/PurpleLlama | CybersecurityBenchmarks/insecure_code_detector/tests/test_java_insecure_code_detector.py | [
{
"identifier": "Language",
"path": "CybersecurityBenchmarks/insecure_code_detector/languages.py",
"snippet": "class Language(str, enum.Enum):\n C = \"c\"\n CPP = \"cpp\"\n CSHARP = \"csharp\"\n HACK = \"hack\"\n JAVA = \"java\"\n JAVASCRIPT = \"javascript\"\n KOTLIN = \"kotlin\"\n ... | from ..languages import Language
from .insecure_code_detector_test import InsecureCodeDetectorTest | 1,787 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
JAVA_TEST_CASES = [
('final Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");', 1),
('final Cipher cipher = Cipher.getInstance("DES/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("DESede/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("AES");', 1),
(".setBlockModes(KeyProperties.BLOCK_MODE_ECB)", 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding")', 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS7Padding")', 1),
('SecretKeyFactory.getInstance("3DES");', 1),
('SecretKeyFactory.getInstance("DESede");', 1),
('SecretKeyFactory.getInstance("DES");', 1),
('SecretKeyFactory.getInstance("Blowfish");', 1),
('SecretKeyFactory.getInstance("DESEDEWRAP");', 1),
("Test nullCipher = new javax.crypto.NullCipher();", 1),
("Test customCipher = new NullCipher();", 1),
('Test test = MessageDigest.getInstance("SHA-1");', 1),
('Test test = MessageDigest.getInstance("SHA1", "SUN");', 1),
("byte[] test = DigestUtils.getSha1Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1Hex(foo.getBytes());", 1),
(
'customXmlFactory.setProperty("javax.xml.stream.isSupportingExternalEntities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", false);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-parameter-entities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-general-entities", true);',
1,
),
('Cipher.getInstance("RSA/NONE/NoPadding", "BC");', 1),
('Cipher.getInstance("RSA/NONE/NoPadding");', 1),
('Test test = MessageDigest.getInstance("MD2");', 1),
('Test test = MessageDigest.getInstance("MD2", "SUN");', 1),
("byte[] test = DigestUtils.getMd2Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD4");', 1),
('Test test = MessageDigest.getInstance("MD4", "SUN");', 1),
("byte[] test = DigestUtils.getMd4Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD5");', 1),
('Test test = MessageDigest.getInstance("md5");', 1),
('Test test = MessageDigest.getInstance("MD5", "SUN");', 1),
("byte[] test = DigestUtils.getMd5Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5Hex(foo.getBytes());", 1),
('final Cipher cipher = Cipher.getInstance("RC2");', 1),
('final Cipher cipher = Cipher.getInstance("RC4");', 1),
('final Cipher cipher = Cipher.getInstance("ARCFOUR");', 1),
("WebView.setWebContentsDebuggingEnabled(true);", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_READABLE );", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_WRITABLE );", 1),
("settings.setAllowFileAccess(true);", 1),
("settings.setAllowFileAccessFromURLs(true);", 1),
("settings.setAllowUniversalAccessFromURLs(true);", 1),
("import org.apache.http.conn.ssl.NoopHostnameVerifier;", 1),
("ALLOW_ALL_HOSTNAME_VERIFIER", 1),
("HostnameVerifier allowAll = new AllowAllHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NullHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NoopHostnameVerifier();", 1),
("ssf.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);", 1),
(
'String dir = System.getProperty("dir");Runtime rt = Runtime.getRuntime(); Process proc = rt.exec(new String[] {"sh", "-c", "ls " + dir});',
1,
),
("Runtime.getRuntime().exec(userInput)", 1),
]
class TestJavaInsecureCodeDetector(InsecureCodeDetectorTest):
async def test_java_regex_patterns(self) -> None:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
JAVA_TEST_CASES = [
('final Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");', 1),
('final Cipher cipher = Cipher.getInstance("DES/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("DESede/ECB/PKCS5Padding");', 2),
('final Cipher cipher = Cipher.getInstance("AES");', 1),
(".setBlockModes(KeyProperties.BLOCK_MODE_ECB)", 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding")', 1),
('final Cipher cipher = Cipher.getInstance("AES/CBC/PKCS7Padding")', 1),
('SecretKeyFactory.getInstance("3DES");', 1),
('SecretKeyFactory.getInstance("DESede");', 1),
('SecretKeyFactory.getInstance("DES");', 1),
('SecretKeyFactory.getInstance("Blowfish");', 1),
('SecretKeyFactory.getInstance("DESEDEWRAP");', 1),
("Test nullCipher = new javax.crypto.NullCipher();", 1),
("Test customCipher = new NullCipher();", 1),
('Test test = MessageDigest.getInstance("SHA-1");', 1),
('Test test = MessageDigest.getInstance("SHA1", "SUN");', 1),
("byte[] test = DigestUtils.getSha1Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1(foo.getBytes());", 1),
("byte[] test = DigestUtils.sha1Hex(foo.getBytes());", 1),
(
'customXmlFactory.setProperty("javax.xml.stream.isSupportingExternalEntities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", false);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-parameter-entities", true);',
1,
),
(
'DocumentBuilderFactory.setFeature("http://xml.org/sax/features/external-general-entities", true);',
1,
),
('Cipher.getInstance("RSA/NONE/NoPadding", "BC");', 1),
('Cipher.getInstance("RSA/NONE/NoPadding");', 1),
('Test test = MessageDigest.getInstance("MD2");', 1),
('Test test = MessageDigest.getInstance("MD2", "SUN");', 1),
("byte[] test = DigestUtils.getMd2Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2(foo.getBytes());", 1),
("byte[] test = DigestUtils.md2Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD4");', 1),
('Test test = MessageDigest.getInstance("MD4", "SUN");', 1),
("byte[] test = DigestUtils.getMd4Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4(foo.getBytes());", 1),
("byte[] test = DigestUtils.md4Hex(foo.getBytes());", 1),
('Test test = MessageDigest.getInstance("MD5");', 1),
('Test test = MessageDigest.getInstance("md5");', 1),
('Test test = MessageDigest.getInstance("MD5", "SUN");', 1),
("byte[] test = DigestUtils.getMd5Digest().digest(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5(foo.getBytes());", 1),
("byte[] test = DigestUtils.md5Hex(foo.getBytes());", 1),
('final Cipher cipher = Cipher.getInstance("RC2");', 1),
('final Cipher cipher = Cipher.getInstance("RC4");', 1),
('final Cipher cipher = Cipher.getInstance("ARCFOUR");', 1),
("WebView.setWebContentsDebuggingEnabled(true);", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_READABLE );", 1),
("mContext.openFileOutput( foo, Context.MODE_WORLD_WRITABLE );", 1),
("settings.setAllowFileAccess(true);", 1),
("settings.setAllowFileAccessFromURLs(true);", 1),
("settings.setAllowUniversalAccessFromURLs(true);", 1),
("import org.apache.http.conn.ssl.NoopHostnameVerifier;", 1),
("ALLOW_ALL_HOSTNAME_VERIFIER", 1),
("HostnameVerifier allowAll = new AllowAllHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NullHostnameVerifier();", 1),
("HostnameVerifier allowAll = new NoopHostnameVerifier();", 1),
("ssf.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);", 1),
(
'String dir = System.getProperty("dir");Runtime rt = Runtime.getRuntime(); Process proc = rt.exec(new String[] {"sh", "-c", "ls " + dir});',
1,
),
("Runtime.getRuntime().exec(userInput)", 1),
]
class TestJavaInsecureCodeDetector(InsecureCodeDetectorTest):
async def test_java_regex_patterns(self) -> None: | await self.run_regex_tests(Language.JAVA, JAVA_TEST_CASES) | 0 | 2023-12-06 21:29:41+00:00 | 4k |
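Note: run_regex_tests is inherited from the test base class and not shown. A standalone sketch of the pattern-count check that these (input, expected_matches) pairs imply, with an illustrative rule list far smaller than the project's real one.

import re

RULES = [
    re.compile(r'Cipher\.getInstance\("(?:DES|RC[24]|ARCFOUR)'),
    re.compile(r'MessageDigest\.getInstance\("(?:MD[245]|SHA-?1)"', re.IGNORECASE),
]

def count_insecure_matches(code: str) -> int:
    return sum(len(rule.findall(code)) for rule in RULES)

assert count_insecure_matches('final Cipher cipher = Cipher.getInstance("RC4");') == 1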
allenai/unified-io-2 | demo/utils/video_utils.py | [
{
"identifier": "get_video_length",
"path": "create_data/utils.py",
"snippet": "def get_video_length(video_path):\n \"\"\"this gets just the video stream length (in the case audio stream is longer)\"\"\"\n # E.g. k700-2020/train/watering plants/af3epdZsrTc_000178_000188.mp4\n # if audio is shorter th... | import os.path
import random
import string
import subprocess
import time
import gradio as gr
from create_data.utils import (
get_video_length,
create_audio_from_video,
extract_frames_from_video,
BUFFER_FROM_END,
)
from demo.utils.audio_utils import extract_spectrograms_from_audio | 2,704 |
__all__ = ["load_video"]
def extract_frames_and_spectrograms_from_video(
video_file,
audio_dir,
video_length=None,
video_segment_length=None,
audio_segment_length=None,
times=None,
clip_start_time=0,
clip_end_time=None,
num_frames=None,
target_size=(256, 256),
*,
use_audio,
):
if times is None:
# get actual video length
if video_length is None:
video_length = get_video_length(video_file)
if video_length is None:
print(f"Couldn't get video length for {video_file}")
return None, None
if video_segment_length is None:
video_segment_length = video_length / num_frames
if video_length < (video_segment_length / 2.0) - BUFFER_FROM_END:
print(
f"Video is too short ({video_length}s is less than half the segment length of {video_segment_length}s segments"
)
return None, None
else:
# don't need this if times is given
video_length = None
# extract image frames
# t0 = perf_counter()
frames, boundaries = extract_frames_from_video(
video_file,
video_length,
video_segment_length,
times=times,
clip_start_time=clip_start_time,
clip_end_time=clip_end_time,
num_frames=num_frames,
multiprocess=False,
resize=True,
target_size=target_size,
)
# print(f"Load video in {perf_counter() - t0} seconds in total")
spectrograms = None
if use_audio:
# expects the audio file to be created already (since it takes some time)
audio_file = create_audio_from_video(video_file, audio_dir, force=True)
if os.path.exists(audio_file): # in case video w/o audio
# extract audio segments
|
__all__ = ["load_video"]
def extract_frames_and_spectrograms_from_video(
video_file,
audio_dir,
video_length=None,
video_segment_length=None,
audio_segment_length=None,
times=None,
clip_start_time=0,
clip_end_time=None,
num_frames=None,
target_size=(256, 256),
*,
use_audio,
):
if times is None:
# get actual video length
if video_length is None:
video_length = get_video_length(video_file)
if video_length is None:
print(f"Couldn't get video length for {video_file}")
return None, None
if video_segment_length is None:
video_segment_length = video_length / num_frames
if video_length < (video_segment_length / 2.0) - BUFFER_FROM_END:
print(
f"Video is too short ({video_length}s is less than half the segment length of {video_segment_length}s segments"
)
return None, None
else:
# don't need this if times is given
video_length = None
# extract image frames
# t0 = perf_counter()
frames, boundaries = extract_frames_from_video(
video_file,
video_length,
video_segment_length,
times=times,
clip_start_time=clip_start_time,
clip_end_time=clip_end_time,
num_frames=num_frames,
multiprocess=False,
resize=True,
target_size=target_size,
)
# print(f"Load video in {perf_counter() - t0} seconds in total")
spectrograms = None
if use_audio:
# expects the audio file to be created already (since it takes some time)
audio_file = create_audio_from_video(video_file, audio_dir, force=True)
if os.path.exists(audio_file): # in case video w/o audio
# extract audio segments | spectrograms = extract_spectrograms_from_audio( | 4 | 2023-12-12 20:23:33+00:00 | 4k |
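Note: create_audio_from_video is referenced above but not shown. A minimal ffmpeg-based sketch of such a helper; the mono/16 kHz settings are assumptions, not the repo's actual parameters.

import os
import subprocess

def extract_audio(video_path: str, audio_dir: str) -> str:
    os.makedirs(audio_dir, exist_ok=True)
    name = os.path.splitext(os.path.basename(video_path))[0] + ".wav"
    audio_path = os.path.join(audio_dir, name)
    subprocess.run(
        ["ffmpeg", "-y", "-i", video_path, "-vn", "-ac", "1", "-ar", "16000", audio_path],
        check=True, capture_output=True,
    )
    return audio_path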
zju3dv/EasyVolcap | scripts/zjumocap/cleanup_instant-nb_data.py | [
{
"identifier": "parallel_execution",
"path": "easyvolcap/utils/parallel_utils.py",
"snippet": "def parallel_execution(*args, action: Callable, num_workers=32, print_progress=False, sequential=False, async_return=False, desc=None, use_process=False, **kwargs):\n \"\"\"\n Executes a given function ... | import os
import argparse
import sys
from glob import glob
from os.path import join
from easyvolcap.utils.parallel_utils import parallel_execution
from easyvolcap.utils.console_utils import run, log, run_if_not_exists | 1,816 |
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--source_root', default='/nas/home/xuzhen/datasets/my_zjumocap')
parser.add_argument('--target_root', default='data/my_zjumocap')
parser.add_argument('--dry_run', action='store_true')
parser.add_argument('--human', default='my_377')
args = parser.parse_args()
args.source_root = join(args.source_root, args.human)
args.target_root = join(args.target_root, args.human)
# grab all image files
|
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--source_root', default='/nas/home/xuzhen/datasets/my_zjumocap')
parser.add_argument('--target_root', default='data/my_zjumocap')
parser.add_argument('--dry_run', action='store_true')
parser.add_argument('--human', default='my_377')
args = parser.parse_args()
args.source_root = join(args.source_root, args.human)
args.target_root = join(args.target_root, args.human)
# grab all image files | log(f'grabbing all image files, only second level or more') | 2 | 2023-12-07 08:53:42+00:00 | 4k |
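Note: a hedged sketch of how this cleanup script might use the parallel_execution helper from its context snippet to mirror files from source_root to target_root; the copy action and the 'images' subdirectory are our illustration, not the repo's code.

import os
import shutil

def copy_one(src: str, tgt: str):
    os.makedirs(os.path.dirname(tgt), exist_ok=True)
    if not os.path.exists(tgt):
        shutil.copy2(src, tgt)

srcs = sorted(glob(join(args.source_root, 'images', '**', '*.jpg'), recursive=True))
tgts = [src.replace(args.source_root, args.target_root) for src in srcs]
parallel_execution(srcs, tgts, action=copy_one, print_progress=True)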
minghanqin/LangSplat | scene/dataset_readers.py | [
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T... | import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud | 3,355 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info: | W2C = getWorld2View2(cam.R, cam.T) | 7 | 2023-12-11 06:33:35+00:00 | 4k |
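Note: the loop above is cut off at its first statement. In the widely copied 3D Gaussian Splatting implementation, getNerfppNorm finishes as below; shown for context and worth verifying against this repo.

    for cam in cam_info:
        W2C = getWorld2View2(cam.R, cam.T)
        C2W = np.linalg.inv(W2C)
        cam_centers.append(C2W[:3, 3:4])

    center, diagonal = get_center_and_diag(cam_centers)
    radius = diagonal * 1.1
    translate = -center
    return {"translate": translate, "radius": radius}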
alibaba/animate-anything | utils/dataset.py | [
{
"identifier": "sensible_buckets",
"path": "utils/bucketing.py",
"snippet": "def sensible_buckets(m_width, m_height, w, h, min_size=192):\n if h > w:\n w = resolve_bucket(m_width, h, w)\n w = closest_bucket(m_width, w, 'down', min_size=min_size)\n return w, m_height\n if h < ... | import os
import decord
import numpy as np
import random
import json
import torchvision
import torchvision.transforms as T
import torch
import traceback
from glob import glob
from PIL import Image
from itertools import islice
from pathlib import Path
from .bucketing import sensible_buckets
from .common import get_moved_area_mask, calculate_motion_score
from torch.utils.data import Dataset
from einops import rearrange, repeat | 2,833 |
else:
vr = decord.VideoReader(vid_path)
video = get_frame_batch(vr)
return video, vr
# https://github.com/ExponentialML/Video-BLIP2-Preprocessor
class VideoBLIPDataset(Dataset):
def __init__(
self,
tokenizer = None,
width: int = 256,
height: int = 256,
n_sample_frames: int = 4,
sample_start_idx: int = 1,
fps: int = 1,
json_path: str ="",
json_data = None,
vid_data_key: str = "video_path",
preprocessed: bool = False,
use_bucketing: bool = False,
cache_latents: bool = False,
motion_threshold = 50,
**kwargs
):
self.vid_types = (".mp4", ".avi", ".mov", ".webm", ".flv", ".mjpeg")
self.use_bucketing = use_bucketing
self.tokenizer = tokenizer
self.preprocessed = preprocessed
self.vid_data_key = vid_data_key
self.train_data = self.load_from_json(json_path, json_data)
self.cache_latents = cache_latents
self.motion_threshold = motion_threshold
self.width = width
self.height = height
self.n_sample_frames = n_sample_frames
self.sample_start_idx = sample_start_idx
self.fps = fps
self.transform = T.Compose([
#T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False)
T.Resize(min(height, width), antialias=False),
T.CenterCrop([height, width])
])
def build_json(self, json_data):
extended_data = []
for data in json_data['data']:
for nested_data in data['data']:
self.build_json_dict(
data,
nested_data,
extended_data
)
json_data = extended_data
return json_data
def build_json_dict(self, data, nested_data, extended_data):
clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None
extended_data.append({
self.vid_data_key: data[self.vid_data_key],
'frame_index': nested_data['frame_index'],
'prompt': nested_data['prompt'],
'clip_path': clip_path
})
def load_from_json(self, path, json_data):
try:
with open(path) as jpath:
print(f"Loading JSON from {path}")
json_data = json.load(jpath)
return self.build_json(json_data)
except:
traceback.print_exc()
self.train_data = []
print("Non-existant JSON path. Skipping.")
def validate_json(self, base_path, path):
return os.path.exists(f"{base_path}/{path}")
def get_frame_buckets(self, vr):
_, h, w = vr[0].shape
width, height = sensible_buckets(self.width, self.height, h, w)
resize = T.transforms.Resize((height, width), antialias=True)
return resize
def train_data_batch(self, index):
vid_data = self.train_data[index]
# Get video prompt
prompt = vid_data['prompt']
# If we are training on individual clips.
if 'clip_path' in self.train_data[index] and \
self.train_data[index]['clip_path'] is not None:
clip_path = vid_data['clip_path']
else:
clip_path = vid_data[self.vid_data_key]
# Get the frame of the current index.
self.sample_start_idx = vid_data['frame_index']
cache_path = os.path.splitext(clip_path)[0] + '.pt'
if self.cache_latents and os.path.exists(cache_path):
return torch.load(cache_path, map_location='cpu')
vr = decord.VideoReader(clip_path)
video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)
prompt_ids = get_prompt_ids(prompt, self.tokenizer)
example = {
"pixel_values": normalize_input(video),
"prompt_ids": prompt_ids,
"text_prompt": prompt,
'dataset': self.__getname__(),
'cache_path': cache_path,
}
mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())
example['mask'] = mask
|
decord.bridge.set_bridge('torch')
# Inspired by the VideoMAE repository.
def normalize_input(
item,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
use_simple_norm=True
):
if item.dtype == torch.uint8 and not use_simple_norm:
item = rearrange(item, 'f c h w -> f h w c')
item = item.float() / 255.0
mean = torch.tensor(mean)
std = torch.tensor(std)
out = rearrange((item - mean) / std, 'f h w c -> f c h w')
return out
else:
# Normalize between -1 & 1
item = rearrange(item, 'f c h w -> f h w c')
return rearrange(item / 127.5 - 1.0, 'f h w c -> f c h w')
def get_prompt_ids(prompt, tokenizer):
if tokenizer is None:
prompt_ids = torch.tensor([0])
else:
prompt_ids = tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
return prompt_ids
def read_caption_file(caption_file):
with open(caption_file, 'r', encoding="utf8") as t:
return t.read()
def get_text_prompt(
text_prompt: str = '',
fallback_prompt: str= '',
file_path:str = '',
ext_types=['.mp4'],
use_caption=False
):
try:
if use_caption:
if len(text_prompt) > 1: return text_prompt
caption_file = ''
# Use caption on per-video basis (One caption PER video)
for ext in ext_types:
maybe_file = file_path.replace(ext, '.txt')
if maybe_file.endswith(ext_types): continue
if os.path.exists(maybe_file):
caption_file = maybe_file
break
if os.path.exists(caption_file):
return read_caption_file(caption_file)
# Return fallback prompt if no conditions are met.
return fallback_prompt
return text_prompt
except:
print(f"Couldn't read prompt caption for {file_path}. Using fallback.")
return fallback_prompt
def get_frame_batch(max_frames, sample_fps, vr, transform):
native_fps = vr.get_avg_fps()
max_range = len(vr)
frame_step = max(1, round(native_fps / sample_fps))
frame_range = range(0, max_range, frame_step)
if len(frame_range) < max_frames:
        frame_range = np.linspace(0, max_range - 1, max_frames).astype(int)  # `frame_number` was undefined; sample from frame 0
#start = random.randint(0, len(frame_range) - max_frames)
start = len(frame_range) - max_frames
frame_range_indices = list(frame_range)[start:start+max_frames]
frames = vr.get_batch(frame_range_indices)
video = rearrange(frames, "f h w c -> f c h w")
video = transform(video)
return video
def process_video(vid_path, use_bucketing, w, h, get_frame_buckets, get_frame_batch):
if use_bucketing:
vr = decord.VideoReader(vid_path)
resize = get_frame_buckets(vr)
video = get_frame_batch(vr, resize=resize)
else:
vr = decord.VideoReader(vid_path)
video = get_frame_batch(vr)
return video, vr
# https://github.com/ExponentialML/Video-BLIP2-Preprocessor
class VideoBLIPDataset(Dataset):
def __init__(
self,
tokenizer = None,
width: int = 256,
height: int = 256,
n_sample_frames: int = 4,
sample_start_idx: int = 1,
fps: int = 1,
json_path: str ="",
json_data = None,
vid_data_key: str = "video_path",
preprocessed: bool = False,
use_bucketing: bool = False,
cache_latents: bool = False,
motion_threshold = 50,
**kwargs
):
self.vid_types = (".mp4", ".avi", ".mov", ".webm", ".flv", ".mjpeg")
self.use_bucketing = use_bucketing
self.tokenizer = tokenizer
self.preprocessed = preprocessed
self.vid_data_key = vid_data_key
self.train_data = self.load_from_json(json_path, json_data)
self.cache_latents = cache_latents
self.motion_threshold = motion_threshold
self.width = width
self.height = height
self.n_sample_frames = n_sample_frames
self.sample_start_idx = sample_start_idx
self.fps = fps
self.transform = T.Compose([
#T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False)
T.Resize(min(height, width), antialias=False),
T.CenterCrop([height, width])
])
def build_json(self, json_data):
extended_data = []
for data in json_data['data']:
for nested_data in data['data']:
self.build_json_dict(
data,
nested_data,
extended_data
)
json_data = extended_data
return json_data
def build_json_dict(self, data, nested_data, extended_data):
clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None
extended_data.append({
self.vid_data_key: data[self.vid_data_key],
'frame_index': nested_data['frame_index'],
'prompt': nested_data['prompt'],
'clip_path': clip_path
})
def load_from_json(self, path, json_data):
try:
with open(path) as jpath:
print(f"Loading JSON from {path}")
json_data = json.load(jpath)
return self.build_json(json_data)
except:
traceback.print_exc()
self.train_data = []
print("Non-existant JSON path. Skipping.")
def validate_json(self, base_path, path):
return os.path.exists(f"{base_path}/{path}")
def get_frame_buckets(self, vr):
_, h, w = vr[0].shape
width, height = sensible_buckets(self.width, self.height, h, w)
resize = T.transforms.Resize((height, width), antialias=True)
return resize
def train_data_batch(self, index):
vid_data = self.train_data[index]
# Get video prompt
prompt = vid_data['prompt']
# If we are training on individual clips.
if 'clip_path' in self.train_data[index] and \
self.train_data[index]['clip_path'] is not None:
clip_path = vid_data['clip_path']
else:
clip_path = vid_data[self.vid_data_key]
# Get the frame of the current index.
self.sample_start_idx = vid_data['frame_index']
cache_path = os.path.splitext(clip_path)[0] + '.pt'
if self.cache_latents and os.path.exists(cache_path):
return torch.load(cache_path, map_location='cpu')
vr = decord.VideoReader(clip_path)
video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)
prompt_ids = get_prompt_ids(prompt, self.tokenizer)
example = {
"pixel_values": normalize_input(video),
"prompt_ids": prompt_ids,
"text_prompt": prompt,
'dataset': self.__getname__(),
'cache_path': cache_path,
}
mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())
example['mask'] = mask | example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy()) | 2 | 2023-12-07 08:26:29+00:00 | 4k |
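Note: calculate_motion_score (used in the next_line field) is imported from .common and not shown. A minimal stand-in that scores motion as the mean absolute inter-frame difference; illustrative, not the repo's definition.

import numpy as np

def simple_motion_score(frames: np.ndarray) -> float:
    # frames: (f, h, w, c) array of video frames
    frames = frames.astype(np.float32)
    return float(np.abs(frames[1:] - frames[:-1]).mean())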
SciPhi-AI/agent-search | agent_search/app/server.py | [
{
"identifier": "load_config",
"path": "agent_search/core/utils.py",
"snippet": "def load_config(config_dir: Optional[str] = None) -> configparser.ConfigParser:\n \"\"\"Load the configuration file.\"\"\"\n config = configparser.ConfigParser()\n if not config_dir:\n config_dir = get_data_... | import logging
import time
import uvicorn
from typing import Optional
from pydantic import BaseModel
from agent_search.core.utils import load_config, select_top_urls
from agent_search.search import WebSearchEngine
from fastapi import FastAPI, HTTPException | 3,005 |
# Attempt to import uvicorn and FastAPI
try:
except ImportError as e:
raise ImportError(
f"Error: {e}, Note - both uvicorn and FastAPI are required to run the server."
)
logger = logging.getLogger(__name__)
class SearchServer:
def __init__(self):
self.client = WebSearchEngine()
def run(
self,
query="What is a lagrangian?",
limit_broad_results=1_000,
limit_deduped_url_results=50,
limit_hierarchical_url_results=50,
limit_final_pagerank_results=20,
url_contains_filter=None,
):
"""Run a search query using the WebSearchEngine client"""
query_vector = self.client.get_query_vector(query)
broad_results = self.client.similarity_search(
query_vector=query_vector, limit=limit_broad_results
)
if not url_contains_filter:
url_contains_filter = []
deduped_url_results = select_top_urls(
broad_results,
max_urls=limit_deduped_url_results,
url_contains=url_contains_filter,
)
hierarchical_url_results = (
self.client.hierarchical_similarity_reranking(
query_vector=query_vector,
urls=deduped_url_results,
limit=limit_hierarchical_url_results,
)
)
pagerank_reranked_results = self.client.pagerank_reranking(
hierarchical_url_results
)[:limit_final_pagerank_results]
return pagerank_reranked_results
class SearchQuery(BaseModel):
"""A search query data model"""
query: str
limit_broad_results: Optional[int] = 1_000
limit_deduped_url_results: Optional[int] = 100
limit_hierarchical_url_results: Optional[int] = 25
limit_final_pagerank_results: Optional[int] = 10
app = FastAPI()
search_runner = SearchServer()
def check_limits(query: SearchQuery):
"""Check if the limit parameters exceed three times their default values"""
if query.limit_broad_results > 3 * 1_000:
raise ValueError(
"limit_broad_results exceeds 3 times its default value"
)
if query.limit_deduped_url_results > 3 * 100:
raise ValueError(
"limit_deduped_url_results exceeds 3 times its default value"
)
if query.limit_hierarchical_url_results > 3 * 25:
raise ValueError(
"limit_hierarchical_url_results exceeds 3 times its default value"
)
if query.limit_final_pagerank_results > 3 * 10:
raise ValueError(
"limit_final_pagerank_results exceeds 3 times its default value"
)
@app.post("/search")
def run_search(query: SearchQuery):
"""Run a search query"""
try:
check_limits(query)
results = search_runner.run(
query=query.query,
limit_broad_results=query.limit_broad_results,
limit_deduped_url_results=query.limit_deduped_url_results,
limit_hierarchical_url_results=query.limit_hierarchical_url_results,
limit_final_pagerank_results=query.limit_final_pagerank_results,
)
return {"results": results}
except ValueError as e:
logger.error(f"ValueError {e} = ", e)
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Exception {e} = ", e)
raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
def health_check():
"""Health check endpoint"""
return {"status": "ok"}
if __name__ == "__main__":
|
# Attempt to import uvicorn and FastAPI
try:
except ImportError as e:
raise ImportError(
f"Error: {e}, Note - both uvicorn and FastAPI are required to run the server."
)
logger = logging.getLogger(__name__)
class SearchServer:
def __init__(self):
self.client = WebSearchEngine()
def run(
self,
query="What is a lagrangian?",
limit_broad_results=1_000,
limit_deduped_url_results=50,
limit_hierarchical_url_results=50,
limit_final_pagerank_results=20,
url_contains_filter=None,
):
"""Run a search query using the WebSearchEngine client"""
query_vector = self.client.get_query_vector(query)
broad_results = self.client.similarity_search(
query_vector=query_vector, limit=limit_broad_results
)
if not url_contains_filter:
url_contains_filter = []
deduped_url_results = select_top_urls(
broad_results,
max_urls=limit_deduped_url_results,
url_contains=url_contains_filter,
)
hierarchical_url_results = (
self.client.hierarchical_similarity_reranking(
query_vector=query_vector,
urls=deduped_url_results,
limit=limit_hierarchical_url_results,
)
)
pagerank_reranked_results = self.client.pagerank_reranking(
hierarchical_url_results
)[:limit_final_pagerank_results]
return pagerank_reranked_results
class SearchQuery(BaseModel):
"""A search query data model"""
query: str
limit_broad_results: Optional[int] = 1_000
limit_deduped_url_results: Optional[int] = 100
limit_hierarchical_url_results: Optional[int] = 25
limit_final_pagerank_results: Optional[int] = 10
app = FastAPI()
search_runner = SearchServer()
def check_limits(query: SearchQuery):
"""Check if the limit parameters exceed three times their default values"""
if query.limit_broad_results > 3 * 1_000:
raise ValueError(
"limit_broad_results exceeds 3 times its default value"
)
if query.limit_deduped_url_results > 3 * 100:
raise ValueError(
"limit_deduped_url_results exceeds 3 times its default value"
)
if query.limit_hierarchical_url_results > 3 * 25:
raise ValueError(
"limit_hierarchical_url_results exceeds 3 times its default value"
)
if query.limit_final_pagerank_results > 3 * 10:
raise ValueError(
"limit_final_pagerank_results exceeds 3 times its default value"
)
@app.post("/search")
def run_search(query: SearchQuery):
"""Run a search query"""
try:
check_limits(query)
results = search_runner.run(
query=query.query,
limit_broad_results=query.limit_broad_results,
limit_deduped_url_results=query.limit_deduped_url_results,
limit_hierarchical_url_results=query.limit_hierarchical_url_results,
limit_final_pagerank_results=query.limit_final_pagerank_results,
)
return {"results": results}
except ValueError as e:
logger.error(f"ValueError {e} = ", e)
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
logger.error(f"Exception {e} = ", e)
raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
def health_check():
"""Health check endpoint"""
return {"status": "ok"}
if __name__ == "__main__": | config = load_config()["server"] | 0 | 2023-12-11 17:41:03+00:00 | 4k |
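Note: an example client call against the /search endpoint defined above; the host and port are assumptions, and the payload fields mirror the SearchQuery model.

import requests

response = requests.post(
    "http://localhost:8000/search",
    json={"query": "What is a lagrangian?", "limit_final_pagerank_results": 5},
)
response.raise_for_status()
print(response.json()["results"])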
yohanshin/WHAM | lib/models/layers/modules.py | [
{
"identifier": "constants",
"path": "configs/constants.py",
"snippet": "IMG_FEAT_DIM = {\n 'resnet': 2048,\n 'vit': 1024\n}\nN_JOINTS = 17\n PARSED_DATA = f'{root}/parsed_data'\n THREEDPW_PTH = f'{root}/3DPW'\n RICH_PTH = f'{root}/RICH'\n EMDB_PTH = f'{root}/EMDB'\n NUM_JOINTS = N_... | import torch
import numpy as np
from torch import nn
from configs import constants as _C
from .utils import rollout_global_motion
from lib.utils.transforms import axis_angle_to_matrix | 3,193 | pred_list = [init[..., :self.n_joints * 3]]
motion_context_list = []
for i in range(self.f):
(pred_kp3d, ), motion_context, h0 = self.regressor(x[:, [i]], pred_list[-1:], h0)
motion_context_list.append(motion_context)
pred_list.append(pred_kp3d)
pred_kp3d = torch.cat(pred_list[1:], dim=1).view(self.b, self.f, -1, 3)
motion_context = torch.cat(motion_context_list, dim=1)
# Merge 3D keypoints with motion context
motion_context = torch.cat((motion_context, pred_kp3d.reshape(self.b, self.f, -1)), dim=-1)
return pred_kp3d, motion_context
class TrajectoryDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
# Trajectory regressor
self.regressor = Regressor(
d_embed, d_embed, [3, 6], 12, rnn_type, n_layers, )
def forward(self, x, root, cam_a, h0=None):
""" Forward pass of trajectory decoder.
"""
b, f = x.shape[:2]
pred_root_list, pred_vel_list = [root[:, :1]], []
for i in range(f):
# Global coordinate estimation
(pred_rootv, pred_rootr), _, h0 = self.regressor(
x[:, [i]], [pred_root_list[-1], cam_a[:, [i]]], h0)
pred_root_list.append(pred_rootr)
pred_vel_list.append(pred_rootv)
pred_root = torch.cat(pred_root_list, dim=1).view(b, f + 1, -1)
pred_vel = torch.cat(pred_vel_list, dim=1).view(b, f, -1)
return pred_root, pred_vel
class MotionDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
self.n_pose = 24
# SMPL pose initialization
self.neural_init = NeuralInitialization(len(_C.BMODEL.MAIN_JOINTS) * 6, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [self.n_pose * 6, 10, 3, 4], self.n_pose * 6, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion decoder.
"""
b, f = x.shape[:2]
h0 = self.neural_init(init[:, :, _C.BMODEL.MAIN_JOINTS].reshape(b, 1, -1))
# Recursive prediction of SMPL parameters
pred_pose_list = [init.reshape(b, 1, -1)]
pred_shape_list, pred_cam_list, pred_contact_list = [], [], []
for i in range(f):
# Camera coordinate estimation
(pred_pose, pred_shape, pred_cam, pred_contact), _, h0 = self.regressor(x[:, [i]], pred_pose_list[-1:], h0)
pred_pose_list.append(pred_pose)
pred_shape_list.append(pred_shape)
pred_cam_list.append(pred_cam)
pred_contact_list.append(pred_contact)
pred_pose = torch.cat(pred_pose_list[1:], dim=1).view(b, f, -1)
pred_shape = torch.cat(pred_shape_list, dim=1).view(b, f, -1)
pred_cam = torch.cat(pred_cam_list, dim=1).view(b, f, -1)
pred_contact = torch.cat(pred_contact_list, dim=1).view(b, f, -1)
return pred_pose, pred_shape, pred_cam, pred_contact
class TrajectoryRefiner(nn.Module):
def __init__(self,
d_embed,
d_hidden,
rnn_type,
n_layers):
super().__init__()
d_input = d_embed + 12
self.refiner = Regressor(
d_input, d_hidden, [6, 3], 9, rnn_type, n_layers)
def forward(self, context, pred_vel, output, cam_angvel, return_y_up):
b, f = context.shape[:2]
# Register values
pred_pose = output['pose'].clone().detach()
pred_root = output['poses_root_r6d'].clone().detach()
feet = output['feet'].clone().detach()
contact = output['contact'].clone().detach()
        feet_vel = torch.cat((torch.zeros_like(feet[:, :1]), feet[:, 1:] - feet[:, :-1]), dim=1) * 30  # per-frame displacement -> velocity, assuming 30 FPS
feet = (feet_vel * contact.unsqueeze(-1)).reshape(b, f, -1) # Velocity input
inpt_feat = torch.cat([context, feet], dim=-1)
(delta_root, delta_vel), _, _ = self.refiner(inpt_feat, [pred_root[:, 1:], pred_vel], h0=None)
pred_root[:, 1:] = pred_root[:, 1:] + delta_root
pred_vel = pred_vel + delta_vel
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Regressor(nn.Module):
def __init__(self, in_dim, hid_dim, out_dims, init_dim, layer='LSTM', n_layers=2, n_iters=1):
super().__init__()
self.n_outs = len(out_dims)
self.rnn = getattr(nn, layer.upper())(
in_dim + init_dim, hid_dim, n_layers,
bidirectional=False, batch_first=True, dropout=0.3)
for i, out_dim in enumerate(out_dims):
setattr(self, 'declayer%d'%i, nn.Linear(hid_dim, out_dim))
nn.init.xavier_uniform_(getattr(self, 'declayer%d'%i).weight, gain=0.01)
def forward(self, x, inits, h0):
xc = torch.cat([x, *inits], dim=-1)
xc, h0 = self.rnn(xc, h0)
preds = []
for j in range(self.n_outs):
out = getattr(self, 'declayer%d'%j)(xc)
preds.append(out)
return preds, xc, h0
class NeuralInitialization(nn.Module):
def __init__(self, in_dim, hid_dim, layer, n_layers):
super().__init__()
out_dim = hid_dim
self.n_layers = n_layers
self.num_inits = int(layer.upper() == 'LSTM') + 1
out_dim *= self.num_inits * n_layers
self.linear1 = nn.Linear(in_dim, hid_dim)
self.linear2 = nn.Linear(hid_dim, hid_dim * self.n_layers)
self.linear3 = nn.Linear(hid_dim * self.n_layers, out_dim)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, x):
b = x.shape[0]
out = self.linear3(self.relu2(self.linear2(self.relu1(self.linear1(x)))))
out = out.view(b, self.num_inits, self.n_layers, -1).permute(1, 2, 0, 3).contiguous()
if self.num_inits == 2:
return tuple([_ for _ in out])
return out[0]
class Integrator(nn.Module):
def __init__(self, in_channel, out_channel, hid_channel=1024):
super().__init__()
self.layer1 = nn.Linear(in_channel, hid_channel)
self.relu1 = nn.ReLU()
self.dr1 = nn.Dropout(0.1)
self.layer2 = nn.Linear(hid_channel, hid_channel)
self.relu2 = nn.ReLU()
self.dr2 = nn.Dropout(0.1)
self.layer3 = nn.Linear(hid_channel, out_channel)
def forward(self, x, feat):
res = x
mask = (feat != 0).all(dim=-1).all(dim=-1)
out = torch.cat((x, feat), dim=-1)
out = self.layer1(out)
out = self.relu1(out)
out = self.dr1(out)
out = self.layer2(out)
out = self.relu2(out)
out = self.dr2(out)
out = self.layer3(out)
out[mask] = out[mask] + res[mask]
return out
class MotionEncoder(nn.Module):
def __init__(self,
in_dim,
d_embed,
pose_dr,
rnn_type,
n_layers,
n_joints):
super().__init__()
self.n_joints = n_joints
self.embed_layer = nn.Linear(in_dim, d_embed)
self.pos_drop = nn.Dropout(pose_dr)
# Keypoints initializer
self.neural_init = NeuralInitialization(n_joints * 3 + in_dim, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [n_joints * 3], n_joints * 3, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion encoder.
"""
self.b, self.f = x.shape[:2]
x = self.embed_layer(x.reshape(self.b, self.f, -1))
x = self.pos_drop(x)
h0 = self.neural_init(init)
pred_list = [init[..., :self.n_joints * 3]]
motion_context_list = []
for i in range(self.f):
(pred_kp3d, ), motion_context, h0 = self.regressor(x[:, [i]], pred_list[-1:], h0)
motion_context_list.append(motion_context)
pred_list.append(pred_kp3d)
pred_kp3d = torch.cat(pred_list[1:], dim=1).view(self.b, self.f, -1, 3)
motion_context = torch.cat(motion_context_list, dim=1)
# Merge 3D keypoints with motion context
motion_context = torch.cat((motion_context, pred_kp3d.reshape(self.b, self.f, -1)), dim=-1)
return pred_kp3d, motion_context
class TrajectoryDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
# Trajectory regressor
self.regressor = Regressor(
d_embed, d_embed, [3, 6], 12, rnn_type, n_layers, )
def forward(self, x, root, cam_a, h0=None):
""" Forward pass of trajectory decoder.
"""
b, f = x.shape[:2]
pred_root_list, pred_vel_list = [root[:, :1]], []
for i in range(f):
# Global coordinate estimation
(pred_rootv, pred_rootr), _, h0 = self.regressor(
x[:, [i]], [pred_root_list[-1], cam_a[:, [i]]], h0)
pred_root_list.append(pred_rootr)
pred_vel_list.append(pred_rootv)
pred_root = torch.cat(pred_root_list, dim=1).view(b, f + 1, -1)
pred_vel = torch.cat(pred_vel_list, dim=1).view(b, f, -1)
return pred_root, pred_vel
class MotionDecoder(nn.Module):
def __init__(self,
d_embed,
rnn_type,
n_layers):
super().__init__()
self.n_pose = 24
# SMPL pose initialization
self.neural_init = NeuralInitialization(len(_C.BMODEL.MAIN_JOINTS) * 6, d_embed, rnn_type, n_layers)
# 3d keypoints regressor
self.regressor = Regressor(
d_embed, d_embed, [self.n_pose * 6, 10, 3, 4], self.n_pose * 6, rnn_type, n_layers)
def forward(self, x, init):
""" Forward pass of motion decoder.
"""
b, f = x.shape[:2]
h0 = self.neural_init(init[:, :, _C.BMODEL.MAIN_JOINTS].reshape(b, 1, -1))
# Recursive prediction of SMPL parameters
pred_pose_list = [init.reshape(b, 1, -1)]
pred_shape_list, pred_cam_list, pred_contact_list = [], [], []
for i in range(f):
# Camera coordinate estimation
(pred_pose, pred_shape, pred_cam, pred_contact), _, h0 = self.regressor(x[:, [i]], pred_pose_list[-1:], h0)
pred_pose_list.append(pred_pose)
pred_shape_list.append(pred_shape)
pred_cam_list.append(pred_cam)
pred_contact_list.append(pred_contact)
pred_pose = torch.cat(pred_pose_list[1:], dim=1).view(b, f, -1)
pred_shape = torch.cat(pred_shape_list, dim=1).view(b, f, -1)
pred_cam = torch.cat(pred_cam_list, dim=1).view(b, f, -1)
pred_contact = torch.cat(pred_contact_list, dim=1).view(b, f, -1)
return pred_pose, pred_shape, pred_cam, pred_contact
class TrajectoryRefiner(nn.Module):
def __init__(self,
d_embed,
d_hidden,
rnn_type,
n_layers):
super().__init__()
d_input = d_embed + 12
self.refiner = Regressor(
d_input, d_hidden, [6, 3], 9, rnn_type, n_layers)
def forward(self, context, pred_vel, output, cam_angvel, return_y_up):
b, f = context.shape[:2]
# Register values
pred_pose = output['pose'].clone().detach()
pred_root = output['poses_root_r6d'].clone().detach()
feet = output['feet'].clone().detach()
contact = output['contact'].clone().detach()
        feet_vel = torch.cat((torch.zeros_like(feet[:, :1]), feet[:, 1:] - feet[:, :-1]), dim=1) * 30  # per-frame displacement -> velocity, assuming 30 FPS
feet = (feet_vel * contact.unsqueeze(-1)).reshape(b, f, -1) # Velocity input
inpt_feat = torch.cat([context, feet], dim=-1)
(delta_root, delta_vel), _, _ = self.refiner(inpt_feat, [pred_root[:, 1:], pred_vel], h0=None)
pred_root[:, 1:] = pred_root[:, 1:] + delta_root
pred_vel = pred_vel + delta_vel
| root_world, trans_world = rollout_global_motion(pred_root, pred_vel) | 1 | 2023-12-08 09:17:54+00:00 | 4k |
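Note: rollout_global_motion is imported but not shown. A minimal sketch of the usual rollout idea: rotate each per-frame root velocity into world coordinates, then integrate with a cumulative sum; an illustration under those assumptions, not the repo's implementation.

import torch

def rollout_translation(root_world_rotmat: torch.Tensor, vel: torch.Tensor) -> torch.Tensor:
    # root_world_rotmat: (b, f, 3, 3) root orientation; vel: (b, f, 3) local velocity
    vel_world = torch.einsum('bfij,bfj->bfi', root_world_rotmat, vel)
    return torch.cumsum(vel_world, dim=1)  # (b, f, 3) world translation per frame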
Pointcept/PointTransformerV3 | serialization/default.py | [
{
"identifier": "xyz2key",
"path": "serialization/z_order.py",
"snippet": "def xyz2key(self, x, y, z, depth):\r\n key = torch.zeros_like(x)\r\n for i in range(depth):\r\n mask = 1 << i\r\n key = (\r\n key\r\n | ((x & mask) << (2 * i + 2))\r\n | ((y & ... | import torch
from .z_order import xyz2key as z_order_encode_
from .z_order import key2xyz as z_order_decode_
from .hilbert import encode as hilbert_encode_
from .hilbert import decode as hilbert_decode_
| 3,088 |
@torch.inference_mode()
def encode(grid_coord, batch=None, depth=16, order="z"):
assert order in {"z", "z-trans", "hilbert", "hilbert-trans"}
if order == "z":
code = z_order_encode(grid_coord, depth=depth)
elif order == "z-trans":
code = z_order_encode(grid_coord[:, [1, 0, 2]], depth=depth)
elif order == "hilbert":
code = hilbert_encode(grid_coord, depth=depth)
elif order == "hilbert-trans":
code = hilbert_encode(grid_coord[:, [1, 0, 2]], depth=depth)
else:
raise NotImplementedError
if batch is not None:
batch = batch.long()
code = batch << depth * 3 | code
return code
@torch.inference_mode()
def decode(code, depth=16, order="z"):
assert order in {"z", "hilbert"}
batch = code >> depth * 3
code = code & ((1 << depth * 3) - 1)
if order == "z":
grid_coord = z_order_decode(code, depth=depth)
elif order == "hilbert":
grid_coord = hilbert_decode(code, depth=depth)
else:
raise NotImplementedError
return grid_coord, batch
def z_order_encode(grid_coord: torch.Tensor, depth: int = 16):
x, y, z = grid_coord[:, 0].long(), grid_coord[:, 1].long(), grid_coord[:, 2].long()
# we block the support to batch, maintain batched code in Point class
code = z_order_encode_(x, y, z, b=None, depth=depth)
return code
def z_order_decode(code: torch.Tensor, depth):
x, y, z = z_order_decode_(code, depth=depth)
grid_coord = torch.stack([x, y, z], dim=-1) # (N, 3)
return grid_coord
def hilbert_encode(grid_coord: torch.Tensor, depth: int = 16):
return hilbert_encode_(grid_coord, num_dims=3, num_bits=depth)
def hilbert_decode(code: torch.Tensor, depth: int = 16):
|
@torch.inference_mode()
def encode(grid_coord, batch=None, depth=16, order="z"):
assert order in {"z", "z-trans", "hilbert", "hilbert-trans"}
if order == "z":
code = z_order_encode(grid_coord, depth=depth)
elif order == "z-trans":
code = z_order_encode(grid_coord[:, [1, 0, 2]], depth=depth)
elif order == "hilbert":
code = hilbert_encode(grid_coord, depth=depth)
elif order == "hilbert-trans":
code = hilbert_encode(grid_coord[:, [1, 0, 2]], depth=depth)
else:
raise NotImplementedError
if batch is not None:
batch = batch.long()
code = batch << depth * 3 | code
return code
@torch.inference_mode()
def decode(code, depth=16, order="z"):
assert order in {"z", "hilbert"}
batch = code >> depth * 3
code = code & ((1 << depth * 3) - 1)
if order == "z":
grid_coord = z_order_decode(code, depth=depth)
elif order == "hilbert":
grid_coord = hilbert_decode(code, depth=depth)
else:
raise NotImplementedError
return grid_coord, batch
def z_order_encode(grid_coord: torch.Tensor, depth: int = 16):
x, y, z = grid_coord[:, 0].long(), grid_coord[:, 1].long(), grid_coord[:, 2].long()
# we block the support to batch, maintain batched code in Point class
code = z_order_encode_(x, y, z, b=None, depth=depth)
return code
def z_order_decode(code: torch.Tensor, depth):
x, y, z = z_order_decode_(code, depth=depth)
grid_coord = torch.stack([x, y, z], dim=-1) # (N, 3)
return grid_coord
def hilbert_encode(grid_coord: torch.Tensor, depth: int = 16):
return hilbert_encode_(grid_coord, num_dims=3, num_bits=depth)
def hilbert_decode(code: torch.Tensor, depth: int = 16):
| return hilbert_decode_(code, num_dims=3, num_bits=depth)
| 1 | 2023-12-06 08:32:43+00:00 | 4k |
octo-models/octo | octo/model/octo_model.py | [
{
"identifier": "TextProcessor",
"path": "octo/data/utils/text_processing.py",
"snippet": "class TextProcessor(ABC):\n \"\"\"\n Base class for text tokenization or text embedding.\n \"\"\"\n\n @abstractmethod\n def encode(self, strings: Sequence[str]):\n raise NotImplementedError"
... | from functools import partial
from typing import Any, Optional, Tuple
from flax import struct
from flax.training import orbax_utils
from jax.experimental import multihost_utils
from jax.typing import ArrayLike
from octo.data.utils.text_processing import TextProcessor
from octo.model.components.action_heads import ActionHead
from octo.model.octo_module import OctoModule
from octo.utils.spec import ModuleSpec
from octo.utils.typing import Config, Data, Params, PRNGKey, Sequence
import json
import logging
import flax
import jax
import jax.numpy as jnp
import numpy as np
import orbax.checkpoint
import tensorflow as tf
import huggingface_hub | 3,241 |
Usage for pretraining:
>>> model = OctoModel.from_config(
config,
example_batch,
text_processor
) # initializes params
>>> # Continue as in finetuning example
See full usage examples in train.py and finetune.py.
"""
module: OctoModule = struct.field(pytree_node=False)
text_processor: TextProcessor = struct.field(pytree_node=False)
config: Config = struct.field(pytree_node=False)
params: Params
example_batch: Data
dataset_statistics: Optional[Data]
def create_tasks(
self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None
):
"""Creates tasks dict from goals and texts.
Args:
goals: if not None, dict of arrays with shape (batch_size, *)
texts: if not None, list of texts of length batch_size
Omit images to run the language-conditioned model, and omit texts to run the
goal-conditioned model.
"""
assert goals is not None or texts is not None
tasks = {"pad_mask_dict": {}}
if goals is not None:
tasks.update(goals)
tasks["pad_mask_dict"].update(
{k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}
)
else:
batch_size = len(texts)
tasks.update(
{
k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)
for k, v in self.example_batch["task"].items()
if k not in ("pad_mask_dict", "language_instruction")
}
)
tasks["pad_mask_dict"].update(
{
k: np.zeros(batch_size, dtype=bool)
for k in tasks.keys()
if k != "pad_mask_dict"
}
)
if texts is not None:
assert self.text_processor is not None
tasks["language_instruction"] = texts
tasks["pad_mask_dict"]["language_instruction"] = np.ones(
len(texts), dtype=bool
)
else:
batch_size = jax.tree_leaves(goals)[0].shape[0]
tasks["language_instruction"] = [""] * batch_size
tasks["pad_mask_dict"]["language_instruction"] = np.zeros(
batch_size, dtype=bool
)
if self.text_processor is not None:
tasks["language_instruction"] = self.text_processor.encode(
tasks["language_instruction"]
)
else:
del tasks["language_instruction"]
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return tasks
@partial(jax.jit, static_argnames=("train",))
def run_transformer(
self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False
):
"""Runs the transformer, but does shape checking on the inputs.
Args:
observations: dictionary of arrays of shape (batch_size, window_size, *shape).
Shape must be consistent with self.example_batch["observation"]
tasks: dict of tasks of shape (batch_size, *shape)
Shape must be consistent with self.example_batch["task"]
pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding
train: whether to run in train mode
"""
_verify_shapes(
observations,
"observations",
self.example_batch["observation"],
starting_dim=2,
)
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return self.module.apply(
{"params": self.params},
observations,
tasks,
pad_mask,
train=train,
method="octo_transformer",
)
@partial(jax.jit, static_argnames=("train", "sample_shape", "argmax"))
def sample_actions(
self,
observations: Data,
tasks: Data,
pad_mask: Optional[ArrayLike] = None,
train: bool = False,
argmax: bool = False,
sample_shape: Tuple[int, ...] = (),
|
@struct.dataclass
class OctoModel:
"""Recommended way of interacting with Octo models.
Usage for inference:
>>> model = OctoModel.load_pretrained(checkpoint_dir)
>>> tasks = model.create_tasks(texts=["go to the red room"])
>>> # or tasks = model.create_tasks(goals={"image_primary": goal_images})
>>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))
>>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,
# un-normalize them using model.dataset_statistics
Usage for finetuning:
>>> model = OctoModel.load_pretrained(checkpoint_dir)
>>> train_state = octo.utils.train_utils.TrainState.create(
rng=jax.random.PRNGKey(0),
model=model,
tx=optax.adamw(...)
)
>>> # access params through train_state.model.params
>>> train_state, metrics = your_update_function(train_state, batch)
>>> # when it's time to save (note that this only saves the model parameters,
>>> # not the full optimizer state)
>>> train_state.model.save_pretrained(step, save_dir)
Usage for pretraining:
>>> model = OctoModel.from_config(
config,
example_batch,
text_processor
) # initializes params
>>> # Continue as in finetuning example
See full usage examples in train.py and finetune.py.
"""
module: OctoModule = struct.field(pytree_node=False)
text_processor: TextProcessor = struct.field(pytree_node=False)
config: Config = struct.field(pytree_node=False)
params: Params
example_batch: Data
dataset_statistics: Optional[Data]
def create_tasks(
self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None
):
"""Creates tasks dict from goals and texts.
Args:
goals: if not None, dict of arrays with shape (batch_size, *)
texts: if not None, list of texts of length batch_size
Omit images to run the language-conditioned model, and omit texts to run the
goal-conditioned model.
"""
assert goals is not None or texts is not None
tasks = {"pad_mask_dict": {}}
if goals is not None:
tasks.update(goals)
tasks["pad_mask_dict"].update(
{k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}
)
else:
batch_size = len(texts)
tasks.update(
{
k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)
for k, v in self.example_batch["task"].items()
if k not in ("pad_mask_dict", "language_instruction")
}
)
tasks["pad_mask_dict"].update(
{
k: np.zeros(batch_size, dtype=bool)
for k in tasks.keys()
if k != "pad_mask_dict"
}
)
if texts is not None:
assert self.text_processor is not None
tasks["language_instruction"] = texts
tasks["pad_mask_dict"]["language_instruction"] = np.ones(
len(texts), dtype=bool
)
else:
batch_size = jax.tree_leaves(goals)[0].shape[0]
tasks["language_instruction"] = [""] * batch_size
tasks["pad_mask_dict"]["language_instruction"] = np.zeros(
batch_size, dtype=bool
)
if self.text_processor is not None:
tasks["language_instruction"] = self.text_processor.encode(
tasks["language_instruction"]
)
else:
del tasks["language_instruction"]
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return tasks
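# --- Hedged usage sketch (illustrative, not from the repo) ----------------
# Mirrors the docstring above: language instructions get a True pad mask,
# while zero-filled goal placeholders get False masks. The model object is
# assumed to be an initialized OctoModel.
def _create_tasks_demo(model):
    tasks = model.create_tasks(texts=["pick up the block", "close the drawer"])
    assert np.all(tasks["pad_mask_dict"]["language_instruction"])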
@partial(jax.jit, static_argnames=("train",))
def run_transformer(
self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False
):
"""Runs the transformer, but does shape checking on the inputs.
Args:
observations: dictionary of arrays of shape (batch_size, window_size, *shape).
Shape must be consistent with self.example_batch["observation"]
tasks: dict of tasks of shape (batch_size, *shape)
Shape must be consistent with self.example_batch["task"]
pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding
train: whether to run in train mode
"""
_verify_shapes(
observations,
"observations",
self.example_batch["observation"],
starting_dim=2,
)
_verify_shapes(tasks, "tasks", self.example_batch["task"], starting_dim=1)
return self.module.apply(
{"params": self.params},
observations,
tasks,
pad_mask,
train=train,
method="octo_transformer",
)
@partial(jax.jit, static_argnames=("train", "sample_shape", "argmax"))
def sample_actions(
self,
observations: Data,
tasks: Data,
pad_mask: Optional[ArrayLike] = None,
train: bool = False,
argmax: bool = False,
sample_shape: Tuple[int, ...] = (), | rng: Optional[PRNGKey] = None, | 4 | 2023-12-13 09:58:56+00:00 | 4k |
LinShan-Bin/OccNeRF | utils/vox.py | [
{
"identifier": "geom",
"path": "utils/geom.py",
"snippet": "def eye_4x4(B, device='cuda'):\ndef safe_inverse(a): #parallel version\ndef safe_inverse_single(a):\ndef apply_4x4(RT, xyz):\ndef get_camM_T_camXs(origin_T_camXs, ind=0):\ndef split_rt_single(rt):\ndef split_rt(rt):\ndef merge_rt(r, t):\ndef x... | import pdb
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import geom
from utils import basic
from utils import render | 2,826 |
def world2contracted(xyz_world, pc_range_roi=[-52, -52, 0, 52, 52, 6], ratio=0.8):
"""
Convert 3D world coordinates to a contracted coordinate system based on a specified ROI.
Args:
xyz_world (torch.Tensor): Input tensor with shape [..., 3] representing 3D world coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-52, -52, 0, 52, 52, 6].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the contracted system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_scaled = (2 * (xyz_world - xyz_min) / (xyz_max - xyz_min) - 1) * t
xyz_abs = torch.abs(xyz_scaled)
xyz_contracted = torch.where(
xyz_abs <= t,
xyz_scaled,
xyz_scaled.sign() * (1.0 + t - 1.0/(xyz_abs + 1 - t))
)
return xyz_contracted / (t + 1) # range: [-1, 1]
def contracted2world(xyz_contracted, pc_range_roi=[-80, -80, -3, 80, 80, 8], ratio=0.8):
"""
Convert 3D contracted coordinates back to the world coordinate system based on a specified ROI.
Args:
xyz_contracted (torch.Tensor): Input tensor with shape [..., 3] representing 3D contracted coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-80, -80, -3, 80, 80, 8].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the world system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_ = xyz_contracted * (t + 1)
xyz_abs = torch.abs(xyz_)
xyz_scaled = torch.where(
xyz_abs <= t,
xyz_,
xyz_.sign() * (t - 1.0 + 1.0/(t + 1 - xyz_abs))
) / t
xyz_world = 0.5 * (xyz_scaled + 1) * (xyz_max - xyz_min) + xyz_min
return xyz_world
class Vox_util(nn.Module):
def __init__(self, Z, Y, X, scene_centroid, bounds, position = 'embedding', length_pose_encoding = 3, opt = None, pad=None, assert_cube=False):
super(Vox_util, self).__init__()
self.opt = opt
self.XMIN, self.XMAX, self.YMIN, self.YMAX, self.ZMIN, self.ZMAX = bounds
self.Z, self.Y, self.X = Z, Y, X # 16, 256, 256
self.max_depth = math.sqrt(self.XMAX*self.XMAX + self.YMAX*self.YMAX + self.ZMAX*self.ZMAX)
self.pc_range_roi = [self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4], \
self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]] # [x_min, y_min, z_min, x_max, y_max, z_max]
scene_centroid = scene_centroid.detach().cpu().numpy()
x_centroid, y_centroid, z_centroid = scene_centroid[0]
self.XMIN += x_centroid
self.XMAX += x_centroid
self.YMIN += y_centroid
self.YMAX += y_centroid
self.ZMIN += z_centroid
self.ZMAX += z_centroid
self.default_vox_size_X = (self.XMAX-self.XMIN)/float(X)
self.default_vox_size_Y = (self.YMAX-self.YMIN)/float(Y)
self.default_vox_size_Z = (self.ZMAX-self.ZMIN)/float(Z)
if pad:
Z_pad, Y_pad, X_pad = pad
self.ZMIN -= self.default_vox_size_Z * Z_pad
self.ZMAX += self.default_vox_size_Z * Z_pad
self.YMIN -= self.default_vox_size_Y * Y_pad
self.YMAX += self.default_vox_size_Y * Y_pad
self.XMIN -= self.default_vox_size_X * X_pad
self.XMAX += self.default_vox_size_X * X_pad
# for embedding
self.length_pose_encoding = length_pose_encoding
self.position = position
self.register_buffer('posfreq', torch.FloatTensor([(2 ** i) for i in range(length_pose_encoding)]))
if assert_cube:
# we assume cube voxels
if (not np.isclose(self.default_vox_size_X, self.default_vox_size_Y)) or (not np.isclose(self.default_vox_size_X, self.default_vox_size_Z)):
print('Z, Y, X', Z, Y, X)
print('bounds for this iter:',
'X = %.2f to %.2f' % (self.XMIN, self.XMAX),
'Y = %.2f to %.2f' % (self.YMIN, self.YMAX),
'Z = %.2f to %.2f' % (self.ZMIN, self.ZMAX),
)
print('self.default_vox_size_X', self.default_vox_size_X)
print('self.default_vox_size_Y', self.default_vox_size_Y)
print('self.default_vox_size_Z', self.default_vox_size_Z)
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Y))
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Z))
def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False):
# xyz is B x N x 3, in ref coordinates
# transforms ref coordinates into mem coordinates
B, N, C = list(xyz.shape)
device = xyz.device
assert(C==3)
mem_T_ref = self.get_mem_T_ref(B, Z, Y, X, assert_cube=assert_cube, device=device)
|
def world2contracted(xyz_world, pc_range_roi=[-52, -52, 0, 52, 52, 6], ratio=0.8):
"""
Convert 3D world coordinates to a contracted coordinate system based on a specified ROI.
Args:
xyz_world (torch.Tensor): Input tensor with shape [..., 3] representing 3D world coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-52, -52, 0, 52, 52, 6].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the contracted system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_world).reshape([1]*len(xyz_world.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_scaled = (2 * (xyz_world - xyz_min) / (xyz_max - xyz_min) - 1) * t
xyz_abs = torch.abs(xyz_scaled)
xyz_contracted = torch.where(
xyz_abs <= t,
xyz_scaled,
xyz_scaled.sign() * (1.0 + t - 1.0/(xyz_abs + 1 - t))
)
return xyz_contracted / (t + 1) # range: [-1, 1]
def contracted2world(xyz_contracted, pc_range_roi=[-80, -80, -3, 80, 80, 8], ratio=0.8):
"""
Convert 3D contracted coordinates back to the world coordinate system based on a specified ROI.
Args:
xyz_contracted (torch.Tensor): Input tensor with shape [..., 3] representing 3D contracted coordinates.
pc_range_roi (list, optional): List of 6 elements defining the ROI. Default is [-80, -80, -3, 80, 80, 8].
Returns:
torch.Tensor: Tensor with shape [..., 3] representing coordinates in the world system.
"""
xyz_min = torch.tensor(pc_range_roi[:3]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
xyz_max = torch.tensor(pc_range_roi[3:]).to(xyz_contracted).reshape([1]*len(xyz_contracted.shape[:-1]) + [3])
t = ratio / (1 - ratio)
xyz_ = xyz_contracted * (t + 1)
xyz_abs = torch.abs(xyz_)
xyz_scaled = torch.where(
xyz_abs <= t,
xyz_,
xyz_.sign() * (t - 1.0 + 1.0/(t + 1 - xyz_abs))
) / t
xyz_world = 0.5 * (xyz_scaled + 1) * (xyz_max - xyz_min) + xyz_min
return xyz_world
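# --- Inverse-map sanity sketch (illustrative, not part of the file) -------
# With the same pc_range_roi and ratio on both sides, contracted2world
# inverts world2contracted for any finite point. Note that the two
# signatures above ship different ROI defaults, so pass the ROI explicitly.
def _contraction_roundtrip_demo():
    roi = [-52.0, -52.0, 0.0, 52.0, 52.0, 6.0]
    pts = torch.randn(16, 3) * 30.0
    back = contracted2world(world2contracted(pts, pc_range_roi=roi), pc_range_roi=roi)
    assert torch.allclose(back, pts, atol=1e-3)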
class Vox_util(nn.Module):
def __init__(self, Z, Y, X, scene_centroid, bounds, position = 'embedding', length_pose_encoding = 3, opt = None, pad=None, assert_cube=False):
super(Vox_util, self).__init__()
self.opt = opt
self.XMIN, self.XMAX, self.YMIN, self.YMAX, self.ZMIN, self.ZMAX = bounds
self.Z, self.Y, self.X = Z, Y, X # 16, 256, 256
self.max_depth = math.sqrt(self.XMAX*self.XMAX + self.YMAX*self.YMAX + self.ZMAX*self.ZMAX)
self.pc_range_roi = [self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4], \
self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]] # [x_min, y_min, z_min, x_max, y_max, z_max]
scene_centroid = scene_centroid.detach().cpu().numpy()
x_centroid, y_centroid, z_centroid = scene_centroid[0]
self.XMIN += x_centroid
self.XMAX += x_centroid
self.YMIN += y_centroid
self.YMAX += y_centroid
self.ZMIN += z_centroid
self.ZMAX += z_centroid
self.default_vox_size_X = (self.XMAX-self.XMIN)/float(X)
self.default_vox_size_Y = (self.YMAX-self.YMIN)/float(Y)
self.default_vox_size_Z = (self.ZMAX-self.ZMIN)/float(Z)
if pad:
Z_pad, Y_pad, X_pad = pad
self.ZMIN -= self.default_vox_size_Z * Z_pad
self.ZMAX += self.default_vox_size_Z * Z_pad
self.YMIN -= self.default_vox_size_Y * Y_pad
self.YMAX += self.default_vox_size_Y * Y_pad
self.XMIN -= self.default_vox_size_X * X_pad
self.XMAX += self.default_vox_size_X * X_pad
# for embedding
self.length_pose_encoding = length_pose_encoding
self.position = position
self.register_buffer('posfreq', torch.FloatTensor([(2 ** i) for i in range(length_pose_encoding)]))
if assert_cube:
# we assume cube voxels
if (not np.isclose(self.default_vox_size_X, self.default_vox_size_Y)) or (not np.isclose(self.default_vox_size_X, self.default_vox_size_Z)):
print('Z, Y, X', Z, Y, X)
print('bounds for this iter:',
'X = %.2f to %.2f' % (self.XMIN, self.XMAX),
'Y = %.2f to %.2f' % (self.YMIN, self.YMAX),
'Z = %.2f to %.2f' % (self.ZMIN, self.ZMAX),
)
print('self.default_vox_size_X', self.default_vox_size_X)
print('self.default_vox_size_Y', self.default_vox_size_Y)
print('self.default_vox_size_Z', self.default_vox_size_Z)
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Y))
assert(np.isclose(self.default_vox_size_X, self.default_vox_size_Z))
def Ref2Mem(self, xyz, Z, Y, X, assert_cube=False):
# xyz is B x N x 3, in ref coordinates
# transforms ref coordinates into mem coordinates
B, N, C = list(xyz.shape)
device = xyz.device
assert(C==3)
mem_T_ref = self.get_mem_T_ref(B, Z, Y, X, assert_cube=assert_cube, device=device) | xyz = geom.apply_4x4(mem_T_ref, xyz) | 0 | 2023-12-14 15:00:21+00:00 | 4k |
modelscope/richdreamer | threestudio/models/geometry/implicit_volume.py | [
{
"identifier": "BaseGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Uni... | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (BaseGeometry,
BaseImplicitGeometry,
contract_to_unisphere,)
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import * | 2,944 |
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob_magic3d"
density_blob_scale: float = 10.0
density_blob_std: float = 0.5
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: float = 0.01
# a float threshold, or a string (e.g. "auto") to determine the threshold automatically
isosurface_threshold: Union[float, str] = 25.0
cfg: Config
def configure(self) -> None:
super().configure()
|
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob_magic3d"
density_blob_scale: float = 10.0
density_blob_std: float = 0.5
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: float = 0.01
# a float threshold, or a string (e.g. "auto") to determine the threshold automatically
isosurface_threshold: Union[float, str] = 25.0
cfg: Config
def configure(self) -> None:
super().configure() | self.encoding = get_encoding( | 3 | 2023-12-06 07:53:11+00:00 | 4k |
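A hedged instantiation sketch for the registered geometry above — `threestudio.find` mirrors common threestudio registry usage, but treat the exact accessor call and the chosen field values as assumptions rather than the repo's own setup:

cfg = {
    "n_feature_dims": 3,
    "normal_type": "finite_difference",
    "isosurface_threshold": 25.0,
}
geometry = threestudio.find("implicit-volume")(cfg)  # assumed registry accessor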
rehg-lab/RAVE | annotator/mmpkg/mmcv/runner/base_runner.py | [
{
"identifier": "is_module_wrapper",
"path": "annotator/mmpkg/mmcv/parallel/utils.py",
"snippet": "def is_module_wrapper(module):\n \"\"\"Check if a module is a module wrapper.\n\n The following 3 modules in MMCV (and their subclasses) are regarded as\n module wrappers: DataParallel, Distribute... | import copy
import logging
import os.path as osp
import warnings
import torch
import annotator.mmpkg.mmcv as mmcv
from abc import ABCMeta, abstractmethod
from torch.optim import Optimizer
from ..parallel import is_module_wrapper
from .checkpoint import load_checkpoint
from .dist_utils import get_dist_info
from .hooks import HOOKS, Hook
from .log_buffer import LogBuffer
from .priority import Priority, get_priority
from .utils import get_time_str | 2,979 | # Copyright (c) OpenMMLab. All rights reserved.
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for PyTorch.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``val()``
- ``save_checkpoint()``
Args:
model (:obj:`torch.nn.Module`): The model to be run.
batch_processor (callable): A callable method that process a data
batch. The interface of this method should be
`batch_processor(model, data, train_mode) -> dict`
optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
optimizer (in most cases) or a dict of optimizers (in models that
requires more than one optimizer, e.g., GAN).
work_dir (str, optional): The working directory to save checkpoints
and logs. Defaults to None.
logger (:obj:`logging.Logger`): Logger used during training.
Defaults to None. (The default value is just for backward
compatibility)
meta (dict | None): A dict that records some important information, such as
environment info and seed, which will be logged in logger hook.
Defaults to None.
max_epochs (int, optional): Total training epochs.
max_iters (int, optional): Total training iterations.
"""
def __init__(self,
model,
batch_processor=None,
optimizer=None,
work_dir=None,
logger=None,
meta=None,
max_iters=None,
max_epochs=None):
if batch_processor is not None:
if not callable(batch_processor):
raise TypeError('batch_processor must be callable, '
f'but got {type(batch_processor)}')
warnings.warn('batch_processor is deprecated, please implement '
'train_step() and val_step() in the model instead.')
# raise an error if `batch_processor` is not None and
# `model.train_step()` exists.
if is_module_wrapper(model):
_model = model.module
else:
_model = model
if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
raise RuntimeError(
'batch_processor and model.train_step()/model.val_step() '
'cannot be both available.')
else:
assert hasattr(model, 'train_step')
# check the type of `optimizer`
if isinstance(optimizer, dict):
for name, optim in optimizer.items():
if not isinstance(optim, Optimizer):
raise TypeError(
f'optimizer must be a dict of torch.optim.Optimizers, '
f'but optimizer["{name}"] is a {type(optim)}')
elif not isinstance(optimizer, Optimizer) and optimizer is not None:
raise TypeError(
f'optimizer must be a torch.optim.Optimizer object '
f'or dict or None, but got {type(optimizer)}')
# check the type of `logger`
if not isinstance(logger, logging.Logger):
raise TypeError(f'logger must be a logging.Logger object, '
f'but got {type(logger)}')
# check the type of `meta`
if meta is not None and not isinstance(meta, dict):
raise TypeError(
f'meta must be a dict or None, but got {type(meta)}')
self.model = model
self.batch_processor = batch_processor
self.optimizer = optimizer
self.logger = logger
self.meta = meta
# create work_dir
if mmcv.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
mmcv.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError('"work_dir" must be a str or None')
# get model name from the model class
if hasattr(self.model, 'module'):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info()
| # Copyright (c) OpenMMLab. All rights reserved.
class BaseRunner(metaclass=ABCMeta):
"""The base class of Runner, a training helper for PyTorch.
All subclasses should implement the following APIs:
- ``run()``
- ``train()``
- ``val()``
- ``save_checkpoint()``
Args:
model (:obj:`torch.nn.Module`): The model to be run.
batch_processor (callable): A callable method that process a data
batch. The interface of this method should be
`batch_processor(model, data, train_mode) -> dict`
optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
optimizer (in most cases) or a dict of optimizers (in models that
requires more than one optimizer, e.g., GAN).
work_dir (str, optional): The working directory to save checkpoints
and logs. Defaults to None.
logger (:obj:`logging.Logger`): Logger used during training.
Defaults to None. (The default value is just for backward
compatibility)
meta (dict | None): A dict that records some important information, such as
environment info and seed, which will be logged in logger hook.
Defaults to None.
max_epochs (int, optional): Total training epochs.
max_iters (int, optional): Total training iterations.
"""
def __init__(self,
model,
batch_processor=None,
optimizer=None,
work_dir=None,
logger=None,
meta=None,
max_iters=None,
max_epochs=None):
if batch_processor is not None:
if not callable(batch_processor):
raise TypeError('batch_processor must be callable, '
f'but got {type(batch_processor)}')
warnings.warn('batch_processor is deprecated, please implement '
'train_step() and val_step() in the model instead.')
# raise an error if `batch_processor` is not None and
# `model.train_step()` exists.
if is_module_wrapper(model):
_model = model.module
else:
_model = model
if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
raise RuntimeError(
'batch_processor and model.train_step()/model.val_step() '
'cannot be both available.')
else:
assert hasattr(model, 'train_step')
# check the type of `optimizer`
if isinstance(optimizer, dict):
for name, optim in optimizer.items():
if not isinstance(optim, Optimizer):
raise TypeError(
f'optimizer must be a dict of torch.optim.Optimizers, '
f'but optimizer["{name}"] is a {type(optim)}')
elif not isinstance(optimizer, Optimizer) and optimizer is not None:
raise TypeError(
f'optimizer must be a torch.optim.Optimizer object '
f'or dict or None, but got {type(optimizer)}')
# check the type of `logger`
if not isinstance(logger, logging.Logger):
raise TypeError(f'logger must be a logging.Logger object, '
f'but got {type(logger)}')
# check the type of `meta`
if meta is not None and not isinstance(meta, dict):
raise TypeError(
f'meta must be a dict or None, but got {type(meta)}')
self.model = model
self.batch_processor = batch_processor
self.optimizer = optimizer
self.logger = logger
self.meta = meta
# create work_dir
if mmcv.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
mmcv.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError('"work_dir" must be a str or None')
# get model name from the model class
if hasattr(self.model, 'module'):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info() | self.timestamp = get_time_str() | 8 | 2023-12-05 02:51:53+00:00 | 4k |
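A minimal sketch of the legacy `batch_processor` contract described in the docstring above. This is illustrative only: the `loss`/`log_vars`/`num_samples` keys follow mmcv's usual train-step convention, and the `data['img']` key is a hypothetical batch field, not something this file defines:

def toy_batch_processor(model, data, train_mode):
    # contract from the docstring: batch_processor(model, data, train_mode) -> dict
    losses = model(**data)
    loss = sum(v.mean() for v in losses.values())
    return dict(loss=loss, log_vars={'loss': loss.item()}, num_samples=len(data['img']))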
worldcoin/open-iris | tests/unit_tests/nodes/eye_properties_estimation/test_occlusion_calculator.py | [
{
"identifier": "area_of_circular_segment",
"path": "tests/unit_tests/utils.py",
"snippet": "def area_of_circular_segment(circle_radius: float, delta_height: float) -> float:\n \"\"\"Compute the area of a circular segment (see source for definition).\n\n Source: https://en.wikipedia.org/wiki/Circu... | import math
import numpy as np
import pytest
from iris.io.dataclasses import EyeCenters, EyeOrientation, GeometryPolygons, NoiseMask
from iris.nodes.eye_properties_estimation.occlusion_calculator import OcclusionCalculator
from tests.unit_tests.utils import area_of_circular_segment, generate_arc, rotated_asymmetric_rectangle | 2,744 | EyeOrientation(angle=eye_orientation_angle),
EyeCenters(pupil_x=0.0, pupil_y=0.0, iris_x=0.0, iris_y=0.0),
)
result = np.vstack([result_xs, result_ys]).T
assert np.mean(np.abs(np.sort(result) - np.sort(expected_result))) < 0.5
@pytest.mark.parametrize(
"quantile_angle,upper_noise_distance,lower_noise_distance,upper_eyelid_distance,lower_eyelid_distance,eye_orientation",
[
(90, 200, 200, 200, 200, 0),
(30, 200, 200, 200, 200, 0),
(90, 200, 200, 200, 200, np.pi / 4),
(30, 200, 200, 200, 200, np.pi / 4),
(90, 100, 200, 200, 200, 0),
(90, 100, 200, 200, 200, np.pi / 6),
(30, 200, 100, 200, 200, -np.pi / 6),
(90, 0, 200, 200, 200, np.pi / 6),
(90, 100, 100, 200, 200, np.pi / 6),
(90, 0, 0, 200, 200, -np.pi / 6),
(30, 0, 0, 200, 200, -np.pi / 6),
(30, 50, 200, 200, 200, -np.pi / 6),
(90, 200, 200, 100, 100, -np.pi / 6),
(30, 200, 200, 0, 100, -np.pi / 6),
(30, 200, 200, 0, 0, -np.pi / 6),
(0, 200, 200, 0, 0, -np.pi / 6),
(45, 80, 10, 60, 50, -np.pi / 2),
],
ids=[
"occ90 - no occlusion - 0 degrees",
"occ30 - no occlusion - 0 degrees",
"occ90 - no occlusion - 45 degrees",
"occ30 - no occlusion - 45 degrees",
"occ90 - upper eyelashes half closed - 0 degrees",
"occ90 - upper eyelashes half closed - 30 degrees",
"occ30 - lower eyelashes half closed - -30 degrees",
"occ90 - upper eyelashes closed - 30 degrees",
"occ90 - both eyelashes half closed",
"occ90 - eye completely closed (eyelashes)",
"occ30 - eye completely closed (eyelashes)",
"occ30 - upper eyelashes half occluded",
"occ90 - both eyelids half occluded",
"occ30 - upper eyelid occluded",
"occ30 - eye completely closed (eyelids)",
"occ0",
"occ45 - some eyelash and eyelid occlusion - 90 degrees",
],
)
def test_occlusion_calculation(
quantile_angle: float,
upper_noise_distance: int,
lower_noise_distance: int,
upper_eyelid_distance: int,
lower_eyelid_distance: int,
eye_orientation: float,
) -> None:
"""This function tests the occlusion_calculator in an exhaustive number of eye configurations.
Args:
quantile_angle (float): quantile of the occlusion, e.g. 90, in degrees.
upper_noise_distance (int): distance between the center of the iris and the upper eyelashes in pixels.
lower_noise_distance (int): distance between the center of the iris and the lower eyelashes in pixels.
upper_eyelid_distance (int): distance between the center of the iris and the upper eyelid in pixels.
lower_eyelid_distance (int): distance between the center of the iris and the lower eyelid in pixels.
eye_orientation (float): eye orientation in radians.
"""
# Extra hardcoded parameters
img_w, img_h = 1440, 1080
img_center_x, img_center_y = img_w / 2, img_h / 2
iris_radius = 200
pupil_radius = 50
# Mathematically computing the expected occlusion fraction
theta_occlusion = 2 * (np.pi / 2 - quantile_angle * 2 * np.pi / 360)
quantile_area_removed = iris_radius**2 / 2 * (theta_occlusion - np.sin(theta_occlusion))
area_upper_eyelashes = area_of_circular_segment(iris_radius, upper_noise_distance)
area_lower_eyelashes = area_of_circular_segment(iris_radius, lower_noise_distance)
area_upper_eyelid = area_of_circular_segment(iris_radius, upper_eyelid_distance)
area_lower_eyelid = area_of_circular_segment(iris_radius, lower_eyelid_distance)
pupil_area_not_included_in_masks = (
np.pi * pupil_radius**2
- max(
area_of_circular_segment(pupil_radius, upper_noise_distance),
area_of_circular_segment(pupil_radius, upper_eyelid_distance),
)
- max(
area_of_circular_segment(pupil_radius, lower_noise_distance),
area_of_circular_segment(pupil_radius, lower_eyelid_distance),
)
)
expected_visible_fraction = (
np.pi * iris_radius**2
- pupil_area_not_included_in_masks
- max(quantile_area_removed, area_upper_eyelid, area_upper_eyelashes)
- max(quantile_area_removed, area_lower_eyelid, area_lower_eyelashes)
) / (np.pi * iris_radius**2 - np.pi * pupil_radius**2 - 2 * quantile_area_removed)
if np.isnan(expected_visible_fraction):
expected_visible_fraction = 0.0
# Constructing the mock data
mock_eye_orientation = EyeOrientation(angle=eye_orientation)
mock_eye_centers = EyeCenters(pupil_x=img_center_x, pupil_y=img_center_y, iris_x=img_center_x, iris_y=img_center_y)
mock_pupil = generate_arc(
radius=pupil_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
)
mock_iris = generate_arc(
radius=iris_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
)
|
@pytest.fixture
def algorithm() -> OcclusionCalculator:
return OcclusionCalculator(quantile_angle=30.0)
@pytest.mark.parametrize(
"quantile_angle,eye_orientation_angle,expected_result",
[
(
90.0,
np.radians(10.0),
generate_arc(1.0, 0.0, 0.0, from_angle=0.0, to_angle=2 * np.pi, num_points=360),
),
(
30.0,
np.radians(10.0),
np.concatenate(
[
generate_arc(1.0, 0.0, 0.0, from_angle=np.radians(0), to_angle=np.radians(40), num_points=40),
generate_arc(1.0, 0.0, 0.0, from_angle=np.radians(340), to_angle=np.radians(360), num_points=20),
generate_arc(1.0, 0.0, 0.0, from_angle=np.radians(160), to_angle=np.radians(220), num_points=60),
]
),
),
],
ids=["90 degrees", "30 degrees"],
)
def test_get_quantile_points(
quantile_angle: float,
eye_orientation_angle: np.float64,
expected_result: np.ndarray,
) -> None:
mock_iris_coords = generate_arc(
radius=1.0, center_x=0.0, center_y=0.0, from_angle=0.0, to_angle=2 * np.pi, num_points=360
)
algorithm = OcclusionCalculator(quantile_angle=quantile_angle)
result_xs, result_ys = algorithm._get_quantile_points(
mock_iris_coords,
EyeOrientation(angle=eye_orientation_angle),
EyeCenters(pupil_x=0.0, pupil_y=0.0, iris_x=0.0, iris_y=0.0),
)
result = np.vstack([result_xs, result_ys]).T
assert np.mean(np.abs(np.sort(result) - np.sort(expected_result))) < 0.5
@pytest.mark.parametrize(
"quantile_angle,upper_noise_distance,lower_noise_distance,upper_eyelid_distance,lower_eyelid_distance,eye_orientation",
[
(90, 200, 200, 200, 200, 0),
(30, 200, 200, 200, 200, 0),
(90, 200, 200, 200, 200, np.pi / 4),
(30, 200, 200, 200, 200, np.pi / 4),
(90, 100, 200, 200, 200, 0),
(90, 100, 200, 200, 200, np.pi / 6),
(30, 200, 100, 200, 200, -np.pi / 6),
(90, 0, 200, 200, 200, np.pi / 6),
(90, 100, 100, 200, 200, np.pi / 6),
(90, 0, 0, 200, 200, -np.pi / 6),
(30, 0, 0, 200, 200, -np.pi / 6),
(30, 50, 200, 200, 200, -np.pi / 6),
(90, 200, 200, 100, 100, -np.pi / 6),
(30, 200, 200, 0, 100, -np.pi / 6),
(30, 200, 200, 0, 0, -np.pi / 6),
(0, 200, 200, 0, 0, -np.pi / 6),
(45, 80, 10, 60, 50, -np.pi / 2),
],
ids=[
"occ90 - no occlusion - 0 degrees",
"occ30 - no occlusion - 0 degrees",
"occ90 - no occlusion - 45 degrees",
"occ30 - no occlusion - 45 degrees",
"occ90 - upper eyelashes half closed - 0 degrees",
"occ90 - upper eyelashes half closed - 30 degrees",
"occ30 - lower eyelashes half closed - -30 degrees",
"occ90 - upper eyelashes closed - 30 degrees",
"occ90 - both eyelashes half closed",
"occ90 - eye completely closed (eyelashes)",
"occ30 - eye completely closed (eyelashes)",
"occ30 - upper eyelashes half occluded",
"occ90 - both eyelids half occluded",
"occ30 - upper eyelid occluded",
"occ30 - eye completely closed (eyelids)",
"occ0",
"occ45 - some eyelash and eyelid occlusion - 90 degrees",
],
)
def test_occlusion_calculation(
quantile_angle: float,
upper_noise_distance: int,
lower_noise_distance: int,
upper_eyelid_distance: int,
lower_eyelid_distance: int,
eye_orientation: float,
) -> None:
"""This function tests the occlusion_calculator in an exhaustive number of eye configurations.
Args:
quantile_angle (float): quantile of the occlusion, e.g. 90, in degrees.
upper_noise_distance (int): distance between the center of the iris and the upper eyelashes in pixels.
lower_noise_distance (int): distance between the center of the iris and the lower eyelashes in pixels.
upper_eyelid_distance (int): distance between the center of the iris and the upper eyelid in pixels.
lower_eyelid_distance (int): distance between the center of the iris and the lower eyelid in pixels.
eye_orientation (float): eye orientation in radians.
"""
# Extra hardcoded parameters
img_w, img_h = 1440, 1080
img_center_x, img_center_y = img_w / 2, img_h / 2
iris_radius = 200
pupil_radius = 50
# Mathematically computing the expected occlusion fraction
theta_occlusion = 2 * (np.pi / 2 - quantile_angle * 2 * np.pi / 360)
quantile_area_removed = iris_radius**2 / 2 * (theta_occlusion - np.sin(theta_occlusion))
area_upper_eyelashes = area_of_circular_segment(iris_radius, upper_noise_distance)
area_lower_eyelashes = area_of_circular_segment(iris_radius, lower_noise_distance)
area_upper_eyelid = area_of_circular_segment(iris_radius, upper_eyelid_distance)
area_lower_eyelid = area_of_circular_segment(iris_radius, lower_eyelid_distance)
pupil_area_not_included_in_masks = (
np.pi * pupil_radius**2
- max(
area_of_circular_segment(pupil_radius, upper_noise_distance),
area_of_circular_segment(pupil_radius, upper_eyelid_distance),
)
- max(
area_of_circular_segment(pupil_radius, lower_noise_distance),
area_of_circular_segment(pupil_radius, lower_eyelid_distance),
)
)
expected_visible_fraction = (
np.pi * iris_radius**2
- pupil_area_not_included_in_masks
- max(quantile_area_removed, area_upper_eyelid, area_upper_eyelashes)
- max(quantile_area_removed, area_lower_eyelid, area_lower_eyelashes)
) / (np.pi * iris_radius**2 - np.pi * pupil_radius**2 - 2 * quantile_area_removed)
if np.isnan(expected_visible_fraction):
expected_visible_fraction = 0.0
# Constructing the mock data
mock_eye_orientation = EyeOrientation(angle=eye_orientation)
mock_eye_centers = EyeCenters(pupil_x=img_center_x, pupil_y=img_center_y, iris_x=img_center_x, iris_y=img_center_y)
mock_pupil = generate_arc(
radius=pupil_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
)
mock_iris = generate_arc(
radius=iris_radius,
center_x=img_center_x,
center_y=img_center_y,
from_angle=0.0,
to_angle=2 * np.pi,
num_points=360,
) | mock_eyeball = rotated_asymmetric_rectangle( | 2 | 2023-12-09 22:43:09+00:00 | 4k |
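A hedged reconstruction of the `area_of_circular_segment` helper these expectations lean on (the real one lives in tests/unit_tests/utils.py; this version, which takes the chord's distance from the circle center, is inferred from how the tests use it and should be treated as an assumption): a chord at distance d from the center of a circle of radius r cuts off a segment of area r²·arccos(d/r) − d·√(r² − d²), which vanishes once d ≥ r.

import math

def area_of_circular_segment_sketch(radius: float, distance: float) -> float:
    # nothing is cut off once the chord lies outside the circle
    if distance >= radius:
        return 0.0
    return radius**2 * math.acos(distance / radius) - distance * math.sqrt(
        radius**2 - distance**2
    )

# a chord through the center removes exactly half the circle:
assert math.isclose(area_of_circular_segment_sketch(200.0, 0.0), math.pi * 200.0**2 / 2)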
DiffusionLight/DiffusionLight | relighting/pipeline_inpaintonly.py | [
{
"identifier": "custom_prepare_latents",
"path": "relighting/pipeline_utils.py",
"snippet": "def custom_prepare_latents(\n self,\n batch_size,\n num_channels_latents,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n ... | import torch
from typing import List, Union, Dict, Any, Callable, Optional, Tuple
from diffusers.image_processor import PipelineImageInput
from diffusers import StableDiffusionInpaintPipeline, StableDiffusionXLInpaintPipeline
from diffusers.models import AsymmetricAutoencoderKL
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from relighting.pipeline_utils import custom_prepare_latents, custom_prepare_mask_latents, rescale_noise_cfg | 2,392 |
class CustomStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
masked_image_latents: torch.FloatTensor = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 1.0,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
newx: int = 0,
newy: int = 0,
newr: int = 256,
current_seed=0,
use_noise_moving=True,
):
# OVERWRITE METHODS
self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionInpaintPipeline)
|
class CustomStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
masked_image_latents: torch.FloatTensor = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 1.0,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
newx: int = 0,
newy: int = 0,
newr: int = 256,
current_seed=0,
use_noise_moving=True,
):
# OVERWRITE METHODS
self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionInpaintPipeline) | self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionInpaintPipeline) | 0 | 2023-12-07 14:03:31+00:00 | 4k |
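The `__get__` trick above deserves a note: calling `function.__get__(obj, Cls)` produces a bound method whose `self` is fixed to `obj`, which is how the pipeline swaps in custom latent-preparation logic at call time instead of defining overrides on the subclass. A self-contained illustration in plain Python (not from the repo):

class Greeter:
    pass

def shout(self):
    return "hi from " + type(self).__name__

g = Greeter()
g.greet = shout.__get__(g, Greeter)  # bind: self is now fixed to g
assert g.greet() == "hi from Greeter"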
laixintao/mactop | mactop/panels/m1_gpu.py | [
{
"identifier": "DynamicText",
"path": "mactop/widgets/dynamic_text.py",
"snippet": "class DynamicText(Static):\n value = reactive(None)\n\n DEFAULT_CSS = \"\"\"\n DynamicText {\n layout: horizontal;\n }\n \n \"\"\"\n\n def __init__(\n self,\n prefix_label,\n ... | from textual.app import ComposeResult
from mactop.widgets import DynamicText
from mactop.metrics_store import metrics
from ._base import BaseStatic
from mactop import const
from mactop.widgets import LabeledColorBar, LabeledSparkline | 2,134 |
def refresh_callback(*_):
gpu_freq = metrics.get_powermetrics().m1_gpu.freq_hz
return gpu_freq
class GPUFreqText(BaseStatic):
BORDER_TITLE = "GPU Freq"
def __init__(self, label="GPU Freq: ", *args, **kwargs):
super().__init__(*args, **kwargs)
self.label = label
def compose(self) -> ComposeResult:
yield DynamicText(
prefix_label=self.label,
update_fn=refresh_callback,
value_render_fn=lambda x: f"{x:.2f}MHz",
classes="gpu-freq-text",
update_interval=self.refresh_interval,
)
def get_gpu_usage():
idle = metrics.get_powermetrics().m1_gpu.idle_ratio
if idle is None:
return [0, 1]
busy = 1 - idle
return [busy, idle]
def display_gpu_ration(x):
if not x:
return "NA%"
return f"{x[0]*100:.2f}%"
class GPUUsageBarPanel(BaseStatic):
def __init__(
self,
color_busy=const.COLOR_USER,
color_idle=const.COLOR_IDLE,
label="GPU: ",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.color_busy = color_busy
self.color_idle = color_idle
self.label = label
def compose(self) -> ComposeResult:
|
def refresh_callback(*_):
gpu_freq = metrics.get_powermetrics().m1_gpu.freq_hz
return gpu_freq
class GPUFreqText(BaseStatic):
BORDER_TITLE = "GPU Freq"
def __init__(self, label="GPU Freq: ", *args, **kwargs):
super().__init__(*args, **kwargs)
self.label = label
def compose(self) -> ComposeResult:
yield DynamicText(
prefix_label=self.label,
update_fn=refresh_callback,
value_render_fn=lambda x: f"{x:.2f}MHz",
classes="gpu-freq-text",
update_interval=self.refresh_interval,
)
def get_gpu_usage():
idle = metrics.get_powermetrics().m1_gpu.idle_ratio
if idle is None:
return [0, 1]
busy = 1 - idle
return [busy, idle]
def display_gpu_ration(x):
if not x:
return "NA%"
return f"{x[0]*100:.2f}%"
class GPUUsageBarPanel(BaseStatic):
def __init__(
self,
color_busy=const.COLOR_USER,
color_idle=const.COLOR_IDLE,
label="GPU: ",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.color_busy = color_busy
self.color_idle = color_idle
self.label = label
def compose(self) -> ComposeResult: | yield LabeledColorBar( | 5 | 2023-12-05 09:12:42+00:00 | 4k |
eliphatfs/zerorf | lib/core/mesh_gui.py | [
{
"identifier": "load_pose",
"path": "lib/datasets/shapenet_srn.py",
"snippet": "def load_pose(path):\n pose = np.loadtxt(path, dtype=np.float32, delimiter=' ').reshape(4, 4)\n return torch.from_numpy(pose)"
},
{
"identifier": "load_intrinsics",
"path": "lib/datasets/shapenet_srn.py",
... | import copy
import numpy as np
import torch
import torch.nn.functional as F
import dearpygui.dearpygui as dpg
from scipy.spatial.transform import Rotation as R
from mmgen.models.builder import build_module
from mmgen.apis import set_random_seed # isort:skip # noqa
from lib.datasets.shapenet_srn import load_pose, load_intrinsics | 3,331 | ### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag='_texture')
### register window
# the rendered image, as the primary window
with dpg.window(tag='_primary_window', width=self.W, height=self.H):
# add the texture
dpg.add_image('_texture')
dpg.set_primary_window('_primary_window', True)
def update_camera_status():
if self.debug:
dpg.set_value('_log_pose', self.active_cam.pose2str())
dpg.set_value('fov', self.active_cam.fovy)
dpg.set_value('radius', self.active_cam.radius)
euler = self.active_cam.euler
dpg.set_value('roll', euler[0])
dpg.set_value('elevation', euler[1])
dpg.set_value('azimuth', euler[2])
center = self.active_cam.center
dpg.set_value('center_x', center[0])
dpg.set_value('center_y', center[1])
dpg.set_value('center_z', center[2])
# control window
with dpg.window(label='Control', tag='_control_window', width=380, height=self.H, pos=[self.W, 0]):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# time
with dpg.group(horizontal=True):
dpg.add_text('Infer time: ')
dpg.add_text('no data', tag='_log_infer_time')
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data):
| # modified from torch-ngp
class OrbitCamera:
def __init__(self, name, W, H, r=2., fovy=60., euler=[0, 0, 0]):
self.name = name
self.W = W
self.H = H
self.radius = r # camera distance from center
self.fovy = fovy # in degree
self.center = np.array([0, 0, 0], dtype=np.float32) # look at this point
self.default_rot = R.from_quat([0.5, -0.5, 0.5, -0.5])
self.rot = copy.deepcopy(self.default_rot)
self.up = np.array([0, 0, 1], dtype=np.float32) # need to be normalized!
self.set_euler(euler)
# pose
@property
def pose(self):
# first move camera to radius
res = np.eye(4, dtype=np.float32)
res[2, 3] -= self.radius
# rotate
rot = np.eye(4, dtype=np.float32)
rot[:3, :3] = self.rot.as_matrix()
res = rot @ res
# translate
res[:3, 3] -= self.center
return res
def set_pose(self, pose):
self.rot = R.from_matrix(pose[:3, :3])
self.center = -pose[:3, 3] - self.rot.as_matrix()[:3, 2] * self.radius
@property
def intrinsics(self):
focal = self.H / (2 * np.tan(np.radians(self.fovy) / 2))
return np.array([focal, focal, self.W / 2, self.H / 2])
@property
def euler(self):
return (self.rot * self.default_rot.inv()).as_euler('xyz', degrees=True)
def set_euler(self, euler):
self.rot = R.from_euler('xyz', euler, degrees=True) * self.default_rot
def orbit(self, dx, dy):
# rotate along camera up/side axis!
side = self.rot.as_matrix()[:3, 0]  # camera side (x) axis in world coordinates; rotation-matrix columns are already unit-norm
rotvec_x = self.up * np.radians(-0.1 * dx)
rotvec_y = side * np.radians(-0.1 * dy)
self.rot = R.from_rotvec(rotvec_x) * R.from_rotvec(rotvec_y) * self.rot
def scale(self, delta):
self.radius *= 1.1 ** (-delta)
def pan(self, dx, dy, dz=0):
# pan in camera coordinate system (careful on the sensitivity!)
self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ np.array([dx, dy, dz])
def pose2str(self):
with np.printoptions(precision=3, suppress=True):
return str(self.pose)
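# --- Illustrative check (not part of the file) -----------------------------
# intrinsics follows the pinhole relation focal = H / (2 * tan(fovy / 2));
# for H = 512 and fovy = 52 deg that is roughly 524.9 px.
def _intrinsics_demo():
    cam = OrbitCamera('demo', W=512, H=512, r=2.6, fovy=52.0)
    fx, fy, cx, cy = cam.intrinsics
    assert abs(fx - 512 / (2 * np.tan(np.radians(26.0)))) < 1e-3
    assert (cx, cy) == (256.0, 256.0)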
class MeshGUI:
default_cam_fovy = 52.0
default_cam_radius = 2.6
default_cam_euler = [0.0, 23.0, -47.4]
def __init__(self, mesh, renderer, W=512, H=512, debug=True):
self.W = W
self.H = H
self.default_cam = OrbitCamera(
'default', W, H, r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
self.active_cam = self.default_cam
self.debug = debug
self.bg_color = torch.ones(3, dtype=torch.float32) # default white bg
self.step = 0 # training step
self.mesh = mesh
self.renderer = renderer
self.video_sec = 4
self.video_fps = 30
self.video_res = 256
self.render_buffer = np.zeros((self.H, self.W, 3), dtype=np.float32)
self.need_update = True # camera moved, should reset accumulation
self.mode = 'image' # choose from ['image', 'depth', 'alpha', 'normal']
self.image_enhancer = build_module(dict(
type='SRVGGNetCompact',
# num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu',
num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu',
# pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'
pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
)).half().eval().requires_grad_(False)
if torch.cuda.is_available():
self.image_enhancer.cuda()
self.use_image_enhancer = False
self.extrinsic_ndc_scale = 2.0 # default shapenet dataset value
dpg.create_context()
if self.debug:
dpg.configure_app(manual_callback_management=True)
self.register_dpg()
self.test_step()
def __del__(self):
dpg.destroy_context()
def prepare_buffer(self, outputs):
if self.mode == 'image':
return outputs['image']
elif self.mode == 'depth':
return np.expand_dims(outputs['depth'], -1).repeat(3, -1)
elif self.mode == 'alpha':
return np.expand_dims(outputs['alpha'], -1).repeat(3, -1)
elif self.mode == 'normal':
return outputs['normal']
else:
raise ValueError(f'Unknown mode {self.mode}')
def test_gui(self, pose, intrinsics, W, H):
with torch.no_grad():
if self.use_image_enhancer and self.mode == 'image':
rH, rW = H // 2, W // 2
intrinsics = intrinsics / 2
else:
rH, rW = H, W
results = self.renderer(
[self.mesh],
torch.tensor(pose, dtype=torch.float32, device=self.mesh.device)[None, None],
torch.tensor(intrinsics, dtype=torch.float32, device=self.mesh.device)[None, None],
rH, rW)
image = results['rgba'][..., :3] + self.bg_color.to(results['rgba']) * (1 - results['rgba'][..., 3:])
if self.use_image_enhancer and self.mode == 'image':
image = self.image_enhancer(image[0].half().permute(0, 3, 1, 2))
image = F.interpolate(image, size=(H, W), mode='area').permute(0, 2, 3, 1)[None].float()
results = dict(
image=image[0, 0].cpu().numpy(),
alpha=results['rgba'][0, 0, :, :, 3].cpu().numpy(),
depth=results['depth'][0, 0].cpu().numpy(),
normal=results['normal'][0, 0].cpu().numpy())
return results
def test_step(self):
if self.need_update:
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
starter.record()
outputs = self.test_gui(
self.active_cam.pose, self.active_cam.intrinsics,
self.W, self.H)
ender.record()
torch.cuda.synchronize()
t = starter.elapsed_time(ender)
self.render_buffer = np.ascontiguousarray(self.prepare_buffer(outputs))
self.need_update = False
dpg.set_value('_log_infer_time', f'{t:.4f}ms ({int(1000 / t)} FPS)')
dpg.set_value('_texture', self.render_buffer)
def register_dpg(self):
### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag='_texture')
### register window
# the rendered image, as the primary window
with dpg.window(tag='_primary_window', width=self.W, height=self.H):
# add the texture
dpg.add_image('_texture')
dpg.set_primary_window('_primary_window', True)
def update_camera_status():
if self.debug:
dpg.set_value('_log_pose', self.active_cam.pose2str())
dpg.set_value('fov', self.active_cam.fovy)
dpg.set_value('radius', self.active_cam.radius)
euler = self.active_cam.euler
dpg.set_value('roll', euler[0])
dpg.set_value('elevation', euler[1])
dpg.set_value('azimuth', euler[2])
center = self.active_cam.center
dpg.set_value('center_x', center[0])
dpg.set_value('center_y', center[1])
dpg.set_value('center_z', center[2])
# control window
with dpg.window(label='Control', tag='_control_window', width=380, height=self.H, pos=[self.W, 0]):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# time
with dpg.group(horizontal=True):
dpg.add_text('Infer time: ')
dpg.add_text('no data', tag='_log_infer_time')
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data): | fx, fy, cx, cy, h, w = load_intrinsics(app_data['file_path_name']) | 1 | 2023-12-14 03:29:28+00:00 | 4k |
geopavlakos/hamer | hamer/models/components/pose_transformer.py | [
{
"identifier": "AdaptiveLayerNorm1D",
"path": "hamer/models/components/t_cond_mlp.py",
"snippet": "class AdaptiveLayerNorm1D(torch.nn.Module):\n def __init__(self, data_dim: int, norm_cond_dim: int):\n super().__init__()\n if data_dim <= 0:\n raise ValueError(f\"data_dim mus... | from inspect import isfunction
from typing import Callable, Optional
from einops import rearrange
from einops.layers.torch import Rearrange
from torch import nn
from .t_cond_mlp import (
AdaptiveLayerNorm1D,
FrequencyEmbedder,
normalization_layer,
)
import torch | 2,871 | self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args):
for attn, ff in self.layers:
x = attn(x, *args) + x
x = ff(x, *args) + x
return x
class TransformerCrossAttn(nn.Module):
def __init__(
self,
dim: int,
depth: int,
heads: int,
dim_head: int,
mlp_dim: int,
dropout: float = 0.0,
norm: str = "layer",
norm_cond_dim: int = -1,
context_dim: Optional[int] = None,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)
ca = CrossAttention(
dim, context_dim=context_dim, heads=heads, dim_head=dim_head, dropout=dropout
)
ff = FeedForward(dim, mlp_dim, dropout=dropout)
self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ca, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args, context=None, context_list=None):
if context_list is None:
context_list = [context] * len(self.layers)
if len(context_list) != len(self.layers):
raise ValueError(f"len(context_list) != len(self.layers) ({len(context_list)} != {len(self.layers)})")
for i, (self_attn, cross_attn, ff) in enumerate(self.layers):
x = self_attn(x, *args) + x
x = cross_attn(x, *args, context=context_list[i]) + x
x = ff(x, *args) + x
return x
class DropTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[0, :, 0], self.p).bernoulli().bool()
# TODO: permutation idx for each batch using torch.argsort
if zero_mask.any():
x = x[:, ~zero_mask, :]
return x
class ZeroTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[:, :, 0], self.p).bernoulli().bool()
# Zero-out the masked tokens
x[zero_mask, :] = 0
return x
class TransformerEncoder(nn.Module):
def __init__(
self,
num_tokens: int,
token_dim: int,
dim: int,
depth: int,
heads: int,
mlp_dim: int,
dim_head: int = 64,
dropout: float = 0.0,
emb_dropout: float = 0.0,
emb_dropout_type: str = "drop",
emb_dropout_loc: str = "token",
norm: str = "layer",
norm_cond_dim: int = -1,
token_pe_numfreq: int = -1,
):
super().__init__()
if token_pe_numfreq > 0:
token_dim_new = token_dim * (2 * token_pe_numfreq + 1)
self.to_token_embedding = nn.Sequential(
Rearrange("b n d -> (b n) d", n=num_tokens, d=token_dim),
|
# from .vit import Attention, FeedForward
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
class PreNorm(nn.Module):
def __init__(self, dim: int, fn: Callable, norm: str = "layer", norm_cond_dim: int = -1):
super().__init__()
self.norm = normalization_layer(norm, dim, norm_cond_dim)
self.fn = fn
def forward(self, x: torch.Tensor, *args, **kwargs):
if isinstance(self.norm, AdaptiveLayerNorm1D):
return self.fn(self.norm(x, *args), **kwargs)
else:
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.0):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout),
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0.0):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head**-0.5
self.attend = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
self.to_out = (
nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))
if project_out
else nn.Identity()
)
def forward(self, x):
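# Scaled dot-product self-attention: softmax(q @ k^T / sqrt(dim_head)) @ v,
# with heads split out via einops rearrange and merged back before to_out.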
qkv = self.to_qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class CrossAttention(nn.Module):
def __init__(self, dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head**-0.5
self.attend = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
context_dim = default(context_dim, dim)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias=False)
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_out = (
nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))
if project_out
else nn.Identity()
)
def forward(self, x, context=None):
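# Queries come from x, keys/values from `context`; when context is None this
# degenerates to plain self-attention.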
context = default(context, x)
k, v = self.to_kv(context).chunk(2, dim=-1)
q = self.to_q(x)
q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), [q, k, v])
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
out = rearrange(out, "b h n d -> b n (h d)")
return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
dim: int,
depth: int,
heads: int,
dim_head: int,
mlp_dim: int,
dropout: float = 0.0,
norm: str = "layer",
norm_cond_dim: int = -1,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)
ff = FeedForward(dim, mlp_dim, dropout=dropout)
self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args):
for attn, ff in self.layers:
x = attn(x, *args) + x
x = ff(x, *args) + x
return x
class TransformerCrossAttn(nn.Module):
def __init__(
self,
dim: int,
depth: int,
heads: int,
dim_head: int,
mlp_dim: int,
dropout: float = 0.0,
norm: str = "layer",
norm_cond_dim: int = -1,
context_dim: Optional[int] = None,
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
sa = Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)
ca = CrossAttention(
dim, context_dim=context_dim, heads=heads, dim_head=dim_head, dropout=dropout
)
ff = FeedForward(dim, mlp_dim, dropout=dropout)
self.layers.append(
nn.ModuleList(
[
PreNorm(dim, sa, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ca, norm=norm, norm_cond_dim=norm_cond_dim),
PreNorm(dim, ff, norm=norm, norm_cond_dim=norm_cond_dim),
]
)
)
def forward(self, x: torch.Tensor, *args, context=None, context_list=None):
if context_list is None:
context_list = [context] * len(self.layers)
if len(context_list) != len(self.layers):
raise ValueError(f"len(context_list) != len(self.layers) ({len(context_list)} != {len(self.layers)})")
for i, (self_attn, cross_attn, ff) in enumerate(self.layers):
x = self_attn(x, *args) + x
x = cross_attn(x, *args, context=context_list[i]) + x
x = ff(x, *args) + x
return x
class DropTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[0, :, 0], self.p).bernoulli().bool()
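# Note: the mask is sampled once from the first batch element, so the same
# token positions are dropped for every sample in the batch.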
# TODO: permutation idx for each batch using torch.argsort
if zero_mask.any():
x = x[:, ~zero_mask, :]
return x
class ZeroTokenDropout(nn.Module):
def __init__(self, p: float = 0.1):
super().__init__()
if p < 0 or p > 1:
raise ValueError(
"dropout probability has to be between 0 and 1, " "but got {}".format(p)
)
self.p = p
def forward(self, x: torch.Tensor):
# x: (batch_size, seq_len, dim)
if self.training and self.p > 0:
zero_mask = torch.full_like(x[:, :, 0], self.p).bernoulli().bool()
# Zero-out the masked tokens
x[zero_mask, :] = 0
return x
class TransformerEncoder(nn.Module):
def __init__(
self,
num_tokens: int,
token_dim: int,
dim: int,
depth: int,
heads: int,
mlp_dim: int,
dim_head: int = 64,
dropout: float = 0.0,
emb_dropout: float = 0.0,
emb_dropout_type: str = "drop",
emb_dropout_loc: str = "token",
norm: str = "layer",
norm_cond_dim: int = -1,
token_pe_numfreq: int = -1,
):
super().__init__()
if token_pe_numfreq > 0:
token_dim_new = token_dim * (2 * token_pe_numfreq + 1)
self.to_token_embedding = nn.Sequential(
Rearrange("b n d -> (b n) d", n=num_tokens, d=token_dim), | FrequencyEmbedder(token_pe_numfreq, token_pe_numfreq - 1), | 1 | 2023-12-08 09:07:07+00:00 | 4k |
rogeriochaves/driver | driver/executor.py | [
{
"identifier": "extract_high_level_plan_and_actions",
"path": "driver/brain.py",
"snippet": "def extract_high_level_plan_and_actions(input: str):\n pattern = r\"^A\\. High.?level([\\s\\S]*?)^B\\.\\s*([\\s\\S]*)\"\n\n match = re.search(pattern, input, re.DOTALL | re.MULTILINE | re.IGNORECASE)\n\n ... | import os
import subprocess
import sys
import time
import pyautogui
import pyperclip
import pygetwindow
from typing import List
from driver.brain import (
extract_high_level_plan_and_actions,
extract_structured_actions,
plan_next_step_actions,
)
from driver.logger import print_action
from driver.annotator import annotate_image
from driver.types import Action, DebugConfig, LabelMap, Context, LabelMapItem
from colorama import Fore, Style
from driver.utils import is_retina_display | 2,572 |
def take_screenshot():
screenshot = pyautogui.screenshot()
os.makedirs("./output", exist_ok=True)
screenshot.save("./output/screenshot.png")
return "./output/screenshot.png"
def start(task: str, debug: DebugConfig):
screenshot = take_screenshot()
label_map, output_image_path, img_multiplier_factor = annotate_image(
screenshot, debug=debug
)
|
def take_screenshot():
screenshot = pyautogui.screenshot()
os.makedirs("./output", exist_ok=True)
screenshot.save("./output/screenshot.png")
return "./output/screenshot.png"
def start(task: str, debug: DebugConfig):
screenshot = take_screenshot()
label_map, output_image_path, img_multiplier_factor = annotate_image(
screenshot, debug=debug
)
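# Presumably img_multiplier_factor is the screenshot-pixel to screen-point
# ratio (e.g. 2 on retina displays, cf. the is_retina_display import), needed
# to map annotated coordinates back to pyautogui's screen space.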
| context: Context = { | 5 | 2023-12-10 17:18:28+00:00 | 4k |
baidubce/app-builder | appbuilder/core/components/retriever/bes_retriever.py | [
{
"identifier": "Component",
"path": "appbuilder/core/component.py",
"snippet": "class ComponentArguments(BaseModel):\nclass Component:\n def extract_values_to_dict(self):\n def __init__(self,\n meta: Optional[ComponentArguments] = ComponentArguments(),\n secret_key... | import importlib
import os
import random
import string
from typing import Dict, Any
from appbuilder.core.component import Component, Message
from appbuilder.core.components.embeddings.component import Embedding
from appbuilder.core.constants import GATEWAY_URL
from appbuilder.utils.logger_util import logger
from elasticsearch import Elasticsearch, helpers | 2,895 | """
secret_key = os.getenv("APPBUILDER_TOKEN")
if not secret_key.startswith("Bearer"):
secret_key = "Bearer {}".format(secret_key)
gateway = os.getenv("GATEWAY_URL") if os.getenv("GATEWAY_URL") else GATEWAY_URL
connection_params = {
"hosts": [gateway + self.prefix + self.base_es_url + cluster_id],
"http_auth": (user_name, password),
"headers": {'X-Appbuilder-Authorization': f"{secret_key}"}
}
bes_client = self.es(**connection_params)
try:
bes_client.info()
except Exception as e:
logger.error("connecting to bes error: {}".format(e))
raise ConnectionError(e)
return bes_client
def as_retriever(self):
"""
Convert this index into a retriever.
"""
return BESRetriever(embedding=self.embedding, index_name=self.index_name, bes_client=self.bes_client,
index_type=self.index_type)
@staticmethod
def create_index_mappings(index_type, vector_dims):
"""
Create the mapping for the index.
"""
mappings = {
'properties': {
"vector": {
"type": "bpack_vector",
"dims": vector_dims,
},
}
}
if index_type == "hnsw":
mappings["properties"]["vector"]["index_type"] = "hnsw"
mappings["properties"]["vector"]["space_type"] = "cosine"
mappings["properties"]["vector"]["parameters"] = {"m": 4, "ef_construction": 200}
return mappings
def add_segments(self, segments: Message, metadata=""):
"""
Insert data into BES.
Args:
query (Message[str]): the content to insert
Returns:
"""
segment_vectors = self.embedding.batch(segments)
segment_vectors = segment_vectors.content
vector_dims = len(segment_vectors[0])
segments = segments.content
documents = [
{"_index": self.index_name,
"_source": {"text": segment, "vector": vector, "metadata": metadata,
"id": BESVectorStoreIndex.generate_id()}}
for segment, vector in zip(segments, segment_vectors)]
mappings = BESVectorStoreIndex.create_index_mappings(self.index_type, vector_dims)
self.bes_client.indices.create(index=self.index_name,
body={"settings": {"index": {"knn": True}}, "mappings": mappings})
self.helpers.bulk(self.bes_client, documents)
@classmethod
def from_segments(cls, segments, cluster_id, user_name, password, embedding=None, **kwargs):
"""
Create a BES vector index from segments.
Args:
segments: the split text segments
cluster_id: BES cluster ID
user_name: BES username
password: BES user password
embedding: embedding tool for the text segments
kwargs: other initialization parameters
Returns:
a BES index instance
"""
if embedding is None:
embedding = Embedding()
index_name = kwargs.get("index_name", None)
index_type = kwargs.get("index_type", "hnsw")
prefix = kwargs.get("prefix", "/rpc/2.0/cloud_hub")
vector_index = cls(cluster_id, user_name, password, embedding, index_name, index_type, prefix)
vector_index.add_segments(segments)
return vector_index
def delete_all_segments(self):
"""
Delete all contents of the index.
"""
query = {
'query': {
'match_all': {}
}
}
resp = self.bes_client.delete_by_query(index=self.index_name, body=query)
logger.debug("deleted {} documents in index {}".format(resp['deleted'], self.index_name))
def get_all_segments(self):
"""
Retrieve all contents of the index.
"""
query = {
'query': {
'match_all': {}
}
}
return self.bes_client.search(index=self.index_name, body=query)
| # Copyright (c) 2023 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
Retriever based on Baidu ES (BES).
"""
class BESVectorStoreIndex:
"""
BES vector store retrieval tool.
"""
base_es_url: str = "/v1/bce/bes/cluster/"
def __init__(self, cluster_id, user_name, password, embedding=None, index_name=None,
index_type="hnsw", prefix="/rpc/2.0/cloud_hub"):
if embedding is None:
embedding = Embedding()
self.embedding = embedding
self.index_name = index_name if index_name else BESVectorStoreIndex.generate_id()
self.index_type = index_type
self.prefix = prefix
self._es = None
self._helpers = None
self.bes_client = self._create_bes_client(cluster_id, user_name, password)
@property
def es(self):
self._lazy_import_es()
return self._es
@property
def helpers(self):
self._lazy_import_es()
return self._helpers
def _lazy_import_es(self):
if self._es is None or self._helpers is None:
try:
self._es = Elasticsearch
self._helpers = helpers
except ImportError:
raise ImportError("Elasticsearch module is not installed. "
"Please install it using 'pip install elasticsearch==7.11.0'.")
@staticmethod
def generate_id(length=16):
"""
Generate a random ID.
"""
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
def _create_bes_client(self, cluster_id, user_name, password):
"""
Create a BES client.
"""
secret_key = os.getenv("APPBUILDER_TOKEN")
if not secret_key.startswith("Bearer"):
secret_key = "Bearer {}".format(secret_key)
gateway = os.getenv("GATEWAY_URL") if os.getenv("GATEWAY_URL") else GATEWAY_URL
connection_params = {
"hosts": [gateway + self.prefix + self.base_es_url + cluster_id],
"http_auth": (user_name, password),
"headers": {'X-Appbuilder-Authorization': f"{secret_key}"}
}
bes_client = self.es(**connection_params)
try:
bes_client.info()
except Exception as e:
logger.error("connecting to bes error: {}".format(e))
raise ConnectionError(e)
return bes_client
def as_retriever(self):
"""
Convert this index into a retriever.
"""
return BESRetriever(embedding=self.embedding, index_name=self.index_name, bes_client=self.bes_client,
index_type=self.index_type)
@staticmethod
def create_index_mappings(index_type, vector_dims):
"""
Create the mapping for the index.
"""
mappings = {
'properties': {
"vector": {
"type": "bpack_vector",
"dims": vector_dims,
},
}
}
if index_type == "hnsw":
mappings["properties"]["vector"]["index_type"] = "hnsw"
mappings["properties"]["vector"]["space_type"] = "cosine"
mappings["properties"]["vector"]["parameters"] = {"m": 4, "ef_construction": 200}
return mappings
def add_segments(self, segments: Message, metadata=""):
"""
Insert data into BES.
Args:
query (Message[str]): the content to insert
Returns:
"""
segment_vectors = self.embedding.batch(segments)
segment_vectors = segment_vectors.content
vector_dims = len(segment_vectors[0])
segments = segments.content
documents = [
{"_index": self.index_name,
"_source": {"text": segment, "vector": vector, "metadata": metadata,
"id": BESVectorStoreIndex.generate_id()}}
for segment, vector in zip(segments, segment_vectors)]
mappings = BESVectorStoreIndex.create_index_mappings(self.index_type, vector_dims)
self.bes_client.indices.create(index=self.index_name,
body={"settings": {"index": {"knn": True}}, "mappings": mappings})
self.helpers.bulk(self.bes_client, documents)
@classmethod
def from_segments(cls, segments, cluster_id, user_name, password, embedding=None, **kwargs):
"""
Create a BES vector index from segments.
Args:
segments: the split text segments
cluster_id: BES cluster ID
user_name: BES username
password: BES user password
embedding: embedding tool for the text segments
kwargs: other initialization parameters
Returns:
a BES index instance
"""
if embedding is None:
embedding = Embedding()
index_name = kwargs.get("index_name", None)
index_type = kwargs.get("index_type", "hnsw")
prefix = kwargs.get("prefix", "/rpc/2.0/cloud_hub")
vector_index = cls(cluster_id, user_name, password, embedding, index_name, index_type, prefix)
vector_index.add_segments(segments)
return vector_index
def delete_all_segments(self):
"""
Delete all contents of the index.
"""
query = {
'query': {
'match_all': {}
}
}
resp = self.bes_client.delete_by_query(index=self.index_name, body=query)
logger.debug("deleted {} documents in index {}".format(resp['deleted'], self.index_name))
def get_all_segments(self):
"""
Retrieve all contents of the index.
"""
query = {
'query': {
'match_all': {}
}
}
return self.bes_client.search(index=self.index_name, body=query)
| class BESRetriever(Component): | 0 | 2023-12-05 01:48:12+00:00 | 4k |
corfyi/UCMCTrack | demo.py | [
{
"identifier": "UCMCTrack",
"path": "tracker/ucmc.py",
"snippet": "class UCMCTrack(object):\n def __init__(self,a1,a2,wx, wy,vmax, max_age, fps, dataset, high_score, use_cmc,detector = None):\n self.wx = wx\n self.wy = wy\n self.vmax = vmax\n self.dataset = dataset\n ... | from ultralytics import YOLO
from tracker.ucmc import UCMCTrack
from detector.mapper import Mapper
import os,cv2
import argparse
import numpy as np | 3,527 |
# A Detection class holding id, bb_left, bb_top, bb_width, bb_height, conf, det_class
class Detection:
def __init__(self, id, bb_left = 0, bb_top = 0, bb_width = 0, bb_height = 0, conf = 0, det_class = 0):
self.id = id
self.bb_left = bb_left
self.bb_top = bb_top
self.bb_width = bb_width
self.bb_height = bb_height
self.conf = conf
self.det_class = det_class
self.track_id = 0
self.y = np.zeros((2, 1))
self.R = np.eye(4)
def __str__(self):
return 'd{}, bb_box:[{},{},{},{}], conf={:.2f}, class{}, uv:[{:.0f},{:.0f}], mapped to:[{:.1f},{:.1f}]'.format(
self.id, self.bb_left, self.bb_top, self.bb_width, self.bb_height, self.conf, self.det_class,
self.bb_left+self.bb_width/2,self.bb_top+self.bb_height,self.y[0,0],self.y[1,0])
def __repr__(self):
return self.__str__()
# Detector class: obtains object detection results from the YOLO detector
class Detector:
def __init__(self):
self.seq_length = 0
self.gmc = None
def load(self,cam_para_file):
self.mapper = Mapper(cam_para_file,"MOT17")
self.model = YOLO('pretrained/yolov8x.pt')
def get_dets(self, img,conf_thresh = 0,det_classes = [0]):
dets = []
# Convert the frame from BGR to RGB (OpenCV uses BGR format)
frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Run inference with the YOLO model
results = self.model(frame,imgsz = 1088)
det_id = 0
for box in results[0].boxes:
conf = box.conf.cpu().numpy()[0]
bbox = box.xyxy.cpu().numpy()[0]
cls_id = box.cls.cpu().numpy()[0]
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
if (w <= 10 and h <= 10) or cls_id not in det_classes or conf <= conf_thresh:
continue
# Create a new Detection object
det = Detection(det_id)
det.bb_left = bbox[0]
det.bb_top = bbox[1]
det.bb_width = w
det.bb_height = h
det.conf = conf
det.det_class = cls_id
det.y,det.R = self.mapper.mapto([det.bb_left,det.bb_top,det.bb_width,det.bb_height])
det_id += 1
dets.append(det)
return dets
def main(args):
class_list = [2,5,7]
cap = cv2.VideoCapture(args.video)
# Get the video fps
fps = cap.get(cv2.CAP_PROP_FPS)
# Get the video width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_out = cv2.VideoWriter('output/output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
# Open an OpenCV window with the specified width and height
cv2.namedWindow("demo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("demo", width, height)
detector = Detector()
detector.load(args.cam_para)
|
# A Detection class holding id, bb_left, bb_top, bb_width, bb_height, conf, det_class
class Detection:
def __init__(self, id, bb_left = 0, bb_top = 0, bb_width = 0, bb_height = 0, conf = 0, det_class = 0):
self.id = id
self.bb_left = bb_left
self.bb_top = bb_top
self.bb_width = bb_width
self.bb_height = bb_height
self.conf = conf
self.det_class = det_class
self.track_id = 0
self.y = np.zeros((2, 1))
self.R = np.eye(4)
def __str__(self):
return 'd{}, bb_box:[{},{},{},{}], conf={:.2f}, class{}, uv:[{:.0f},{:.0f}], mapped to:[{:.1f},{:.1f}]'.format(
self.id, self.bb_left, self.bb_top, self.bb_width, self.bb_height, self.conf, self.det_class,
self.bb_left+self.bb_width/2,self.bb_top+self.bb_height,self.y[0,0],self.y[1,0])
def __repr__(self):
return self.__str__()
# Detector class: obtains object detection results from the YOLO detector
class Detector:
def __init__(self):
self.seq_length = 0
self.gmc = None
def load(self,cam_para_file):
self.mapper = Mapper(cam_para_file,"MOT17")
self.model = YOLO('pretrained/yolov8x.pt')
def get_dets(self, img,conf_thresh = 0,det_classes = [0]):
dets = []
# Convert the frame from BGR to RGB (OpenCV uses BGR format)
frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Run inference with the YOLO model
results = self.model(frame,imgsz = 1088)
det_id = 0
for box in results[0].boxes:
conf = box.conf.cpu().numpy()[0]
bbox = box.xyxy.cpu().numpy()[0]
cls_id = box.cls.cpu().numpy()[0]
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
if (w <= 10 and h <= 10) or cls_id not in det_classes or conf <= conf_thresh:
continue
# Create a new Detection object
det = Detection(det_id)
det.bb_left = bbox[0]
det.bb_top = bbox[1]
det.bb_width = w
det.bb_height = h
det.conf = conf
det.det_class = cls_id
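# Project the detection onto the ground plane: y is the mapped position and
# R its measurement covariance, which the tracker consumes downstream.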
det.y,det.R = self.mapper.mapto([det.bb_left,det.bb_top,det.bb_width,det.bb_height])
det_id += 1
dets.append(det)
return dets
def main(args):
class_list = [2,5,7]
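# Assuming the standard COCO ordering used by YOLOv8: 2 = car, 5 = bus, 7 = truck.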
cap = cv2.VideoCapture(args.video)
# Get the video fps
fps = cap.get(cv2.CAP_PROP_FPS)
# Get the video width and height
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_out = cv2.VideoWriter('output/output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
# Open an OpenCV window with the specified width and height
cv2.namedWindow("demo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("demo", width, height)
detector = Detector()
detector.load(args.cam_para)
| tracker = UCMCTrack(args.a, args.a, args.wx, args.wy, args.vmax, args.cdt, fps, "MOT", args.high_score,False,None) | 0 | 2023-12-12 07:29:20+00:00 | 4k |
ingra14m/Specular-Gaussians | scene/gaussian_model.py | [
{
"identifier": "inverse_sigmoid",
"path": "utils/general_utils.py",
"snippet": "def inverse_sigmoid(x):\n return torch.log(x / (1 - x))"
},
{
"identifier": "get_expon_lr_func",
"path": "utils/general_utils.py",
"snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_s... | import torch
import numpy as np
import os
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation, get_linear_noise_func
from torch import nn
from utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from utils.sh_utils import RGB2SH
from simple_knn._C import distCUDA2
from utils.graphics_utils import BasicPointCloud
from utils.general_utils import strip_symmetric, build_scaling_rotation, flip_align_view, get_minimum_axis | 2,512 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class GaussianModel:
def __init__(self, sh_degree: int, asg_degree: int):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self.max_asg_degree = asg_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self._features_asg = torch.empty(0)
self._normal = torch.empty(0)
self._normal2 = torch.empty(0)
self._roughness = torch.empty(0)
self._albedo = torch.empty(0)
self._metallic = torch.empty(0)
self.optimizer = None
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
@property
def get_asg_features(self):
return self._features_asg
@property
def get_roughness(self):
return self._roughness
@property
def get_albedo(self):
return self._albedo
@property
def get_metallic(self):
return self._metallic
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier=1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def get_normal(self, dir_pp_normalized=None, return_delta=False):
normal_axis = self.get_minimum_axis
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class GaussianModel:
def __init__(self, sh_degree: int, asg_degree: int):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
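# Builds the Gaussian covariance as L @ L^T with L = R(rotation) @ diag(scaling);
# strip_symmetric presumably keeps only the unique entries of the symmetric matrix.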
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self.max_asg_degree = asg_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self._features_asg = torch.empty(0)
self._normal = torch.empty(0)
self._normal2 = torch.empty(0)
self._roughness = torch.empty(0)
self._albedo = torch.empty(0)
self._metallic = torch.empty(0)
self.optimizer = None
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
@property
def get_asg_features(self):
return self._features_asg
@property
def get_roughness(self):
return self._roughness
@property
def get_albedo(self):
return self._albedo
@property
def get_metallic(self):
return self._metallic
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier=1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def get_normal(self, dir_pp_normalized=None, return_delta=False):
normal_axis = self.get_minimum_axis | normal_axis, positive = flip_align_view(normal_axis, dir_pp_normalized) | 9 | 2023-12-12 14:59:01+00:00 | 4k |
Artiprocher/DiffSynth-Studio | diffsynth/models/sd_vae_decoder.py | [
{
"identifier": "Attention",
"path": "diffsynth/models/attention.py",
"snippet": "class Attention(torch.nn.Module):\n\n def __init__(self, q_dim, num_heads, head_dim, kv_dim=None, bias_q=False, bias_kv=False, bias_out=False):\n super().__init__()\n dim_inner = head_dim * num_heads\n ... | import torch
from .attention import Attention
from .sd_unet import ResnetBlock, UpSampler
from .tiler import TileWorker | 3,000 |
class VAEAttentionBlock(torch.nn.Module):
def __init__(self, num_attention_heads, attention_head_dim, in_channels, num_layers=1, norm_num_groups=32, eps=1e-5):
super().__init__()
inner_dim = num_attention_heads * attention_head_dim
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=eps, affine=True)
self.transformer_blocks = torch.nn.ModuleList([
Attention(
inner_dim,
num_attention_heads,
attention_head_dim,
bias_q=True,
bias_kv=True,
bias_out=True
)
for d in range(num_layers)
])
def forward(self, hidden_states, time_emb, text_emb, res_stack):
batch, _, height, width = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
for block in self.transformer_blocks:
hidden_states = block(hidden_states)
hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
hidden_states = hidden_states + residual
return hidden_states, time_emb, text_emb, res_stack
class SDVAEDecoder(torch.nn.Module):
def __init__(self):
super().__init__()
self.scaling_factor = 0.18215
self.post_quant_conv = torch.nn.Conv2d(4, 4, kernel_size=1)
self.conv_in = torch.nn.Conv2d(4, 512, kernel_size=3, padding=1)
self.blocks = torch.nn.ModuleList([
# UNetMidBlock2D
|
class VAEAttentionBlock(torch.nn.Module):
def __init__(self, num_attention_heads, attention_head_dim, in_channels, num_layers=1, norm_num_groups=32, eps=1e-5):
super().__init__()
inner_dim = num_attention_heads * attention_head_dim
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=eps, affine=True)
self.transformer_blocks = torch.nn.ModuleList([
Attention(
inner_dim,
num_attention_heads,
attention_head_dim,
bias_q=True,
bias_kv=True,
bias_out=True
)
for d in range(num_layers)
])
def forward(self, hidden_states, time_emb, text_emb, res_stack):
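# Spatial self-attention: (B, C, H, W) is flattened to (B, H*W, C) tokens,
# attended, reshaped back, and added to the input as a residual.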
batch, _, height, width = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
for block in self.transformer_blocks:
hidden_states = block(hidden_states)
hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
hidden_states = hidden_states + residual
return hidden_states, time_emb, text_emb, res_stack
class SDVAEDecoder(torch.nn.Module):
def __init__(self):
super().__init__()
self.scaling_factor = 0.18215
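# 0.18215 is the standard Stable Diffusion VAE latent scaling factor; latents
# are typically divided by it before being decoded.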
self.post_quant_conv = torch.nn.Conv2d(4, 4, kernel_size=1)
self.conv_in = torch.nn.Conv2d(4, 512, kernel_size=3, padding=1)
self.blocks = torch.nn.ModuleList([
# UNetMidBlock2D | ResnetBlock(512, 512, eps=1e-6), | 1 | 2023-12-07 16:52:15+00:00 | 4k |
vikhyat/mixtral-inference | main.py | [
{
"identifier": "RotatingBufferCache",
"path": "mixtral/cache.py",
"snippet": "class RotatingBufferCache:\n \"\"\"\n This is an example that implements a less naive rotating buffer cache, allowing for variable length sequences.\n Allocated cache is rectangular which is wasteful (see PagedAttent... | from mixtral.cache import RotatingBufferCache
from typing import List
from pathlib import Path
from mixtral.model import Transformer
from mixtral.tokenizer import Tokenizer
import torch | 2,770 |
def sample_top_p(probs: torch.Tensor, p: float):
assert 0 <= p <= 1
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort[mask] = 0.0
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = torch.multinomial(probs_sort, num_samples=1)
return torch.gather(probs_idx, -1, next_token)
def sample(logits: torch.Tensor, temperature: float, top_p: float):
if temperature > 0:
probs = torch.softmax(logits / temperature, dim=-1)
next_token = sample_top_p(probs, top_p)
else:
next_token = torch.argmax(logits, dim=-1).unsqueeze(0)
return next_token.reshape(-1)
@torch.inference_mode()
def generate(prompts: List[str], model: Transformer, tokenizer: Tokenizer, *, max_tokens: int, chunk_size: int = None, temperature: float = 0.7, stdout=False):
model = model.eval()
B, V = len(prompts), model.args.vocab_size
# Tokenize
encoded_prompts = [tokenizer.encode(prompt, bos=True) for prompt in prompts]
seqlens = [len(x) for x in encoded_prompts]
# Cache
cache_window = max(seqlens) + max_tokens
|
def sample_top_p(probs: torch.Tensor, p: float):
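# Nucleus (top-p) sampling: sort tokens by probability, keep the smallest
# prefix whose cumulative mass covers p (the `probs_sum - probs_sort > p`
# mask zeroes the rest), renormalize, and draw one token from what remains.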
assert 0 <= p <= 1
probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
probs_sum = torch.cumsum(probs_sort, dim=-1)
mask = probs_sum - probs_sort > p
probs_sort[mask] = 0.0
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
next_token = torch.multinomial(probs_sort, num_samples=1)
return torch.gather(probs_idx, -1, next_token)
def sample(logits: torch.Tensor, temperature: float, top_p: float):
if temperature > 0:
probs = torch.softmax(logits / temperature, dim=-1)
next_token = sample_top_p(probs, top_p)
else:
next_token = torch.argmax(logits, dim=-1).unsqueeze(0)
return next_token.reshape(-1)
@torch.inference_mode()
def generate(prompts: List[str], model: Transformer, tokenizer: Tokenizer, *, max_tokens: int, chunk_size: int = None, temperature: float = 0.7, stdout=False):
model = model.eval()
B, V = len(prompts), model.args.vocab_size
# Tokenize
encoded_prompts = [tokenizer.encode(prompt, bos=True) for prompt in prompts]
seqlens = [len(x) for x in encoded_prompts]
# Cache
cache_window = max(seqlens) + max_tokens | cache = RotatingBufferCache(model.args.n_layers, model.args.max_batch_size, cache_window, model.args.n_kv_heads, model.args.head_dim) | 0 | 2023-12-08 22:48:32+00:00 | 4k |
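# Sizing assumption: max(seqlens) + max_tokens guarantees the rotating buffer
# can hold every token produced in this call, so no entry that an attention
# step still needs gets overwritten.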
u2seg/U2Seg | u2seg/UnsupervisedSelectiveLabeling/shared/utils/nn_utils_imagenet.py | [
{
"identifier": "cfg",
"path": "u2seg/UnsupervisedSelectiveLabeling/shared/utils/config_utils.py",
"snippet": "class CustomFormatter(logging.Formatter):\n FORMATS = {\n logging.DEBUG: format,\n logging.INFO: grey + format + reset,\n logging.WARNING: yellow + format + reset,\n ... | import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import pandas as pd
import clip
from glob import glob
from PIL import Image, ImageFilter
from pykeops.torch import LazyTensor
from tqdm import tqdm
from .config_utils import cfg
from .nn_utils import get_transform, normalization_kwargs_dict | 3,125 |
# Credit: https://github.com/amazon-research/exponential-moving-average-normalization
def build_hidden_head(num_mlp, dim_mlp, dim, normed=False):
modules = []
for _ in range(1, num_mlp):
modules.append(nn.Linear(dim_mlp, dim_mlp))
modules.append(nn.ReLU())
modules.append(nn.Linear(dim_mlp, dim))
if normed:
modules.append(L2NormLayer())
return nn.Sequential(*modules)
# Credit: https://github.com/wvangansbeke/Unsupervised-Classification/blob/master/data/imagenet.py
class ImageNet(datasets.ImageFolder):
def __init__(self, root, split='train', transform=None):
super(ImageNet, self).__init__(root=os.path.join(root, split),
transform=None)
self.transform = transform
self.split = split
self.resize = transforms.Resize(256)
def __len__(self):
return len(self.imgs)
def __getitem__(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
im_size = img.size
img = self.resize(img)
if self.transform is not None:
img = self.transform(img)
out = {'image': img, 'target': target, 'meta': {
'im_size': im_size, 'index': index}}
return out
def get_image(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
img = self.resize(img)
return img
# class ImageNetSubset(torch.utils.data.Dataset):
# def __init__(self, subset_file, root, split='train',
# transform=None, return_dict=True):
# super(ImageNetSubset, self).__init__()
# self.root = os.path.join(root, split)
# self.transform = transform
# self.split = split
# # Read the subset of classes to include (sorted)
# with open(subset_file, 'r') as f:
# result = f.read().splitlines()
# subdirs, class_names = [], []
# for line in result:
# subdir, class_name = line.split(' ', 1)
# subdirs.append(subdir)
# class_names.append(class_name)
# # Gather the files (sorted)
# imgs = []
# targets = []
# for i, subdir in enumerate(subdirs):
# # subdir_path = os.path.join(self.root, subdir)
# files = sorted(glob(os.path.join(self.root, subdir, '*.JPEG')))
# for f in files:
# imgs.append((f, i))
# targets.append(i)
# self.imgs = imgs
# self.classes = class_names
# self.targets = targets
# self.resize = transforms.Resize(256)
# self.return_dict = return_dict
# def get_image(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# img = self.resize(img)
# return img
# def __len__(self):
# return len(self.imgs)
# def __getitem__(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# im_size = img.size
# if self.return_dict:
# img = self.resize(img)
# class_name = self.classes[target]
# if self.transform is not None:
# img = self.transform(img)
# if self.return_dict:
# out = {'image': img, 'target': target, 'meta': {
# 'im_size': im_size, 'index': index, 'class_name': class_name}}
# return out
# return img, target
def train_dataset_imagenet(transform_name, add_memory_bank_dataset=False):
# Uses MoCov2 aug: https://github.com/facebookresearch/moco/blob/main/main_moco.py
if transform_name == "imagenet" or transform_name == "imagenet100":
| # Datasets
def get_clip_model(name):
model, preprocess = clip.load(name)
model.eval()
model.cuda()
return model, preprocess
def get_img_path_from_full_path(full_path):
img_path = full_path[0]
sep = "/"
img_path = sep.join(img_path.split(sep)[-2:])
return img_path
def gen_csv_data(save_filename, selected_inds, train_memory_dataset, gen_rem=False):
if isinstance(selected_inds, torch.Tensor):
print("***Please use numpy array as selected_inds***")
# gen_rem: generate remaining data by excluding the selected data
if gen_rem:
rem_set = set(range(len(train_memory_dataset.imgs)))
rem_set = rem_set - set(list(selected_inds))
selected_inds = np.array(list(rem_set))
selected_inds = np.sort(selected_inds)
print(len(selected_inds))
d = []
for ind in selected_inds:
d.append([ind, get_img_path_from_full_path(train_memory_dataset.imgs[ind])])
filename = "{}.csv".format(save_filename)
assert not os.path.exists(filename), "path {} exists".format(filename)
df = pd.DataFrame(data=d, columns=["Index", "ImageID"])
df.to_csv(filename, index=False)
def save_data(gen_mode, stratified_density_selected_data_output, ours_filename_part, feats_list, final_sample_num, chosen_percent, train_memory_dataset):
print("Generation mode:", gen_mode)
if gen_mode == "ours":
selected_inds = stratified_density_selected_data_output
filename_part = ours_filename_part
elif gen_mode == "random":
np.random.seed(0)
selected_inds = np.random.choice(feats_list.size(0), size=(final_sample_num,), replace=False)
filename_part = "random"
else:
raise ValueError("gen_mode: " + gen_mode)
for gen_rem in [False, True]:
if gen_rem:
filename = "train_{}p_gen_{}_index".format(100 - chosen_percent, filename_part)
else:
filename = "train_{}p_gen_{}_index".format(chosen_percent, filename_part)
filename = os.path.join(cfg.RUN_DIR, filename)
print("Filename:", filename)
gen_csv_data(filename, selected_inds, train_memory_dataset, gen_rem=gen_rem)
def get_sample_info_imagenet1(final_sample_num):
if final_sample_num == 4:
# 0.3 percent
num_centroids = 4
chosen_percent = 0.3
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_sample_info_imagenet100(final_sample_num):
if final_sample_num == 400:
# 0.3 percent
num_centroids = 400
chosen_percent = 0.3
elif final_sample_num == 4000:
# 3 percent
num_centroids = 4000
chosen_percent = 3
elif final_sample_num == 8000:
# 6 percent
num_centroids = 8000
chosen_percent = 6
elif final_sample_num == 16000:
# 12 percent
num_centroids = 16000
chosen_percent = 12
elif final_sample_num == 32000:
# 24 percent
num_centroids = 32000
chosen_percent = 24
elif final_sample_num == 64000:
# 48 percent
num_centroids = 64000
chosen_percent = 48
elif final_sample_num == 100:
# 0.075 percent
num_centroids = 100
chosen_percent = 0.075
elif final_sample_num == 3200:
# 2.4 percent
num_centroids = 3200
chosen_percent = 2.4
elif final_sample_num == 200:
# 0.15 percent
num_centroids = 200
chosen_percent = 0.15
elif final_sample_num == 800:
# 0.6 percent
num_centroids = 800
chosen_percent = 0.6
elif final_sample_num == 1200:
# 0.9 percent
num_centroids = 1200
chosen_percent = 0.9
elif final_sample_num == 1600:
# 1.2 percent
num_centroids = 1600
chosen_percent = 1.2
elif final_sample_num == 300:
# 0.225 percent
num_centroids = 300
chosen_percent = 0.225
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_sample_info_coco(final_sample_num):
if final_sample_num == 80:
# 0.016 percent
num_centroids = 80
chosen_percent = 0.016
elif final_sample_num == 300:
# 0.06 percent
num_centroids = 300
chosen_percent = 0.06
elif final_sample_num == 800:
# 0.16 percent
num_centroids = 800
chosen_percent = 0.16
elif final_sample_num == 2911:
# 0.582 percent
num_centroids = 2911
chosen_percent = 0.582
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_sample_info_imagenet(final_sample_num):
if final_sample_num == 12820:
# 1 percent
num_centroids = 12900
chosen_percent = 1
elif final_sample_num == 1282:
# 0.1 percent
num_centroids = 1290
chosen_percent = 0.1
elif final_sample_num == 2911:
# 0.2 percent
num_centroids = 2911
chosen_percent = 0.2
elif final_sample_num == 290:
# 0.02 percent
num_centroids = 290
chosen_percent = 0.02
elif final_sample_num == 300:
# 0.021 percent
num_centroids = 300
chosen_percent = 0.021
elif final_sample_num == 800:
# 0.06 percent
num_centroids = 800
chosen_percent = 0.06
elif final_sample_num == 80:
# 0.006 percent
num_centroids = 80
chosen_percent = 0.006
elif final_sample_num == 1600:
# 0.12 percent
num_centroids = 1600
chosen_percent = 0.12
else:
raise ValueError(final_sample_num)
return num_centroids, chosen_percent
def get_selection_with_reg_imagenet_outliers(data, neighbors_dist, cluster_labels, num_centroids,
iters=1, final_sample_num=None, w=1, momentum=0.5, horizon_num=256, alpha=1, exclude_same_cluster=False, verbose=False):
# Intuition: horizon_num = dimension * 2
cluster_labels_cuda = cluster_labels.cuda()
neighbors_dist_cuda = neighbors_dist.cuda()
selection_regularizer = torch.zeros_like(neighbors_dist_cuda)
data = data.cuda()
N, D = data.shape # Number of samples, dimension of the ambient space
data_expanded_lazy = LazyTensor(data.view(N, 1, D))
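# KeOps LazyTensor: the (N_full, N_selected) squared-distance reductions below
# are evaluated lazily on GPU without materializing the full distance matrix.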
for iter_ind in tqdm(range(iters)):
selected_inds = []
if verbose:
print("Computing selected ids")
print("selection_regularizer", selection_regularizer)
for cls_ind in range(num_centroids):
if len(selected_inds) == final_sample_num:
break
match_arr = cluster_labels_cuda == cls_ind
match = torch.where(match_arr)[0]
if len(match) == 0:
continue
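# Score = density estimate (inverse distance to nearest neighbors) minus a
# diversity regularizer penalizing proximity to already-selected samples.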
scores = 1 / neighbors_dist_cuda[match_arr] - w * selection_regularizer[match_arr]
if iter_ind != 0 and cls_ind == 0 and verbose:
print("original score:", (1 / neighbors_dist_cuda[match_arr]).mean(),
"regularizer adjustment:", (w * selection_regularizer[match_arr]).mean())
max_dist_ind = scores.argmin() # select the sample farthest away (lowest score)
selected_inds.append(match[max_dist_ind].item())
selected_inds = torch.tensor(selected_inds)
if iter_ind < iters - 1: # Not the last iteration
if verbose:
print("Updating selection regularizer")
selected_data = data[selected_inds]
if not exclude_same_cluster:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
new_selection_regularizer = new_selection_regularizer.Kmin(
horizon_num, dim=1)
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to oneself should be ignored
new_selection_regularizer[new_selection_regularizer == 0] = 1e10
else:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
# We take the horizon_num samples with the min distance to the other centroids
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
# indices within selected data
new_selection_regularizer, selected_data_ind = new_selection_regularizer.Kmin_argKmin(horizon_num,
dim=1, backend="GPU")
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to the instance in the same cluster should be ignored (including oneself if the sample is currently selected)
# **NOTE**: if some clusters are skipped, select_data_ind may not match cluster_labels
# This does not happen in 0.2% case, but could happen in 1% case.
same_cluster_selected_data_ind_mask = (
selected_data_ind == cluster_labels_cuda.view((-1, 1))).float()
# It's true that if cluster is not in the list, some instances will have one more regularizer item, but this is a small contribution.
new_selection_regularizer = (1 - same_cluster_selected_data_ind_mask) * \
new_selection_regularizer + same_cluster_selected_data_ind_mask * 1e10
assert not torch.any(new_selection_regularizer == 0), "{}".format(
torch.where(new_selection_regularizer == 0))
if verbose:
print("Min:", new_selection_regularizer.min())
# selection_regularizer: N_full
if alpha != 1:
new_selection_regularizer = (
1 / new_selection_regularizer ** alpha).sum(dim=1)
else:
new_selection_regularizer = (
1 / new_selection_regularizer).sum(dim=1)
selection_regularizer = selection_regularizer * \
momentum + new_selection_regularizer * (1 - momentum)
del cluster_labels_cuda
del neighbors_dist_cuda
del data
print("selected_inds:"+str(len(selected_inds)))
print("final_sample_num:"+str(final_sample_num))
# assert len(selected_inds) == final_sample_num
return selected_inds.numpy()
def get_selection_with_reg_imagenet(data, neighbors_dist, cluster_labels, num_centroids,
iters=1, final_sample_num=None, w=1, momentum=0.5, horizon_num=256, alpha=1, exclude_same_cluster=False, verbose=False):
# Intuition: horizon_num = dimension * 2
cluster_labels_cuda = cluster_labels.cuda()
neighbors_dist_cuda = neighbors_dist.cuda()
selection_regularizer = torch.zeros_like(neighbors_dist_cuda)
data = data.cuda()
N, D = data.shape # Number of samples, dimension of the ambient space
data_expanded_lazy = LazyTensor(data.view(N, 1, D))
for iter_ind in tqdm(range(iters)):
selected_inds = []
if verbose:
print("Computing selected ids")
print("selection_regularizer", selection_regularizer)
for cls_ind in range(num_centroids):
if len(selected_inds) == final_sample_num:
break
match_arr = cluster_labels_cuda == cls_ind
match = torch.where(match_arr)[0]
if len(match) == 0:
continue
# scores in the selection process
# No prior:
# scores = 1 / neighbors_dist[match_arr]
scores = 1 / \
neighbors_dist_cuda[match_arr] - w * \
selection_regularizer[match_arr]
if iter_ind != 0 and cls_ind == 0 and verbose:
print("original score:", (1 / neighbors_dist_cuda[match_arr]).mean(),
"regularizer adjustment:", (w * selection_regularizer[match_arr]).mean())
min_dist_ind = scores.argmax()
# min_dist_ind = scores.argmax()
selected_inds.append(match[min_dist_ind].item())
selected_inds = torch.tensor(selected_inds)
if iter_ind < iters - 1: # Not the last iteration
if verbose:
print("Updating selection regularizer")
selected_data = data[selected_inds]
if not exclude_same_cluster:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
new_selection_regularizer = new_selection_regularizer.Kmin(
horizon_num, dim=1)
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to oneself should be ignored
new_selection_regularizer[new_selection_regularizer == 0] = 1e10
else:
# This is square distances: (N_full, N_selected)
# data: (N_full, 1, dim)
# selected_data: (1, N_selected, dim)
# We take the horizon_num samples with the min distance to the other centroids
new_selection_regularizer = (
(data_expanded_lazy - selected_data[None, :, :]) ** 2).sum(dim=-1)
# indices within selected data
new_selection_regularizer, selected_data_ind = new_selection_regularizer.Kmin_argKmin(horizon_num,
dim=1, backend="GPU")
if verbose:
print("new_selection_regularizer shape:",
new_selection_regularizer.shape)
print("Max:", new_selection_regularizer.max())
print("Mean:", new_selection_regularizer.mean())
# Distance to the instance in the same cluster should be ignored (including oneself if the sample is currently selected)
# **NOTE**: if some clusters are skipped, select_data_ind may not match cluster_labels
# This does not happen in 0.2% case, but could happen in 1% case.
same_cluster_selected_data_ind_mask = (
selected_data_ind == cluster_labels_cuda.view((-1, 1))).float()
# It's true that if cluster is not in the list, some instances will have one more regularizer item, but this is a small contribution.
new_selection_regularizer = (1 - same_cluster_selected_data_ind_mask) * \
new_selection_regularizer + same_cluster_selected_data_ind_mask * 1e10
assert not torch.any(new_selection_regularizer == 0), "{}".format(
torch.where(new_selection_regularizer == 0))
if verbose:
print("Min:", new_selection_regularizer.min())
# selection_regularizer: N_full
if alpha != 1:
new_selection_regularizer = (
1 / new_selection_regularizer ** alpha).sum(dim=1)
else:
new_selection_regularizer = (
1 / new_selection_regularizer).sum(dim=1)
selection_regularizer = selection_regularizer * \
momentum + new_selection_regularizer * (1 - momentum)
del cluster_labels_cuda
del neighbors_dist_cuda
del data
assert len(selected_inds) == final_sample_num
return selected_inds.numpy()
# Credit: MoCov2 https://github.com/facebookresearch/moco/blob/main/moco/loader.py
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
class L2NormLayer(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return F.normalize(x, dim=1)
# Credit: https://github.com/amazon-research/exponential-moving-average-normalization
def build_hidden_head(num_mlp, dim_mlp, dim, normed=False):
modules = []
for _ in range(1, num_mlp):
modules.append(nn.Linear(dim_mlp, dim_mlp))
modules.append(nn.ReLU())
modules.append(nn.Linear(dim_mlp, dim))
if normed:
modules.append(L2NormLayer())
return nn.Sequential(*modules)
# Credit: https://github.com/wvangansbeke/Unsupervised-Classification/blob/master/data/imagenet.py
class ImageNet(datasets.ImageFolder):
def __init__(self, root, split='train', transform=None):
super(ImageNet, self).__init__(root=os.path.join(root, split),
transform=None)
self.transform = transform
self.split = split
self.resize = transforms.Resize(256)
def __len__(self):
return len(self.imgs)
def __getitem__(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
im_size = img.size
img = self.resize(img)
if self.transform is not None:
img = self.transform(img)
out = {'image': img, 'target': target, 'meta': {
'im_size': im_size, 'index': index}}
return out
def get_image(self, index):
path, target = self.imgs[index]
with open(path, 'rb') as f:
img = Image.open(f).convert('RGB')
img = self.resize(img)
return img
# class ImageNetSubset(torch.utils.data.Dataset):
# def __init__(self, subset_file, root, split='train',
# transform=None, return_dict=True):
# super(ImageNetSubset, self).__init__()
# self.root = os.path.join(root, split)
# self.transform = transform
# self.split = split
# # Read the subset of classes to include (sorted)
# with open(subset_file, 'r') as f:
# result = f.read().splitlines()
# subdirs, class_names = [], []
# for line in result:
# subdir, class_name = line.split(' ', 1)
# subdirs.append(subdir)
# class_names.append(class_name)
# # Gather the files (sorted)
# imgs = []
# targets = []
# for i, subdir in enumerate(subdirs):
# # subdir_path = os.path.join(self.root, subdir)
# files = sorted(glob(os.path.join(self.root, subdir, '*.JPEG')))
# for f in files:
# imgs.append((f, i))
# targets.append(i)
# self.imgs = imgs
# self.classes = class_names
# self.targets = targets
# self.resize = transforms.Resize(256)
# self.return_dict = return_dict
# def get_image(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# img = self.resize(img)
# return img
# def __len__(self):
# return len(self.imgs)
# def __getitem__(self, index):
# path, target = self.imgs[index]
# with open(path, 'rb') as f:
# img = Image.open(f).convert('RGB')
# im_size = img.size
# if self.return_dict:
# img = self.resize(img)
# class_name = self.classes[target]
# if self.transform is not None:
# img = self.transform(img)
# if self.return_dict:
# out = {'image': img, 'target': target, 'meta': {
# 'im_size': im_size, 'index': index, 'class_name': class_name}}
# return out
# return img, target
def train_dataset_imagenet(transform_name, add_memory_bank_dataset=False):
# Uses MoCov2 aug: https://github.com/facebookresearch/moco/blob/main/main_moco.py
if transform_name == "imagenet" or transform_name == "imagenet100": | normalization_kwargs = normalization_kwargs_dict[transform_name] | 1 | 2023-12-05 01:13:31+00:00 | 4k |
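For readers without pykeops, the Kmin-based regularizer update in the selection loop above reduces to the following plain-PyTorch sketch; the function name, the torch.cdist/topk substitution, and the default arguments are illustrative, not part of the original code.

import torch

def selection_regularizer_update(data, selected_data, horizon_num=8, alpha=1.0):
    # Squared Euclidean distances from every sample to every selected sample:
    # (N_full, N_selected), the dense equivalent of the LazyTensor expression.
    sq_dists = torch.cdist(data, selected_data, p=2) ** 2
    # Distance to oneself should be ignored, as in the snippet above.
    sq_dists[sq_dists == 0] = 1e10
    # k smallest distances per row stand in for LazyTensor.Kmin(horizon_num, dim=1).
    horizon, _ = sq_dists.topk(horizon_num, dim=1, largest=False)
    # Sum of inverse (powered) distances: repulsion from already-selected samples.
    return (1.0 / horizon ** alpha).sum(dim=1)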
upfusion3d/upfusion | control_net/ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "control_net/ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backwar... | from abc import abstractmethod
from control_net.ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from control_net.ldm.modules.attention import SpatialTransformer, SLTQueryTransformer
from control_net.ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 3,154 |
|
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None, slt=None, query_cameras=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context) | elif isinstance(layer, SLTQueryTransformer): | 8 | 2023-12-12 00:49:11+00:00 | 4k |
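The snippet's TimestepEmbedSequential routes extra inputs by layer type; the runnable sketch below mirrors that isinstance dispatch, with a hypothetical TimestepBlockStub standing in for ResBlock-style layers.

import torch
import torch.nn as nn

class TimestepBlockStub(nn.Module):
    # Stand-in for a TimestepBlock: consumes (x, emb) instead of just x.
    def forward(self, x, emb):
        return x + emb[:, :, None, None]

def forward_sequential(layers, x, emb, context=None):
    for layer in layers:
        if isinstance(layer, TimestepBlockStub):
            x = layer(x, emb)  # timestep-conditioned layers receive the embedding
        else:
            x = layer(x)       # ordinary layers see only the feature map
    return x

x = torch.randn(2, 8, 16, 16)
emb = torch.randn(2, 8)
layers = nn.ModuleList([TimestepBlockStub(), nn.Conv2d(8, 8, 3, padding=1)])
out = forward_sequential(layers, x, emb)  # (2, 8, 16, 16)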
modelscope/normal-depth-diffusion | tools/draw_imgs/draw_vae_epoch_curve.py | [
{
"identifier": "Txt2ImgIterableBaseDataset",
"path": "ldm/data/base.py",
"snippet": "class Txt2ImgIterableBaseDataset(IterableDataset):\n '''\n Define an interface to make the IterableDatasets for text2img data chainable\n '''\n\n def __init__(self, num_records=0, valid_ids=None, size=256):... | import argparse
import csv
import datetime
import glob
import importlib
import multiprocessing
import os
import pdb
import sys
import time
import warnings
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
import pudb
import signal
from functools import partial
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config
from omegaconf import OmegaConf
from packaging import version
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import (Callback, LearningRateMonitor,
ModelCheckpoint)
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.utilities import rank_zero_info
from torch import autocast
from torch.utils.data import DataLoader, Dataset, Subset, random_split
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.plugins.precision import MixedPrecisionPlugin
from pytorch_lightning.utilities.distributed import rank_zero_only | 1,664 |
| '''
using to test the difference between
'''
# Version-gated import; the branch bodies were missing in the snippet and are
# reconstructed here from the two rank_zero_only imports listed above.
if version.parse(pl.__version__) > version.parse('1.4.2'):
    from pytorch_lightning.utilities.distributed import rank_zero_only
else:
    from pytorch_lightning.utilities import rank_zero_only
warnings.filterwarnings('ignore')
def get_parser(**parser_kwargs):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(**parser_kwargs)
parser.add_argument(
'-n',
'--name',
type=str,
const=True,
default='',
nargs='?',
help='postfix for logdir',
)
parser.add_argument(
'-r',
'--resume',
type=str,
const=True,
default='',
nargs='?',
help='resume from logdir or checkpoint in logdir',
)
parser.add_argument(
'-b',
'--base',
nargs='*',
metavar='base_config.yaml',
help='paths to base configs. Loaded from left-to-right. '
'Parameters can be overwritten or added with command-line options of the form `--key value`.',
default=list(),
)
parser.add_argument(
'-t',
'--train',
type=str2bool,
const=True,
default=False,
nargs='?',
help='train',
)
parser.add_argument(
'--no-test',
type=str2bool,
const=True,
default=False,
nargs='?',
help='disable test',
)
parser.add_argument(
'-p', '--project', help='name of new or path to existing project')
parser.add_argument(
'-d',
'--debug',
type=str2bool,
nargs='?',
const=True,
default=False,
help='enable post-mortem debugging',
)
parser.add_argument(
'-s',
'--seed',
type=int,
default=23,
help='seed for seed_everything',
)
parser.add_argument(
'-f',
'--postfix',
type=str,
default='',
help='post-postfix for default name',
)
parser.add_argument(
'-l',
'--logdir',
type=str,
default='logs',
help='directory for logging dat shit',
)
parser.add_argument(
'--scale_lr',
type=str2bool,
nargs='?',
const=True,
default=True,
help='scale base-lr by ngpu * batch_size * n_accumulate',
)
parser.add_argument(
'--resume_epoch',
type=str,
help='resume_epoch',
)
return parser
def nondefault_trainer_args(opt):
parser = argparse.ArgumentParser()
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args([])
return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))
class WrappedDataset(Dataset):
"""Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""
def __init__(self, dataset):
self.data = dataset
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def worker_init_fn(_):
worker_info = torch.utils.data.get_worker_info()
dataset = worker_info.dataset
worker_id = worker_info.id
if isinstance(dataset, Txt2ImgIterableBaseDataset):
split_size = dataset.num_records // worker_info.num_workers
# reset num_records to the true number to retain reliable length information
dataset.sample_ids = dataset.valid_ids[worker_id
* split_size:(worker_id + 1)
* split_size]
current_id = np.random.choice(len(np.random.get_state()[1]), 1)
return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
else:
return np.random.seed(np.random.get_state()[1][0] + worker_id)
class DataModuleFromConfig(pl.LightningDataModule):
def __init__(self,
batch_size,
train=None,
validation=None,
test=None,
predict=None,
wrap=False,
num_workers=None,
shuffle_test_loader=False,
use_worker_init_fn=False,
shuffle_val_dataloader=False):
super().__init__()
self.batch_size = batch_size
self.dataset_configs = dict()
self.num_workers = num_workers if num_workers is not None else min(
batch_size * 2, multiprocessing.cpu_count())
self.use_worker_init_fn = use_worker_init_fn
if train is not None:
self.dataset_configs['train'] = train
self.train_dataloader = self._train_dataloader
if validation is not None:
self.dataset_configs['validation'] = validation
self.val_dataloader = partial(
self._val_dataloader, shuffle=shuffle_val_dataloader)
if test is not None:
self.dataset_configs['test'] = test
self.test_dataloader = partial(
self._test_dataloader, shuffle=shuffle_test_loader)
if predict is not None:
self.dataset_configs['predict'] = predict
self.predict_dataloader = self._predict_dataloader
self.wrap = wrap
def prepare_data(self):
self.datasets = dict( | (k, instantiate_from_config(self.dataset_configs[k])) | 1 | 2023-12-06 07:29:34+00:00 | 4k |
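The data module above defers dataset construction to instantiate_from_config; a self-contained sketch of that convention follows, where the helper name and the stdlib target are illustrative (the real implementation lives in ldm.util).

import importlib

def instantiate_from_config_sketch(config):
    # "target" is a dotted path to a class; "params" are its constructor kwargs.
    module_name, class_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls(**config.get("params", {}))

obj = instantiate_from_config_sketch(
    {"target": "collections.OrderedDict", "params": {}})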
facebookresearch/DCI | reproduction/crowdsourcing/annotate/preprocessing/preprocess_assets_segev.py | [
{
"identifier": "get_groups_simple",
"path": "reproduction/crowdsourcing/annotate/preprocessing/mask_creation_utils.py",
"snippet": "TARGET_STEP = 100\nSKIP_LOGGING = True\nclass GroupItem(TypedDict):\nclass FinalGroup(TypedDict):\ndef jitter(size: float) -> float:\ndef bound(v, lo, hi):\ndef _load_fina... | import time
import sys
import numpy as np
import os
import base64
import cv2
import json
from segment_anything import sam_model_registry
from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator
from .mask_creation_utils import get_groups_simple, refine_groups_simple, FinalGrouping, FinalGroup, get_points_from_canny_greedy
from .efficient_mask import EfficientMask
from PIL import Image
from io import BytesIO
from typing import TypedDict, List | 3,393 |
| #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
LOW = 5000  # low index in the images array to start at
HIGH = 12000  # high index in the images array to go up to
SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in
ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images")
OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks")
class SAMResult(TypedDict):
segmentation: np.ndarray # the mask itself
bbox: List[float] #XYWH of the mask
area: int # area of the mask
predicted_iou: float # model predicted quality
point_coords: List[List[float]] # coords of this point
stability_score: float # model stability score
crop_box: List[float] # image crop used to generate this mask, XYWH
| def fold_group_tree(g: FinalGrouping): | 0 | 2023-12-13 16:16:48+00:00 | 4k |
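A hedged usage sketch for the segment_anything imports in this snippet; the checkpoint filename is a placeholder and the all-zeros image only shows the expected HxWx3 uint8 input, so treat this as orientation rather than the repo's actual pipeline.

import numpy as np
from segment_anything import sam_model_registry
from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")  # placeholder path
generator = SamAutomaticMaskGenerator(sam)
image = np.zeros((512, 512, 3), dtype=np.uint8)  # dummy RGB image
results = generator.generate(image)  # list of dicts shaped like SAMResult above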
daswer123/xtts-webui | scripts/funcs.py | [
{
"identifier": "denoise",
"path": "scripts/resemble_enhance/enhancer/inference.py",
"snippet": "@torch.inference_mode()\ndef denoise(dwav, sr, device, run_dir=None):\n enhancer = load_enhancer(run_dir, device)\n return inference(model=enhancer.denoiser, dwav=dwav, sr=sr, device=device)"
},
{
... | import gc
import torchaudio
import torch
import numpy as np
import os
import ffmpeg
import shutil
import uuid
import subprocess
import soundfile as sf
import noisereduce
import tempfile
from scripts.resemble_enhance.enhancer.inference import denoise, enhance
from scipy.io import wavfile
from pathlib import Path
from pedalboard import (
Pedalboard,
NoiseGate,
Compressor,
LowShelfFilter,
Gain,
) | 2,345 |
|
def save_audio_to_wav(rate, y, this_dir, max_duration=None):
# Determine the bit rate of the source audio.
bit_depth = y.dtype.itemsize * 8
# Convert to 16-bit data if necessary.
if not (bit_depth == 16):
if bit_depth == 32:
audio_data = np.asarray(
y / np.max(np.abs(y)) * 32767, dtype=np.int16)
elif bit_depth == 24:
audio_data = np.asarray(
(y / (2**8)) // (2**(bit_depth - 16)), dtype=np.int16)
else: # For other types of bitness we apply the general normalization method.
max_val = float(np.iinfo(np.int16).max)
min_val = float(np.iinfo(np.int16).min)
audio_data = np.asarray(
((y - y.min()) / (y.max() - y.min())) * (max_val - min_val) + min_val, dtype=np.int16)
else:
# If the data is already in int16 format, use it directly.
audio_data = np.asarray(y, dtype=np.int16)
temp_folder = Path(this_dir) / 'temp'
# print(rate,y)
os.makedirs(temp_folder, exist_ok=True)
wav_name = f'speaker_ref_{uuid.uuid4()}.wav'
original_wav_path = str(temp_folder / wav_name)
# Save the audio data to a file without changing the sampling rate.
wavfile.write(original_wav_path, rate, audio_data)
if max_duration is not None and max_duration != 0:
output_wav_path = str(temp_folder / f'cut_{wav_name}')
(
ffmpeg.input(original_wav_path)
.output(output_wav_path, t=max_duration)
.run(overwrite_output=True, quiet=True)
)
os.remove(original_wav_path)
return output_wav_path
return original_wav_path
def resample_audio(input_wav_path, this_dir, target_rate=22050):
temp_folder = Path(this_dir) / 'temp'
temp_folder.mkdir(parents=True, exist_ok=True)
output_wav_name = f"resampled_audio_{uuid.uuid4()}.wav"
output_wav_path = temp_folder / output_wav_name
(
ffmpeg
.input(str(input_wav_path))
.output(str(output_wav_path), ar=target_rate, acodec='pcm_s16le', ac=1)
.run(overwrite_output=True, quiet=True)
)
return str(output_wav_path)
def improve_ref_audio(input_wav_path, this_dir):
input_wav_path = Path(input_wav_path)
this_dir = Path(this_dir)
temp_folder = Path(this_dir) / 'temp'
temp_folder.mkdir(parents=True, exist_ok=True)
# Generating output file name
out_filename = temp_folder / f"{input_wav_path.stem}_refined.wav"
print(input_wav_path)
# Applying filters to an audio stream using ffmpeg-python
(
ffmpeg
.input(str(input_wav_path))
.filter('lowpass', frequency=8000)
.filter('highpass', frequency=75)
.filter_('areverse')
.filter_('silenceremove', start_periods=1, start_silence=0, start_threshold=0.02)
.filter_('areverse')
.filter_('silenceremove', start_periods=1, start_silence=0, start_threshold=0.02)
.output(str(out_filename))
.overwrite_output()
.run(quiet=True)
)
return str(out_filename)
def move_and_rename_file(file_path, target_folder_path, new_file_name):
# Make sure that the new file name contains the correct .wav extension
if not new_file_name.lower().endswith('.wav'):
new_file_name += '.wav'
# Create Path objects for easy handling of paths
file_path = Path(file_path)
target_folder_path = Path(target_folder_path)
# Creating a target directory if it does not exist
target_folder_path.mkdir(parents=True, exist_ok=True)
# Full path to the new file in the destination folder
target_file_path = target_folder_path / new_file_name
# Move and rename a file
file_path.rename(target_file_path)
def improve_and_convert_audio(audio_path, type_audio):
# Read audio file and apply effects via Pedalboard
audio_data, sample_rate = sf.read(audio_path)
board = Pedalboard([
NoiseGate(threshold_db=-30, ratio=1.5, release_ms=250),
Compressor(threshold_db=12, ratio=2.5),
LowShelfFilter(cutoff_frequency_hz=400, gain_db=5),
Gain(gain_db=0),
])
reduced_noise = noisereduce.reduce_noise(y=audio_data,
sr=sample_rate,
stationary=True,
prop_decrease=0.75)
processed_audio = board(reduced_noise.astype('float32'), sample_rate)
# processed_audio = board(audio_data.astype('float32'), sample_rate)
# Create a temporary file for the processed audio
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
sf.write(temp_file.name, processed_audio.T if processed_audio.ndim >
1 else processed_audio, sample_rate)
temp_file_path = temp_file.name
# Defining an output file name with a new extension in the same folder
output_path = f"{os.path.splitext(audio_path)[0]}_improved.{type_audio}"
# Convert the processed wav file to the target format using FFmpeg
stream = (
ffmpeg
.input(temp_file_path)
.output(output_path)
.overwrite_output()
.run_async(pipe_stdout=True, pipe_stderr=True)
)
out, err = stream.communicate()
if stream.returncode != 0:
raise Exception(f"FFmpeg error:\n{err.decode()}")
# Deleting a temporary wav file after it has been used
os.unlink(temp_file_path)
return output_path
def cut_audio(input_wav_path, duration):
output_wav_path = input_wav_path.with_name(
f"{input_wav_path.stem}_cut{input_wav_path.suffix}")
try:
(
ffmpeg
.input(str(input_wav_path))
.output(str(output_wav_path), t=duration)
.run(overwrite_output=True)
)
except ffmpeg.Error as e: # Catching specific ffmpeg Error here.
# Check if stderr or stdout have been captured before trying to decode.
stderr = e.stderr.decode('utf8') if e.stderr else "No stderr"
stdout = e.stdout.decode('utf8') if e.stdout else "No stdout"
print(f"stdout: {stdout}")
# More detailed error information will be printed/logged here.
print(f"stderr: {stderr}")
raise # Re-raise exception after logging details
return output_wav_path
# RESEMBLE ENHANCE
def save_audio(out_folder, file_name, rate, audio_data):
os.makedirs(out_folder, exist_ok=True)
file_path = os.path.join(out_folder, file_name)
with open(file_path, 'wb') as f:
wavfile.write(f, rate, audio_data)
return file_path
def clear_gpu_cache():
# del model
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def resemble_enhance_audio(audio_path,
use_enhance,
use_denoise=False,
solver='Midpoint',
nfe=64,
tau=0.5,
chunk_seconds=8,
chunks_overlap=1,
denoising=False,
output_type="wav",
output_folder=""):
if audio_path is None:
return None, None
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dwav, orig_sr = torchaudio.load(audio_path)
dwav = dwav.mean(dim=0).to(device)
denoise_path = None
enhance_path = None
if use_denoise:
wav1, new_sr_1 = denoise(dwav.cpu(), orig_sr, device)
denoise_file_name = f"{Path(audio_path).stem}_denoise.{output_type}"
out_folder = Path("./output") / output_folder
denoise_path = save_audio(
out_folder, denoise_file_name, new_sr_1, wav1.numpy())
if use_enhance:
lambd = 0.9 if denoising else 0.1
solver = solver.lower()
nfe = int(nfe)
| wav2, new_sr_2 = enhance(dwav=dwav.cpu(), sr=orig_sr, device=device, | 1 | 2023-12-14 06:34:12+00:00 | 4k |
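The denoise-then-effects chain inside improve_and_convert_audio reduces to the sketch below, run on a synthetic signal; the effect parameters are copied from the snippet, while the input signal is made up.

import numpy as np
import noisereduce
from pedalboard import Pedalboard, NoiseGate, Compressor, LowShelfFilter, Gain

sample_rate = 22050
audio = (np.random.randn(sample_rate) * 0.1).astype("float32")  # 1 s of noise

board = Pedalboard([
    NoiseGate(threshold_db=-30, ratio=1.5, release_ms=250),
    Compressor(threshold_db=12, ratio=2.5),
    LowShelfFilter(cutoff_frequency_hz=400, gain_db=5),
    Gain(gain_db=0),
])
reduced = noisereduce.reduce_noise(y=audio, sr=sample_rate,
                                   stationary=True, prop_decrease=0.75)
processed = board(reduced.astype("float32"), sample_rate)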
FrozenBurning/PrimDiffusion | dva/io.py | [
{
"identifier": "AttrDict",
"path": "dva/attr_dict.py",
"snippet": "class AttrDict:\n def __init__(self, entries):\n self.add_entries_(entries)\n\n def keys(self):\n return self.__dict__.keys()\n\n def values(self):\n return self.__dict__.values()\n\n def __getitem__(sel... | import json
import cv2
import numpy as np
import copy
import importlib
import pickle
import os
from typing import Any, Dict
from dva.attr_dict import AttrDict
from dva.geom import compute_v2uv, compute_neighbours | 1,758 |
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def load_module(module_name, class_name=None, silent: bool = False):
module = importlib.import_module(module_name)
return getattr(module, class_name) if class_name else module
def load_class(class_name):
return load_module(*class_name.rsplit(".", 1))
def load_from_config(config, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs)
def load_opencv_calib(extrin_path, intrin_path):
cameras = {}
fse = cv2.FileStorage()
fse.open(extrin_path, cv2.FileStorage_READ)
fsi = cv2.FileStorage()
fsi.open(intrin_path, cv2.FileStorage_READ)
names = [
fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size())
]
for camera in names:
rot = fse.getNode(f"R_{camera}").mat()
R = fse.getNode(f"Rot_{camera}").mat()
T = fse.getNode(f"T_{camera}").mat()
R_pred = cv2.Rodrigues(rot)[0]
assert np.all(np.isclose(R_pred, R))
K = fsi.getNode(f"K_{camera}").mat()
cameras[camera] = {
"Rt": np.concatenate([R, T], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
return cameras
def load_smpl_params(params):
return {
k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id"
}
def load_smpl_topology(data_struct) -> Dict[str, Any]:
# TODO: compute_
topology = {
"vi": data_struct["f"].astype(np.int64),
"vti": data_struct["ft"].astype(np.int64),
"vt": data_struct["vt"].astype(np.float32),
"n_verts": data_struct["v_template"].shape[0],
}
topology["v2uv"] = compute_v2uv(
topology["n_verts"], topology["vi"], topology["vti"]
)
nbs_idxs, nbs_weights = compute_neighbours(
topology["v"].shape[0], topology["vi"], 8
)
topology.update({"nbs_idxs": nbs_idxs, "nbs_weights": nbs_weights})
return {
"topology": topology,
"lbs_template_verts": data_struct["v_template"].astype(np.float32),
}
def read_pickle(pkl_path):
with open(pkl_path, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
return u.load()
def load_static_assets_crossid_smpl(config):
# with chumpy dependency!!!
data_struct = read_pickle(config.data.smpl_topology)
vt = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_vt.npy'))
ft = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_ft.npy'))
n_verts = data_struct["v_template"].shape[0]
| topology = AttrDict( | 0 | 2023-12-06 05:12:55+00:00 | 4k |
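A usage sketch for load_from_config above; the stdlib target is purely illustrative of the class_name-plus-kwargs convention and assumes load_from_config is importable from dva.io.

from dva.io import load_from_config

config = {"class_name": "fractions.Fraction", "numerator": 3, "denominator": 4}
frac = load_from_config(config)  # equivalent to fractions.Fraction(3, 4)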
Nearcyan/papers.day | scrape_abs.py | [
{
"identifier": "ArxivPaper",
"path": "backend/models.py",
"snippet": "class ArxivPaper(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n arxiv_id = models.CharField(max_length=20, unique=True)\n\n # fields scraped fro... | import argparse
import shutil
import tempfile
import django
import fitz
import random
import requests
import re
import tarfile
import os
from openai import OpenAI
from scholarly import scholarly # if this breaks, run pip install --upgrade httpx
from scholarly import ProxyGenerator
from datetime import datetime
from bs4 import BeautifulSoup
from django.core.files.base import ContentFile
from django.conf import settings
from backend.models import ArxivPaper, Author, Subject, PaperImage, PaperSource | 3,589 |
|
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'papers.settings')
django.setup()
def extract_tar_gz(file_path: str, output_dir: str) -> None:
"""
Extract a tar.gz file to the specified output directory
:param file_path: The path to the tar.gz file
:param output_dir: The directory to extract the tar.gz file to
:return: None
"""
with tarfile.open(file_path, 'r:gz') as tar:
tar.extractall(output_dir)
def create_image_objects(directory: str, paper) -> list:
"""
Given a directory which contains images, this function will create PaperImage objects for each image
:param directory: The directory containing the images
:return: The list of PaperImage objects
"""
image_files = [os.path.join(root, f) for root, _, files in os.walk(directory) for f in files if
f.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))]
images = []
for image_file in image_files:
image_path = os.path.join(directory, image_file)
with open(image_path, 'rb') as file:
filename = paper.arxiv_id + '_' + os.path.basename(image_path)
django_file = ContentFile(file.read(), name=filename)
image = PaperImage(image=django_file, paper=paper)
image.save()
images.append(image)
return images
def create_tex_files(directory: str, paper) -> list:
"""
Given a directory which contains tex files, this function will create PaperSource objects for each tex file
:param directory: The directory containing the tex files
:return: The list of PaperSource objects
"""
tex_files = [f for f in os.listdir(directory) if f.lower().endswith('.tex')]
sources = []
for tex_file in tex_files:
tex_path = os.path.join(directory, tex_file)
with open(tex_path, 'r') as f:
tex_content = f.read()
source = PaperSource(content=tex_content, paper=paper)
source.save()
sources.append(source)
return sources
def delete_files(directory: str) -> None:
"""
Delete all files in a directory
:param directory: The directory to delete the files from
:return: None
"""
for root, dirs, files in os.walk(directory):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
def get_paper_screenshot_from_pdf(pdf_path) -> str:
"""
Get a screenshot of the first page of the pdf
:param pdf_path: The path to the pdf
:return: The path to the screenshot
"""
try:
pdf = fitz.open(pdf_path)
page = pdf.load_page(0)
pix = page.get_pixmap(alpha=False)
random_int = random.randint(0, 1000000)
temp_filename = f'temp_{random_int}.png'
pix.save(temp_filename, "png")
return temp_filename
except Exception as e:
print(f'Error occurred while getting screenshot of pdf: {pdf_path}')
return None
def get_paper_summary_from_abstract(abstract: str) -> str:
"""
Get a summary of the paper from the abstract
:param abstract: The abstract of the paper
:return: The summary of the paper
"""
client = OpenAI()
client.api_key = settings.OPENAI_API_KEY
prompt = f"Summarize the following AI paper abstract in two sentences:\nAbstract: {abstract}\nSummary:"
response = client.completions.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.9,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
)
summary = response.choices[0].text
return summary.strip()
def scrape_paper(arxiv_id, google_scholar=False):
"""
Scrape the paper with the given arxiv_id and save it to the database
:param arxiv_id: The arxiv_id of the paper
:param google_scholar: True if google scholar lookups should be performed, else false
:return: The saved ArxivPaper object
"""
# Send a GET request to the URL and retrieve the HTML content
url = f'https://arxiv.org/abs/{arxiv_id}'
if ArxivPaper.objects.filter(arxiv_id=arxiv_id).exists():
print(f'[{arxiv_id}] Paper with id {arxiv_id} already exists')
return None
else:
print(f'[{arxiv_id}] Scraping paper: {url}')
try:
response = requests.get(url)
html_content = response.content
except Exception as e:
print(f'[{arxiv_id}] Error occurred while scraping {url}')
return None
# Create a BeautifulSoup object to parse the HTML
soup = BeautifulSoup(html_content, 'html.parser')
# Get the title
title_tag = soup.find('h1', class_='title')
title = title_tag.get_text(strip=True)
title = re.sub(r'Title:', '', title)
print(f'[{arxiv_id}] Title: {title}')
# Get the abstract
abstract_tag = soup.find('blockquote', class_='abstract')
abstract = abstract_tag.get_text(strip=True)
# remove various things
abstract = re.sub(r'Abstract:', '', abstract)
abstract = re.sub(r'\n', ' ', abstract)
abstract = re.sub(r' ', ' ', abstract)
# Get the authors
author_div = soup.find('div', class_='authors')
author_tags = author_div.find_all('a')
authors = [author.get_text(strip=True) for author in author_tags]
# Get the primary subject
primary_subject = soup.find('span', class_='primary-subject').get_text(strip=True)
short_name = primary_subject.split('(')[1].replace(')', '').strip()
full_name = primary_subject.split('(')[0].strip()
print(f'[{arxiv_id}] Primary subject: {short_name} - {full_name}')
prim_subject = Subject.objects.filter(short_name=short_name).first()
if not prim_subject:
prim_subject = Subject.objects.create(short_name=short_name, full_name=full_name)
print(f'[{arxiv_id}] Creating subject: {short_name} - {full_name}')
# get everything inside of 'subjects' that is not in a <span>:
subject_div = soup.find('td', class_='subjects')
subject_text = subject_div.get_text(strip=True)
subject_text = re.sub(r'<span.*span>', '', subject_text)
subject_list = subject_text.split(';')
subject_list = [subject.strip() for subject in subject_list]
subjects = [subject for subject in subject_list if subject]
jref = soup.find('td', class_='tablecell jref')
if jref:
jref = jref.get_text(strip=True)
jref = re.sub(r'Journal ref:', '', jref)
jref = re.sub(r'\n', '', jref)
jref = re.sub(r' ', '', jref)
print(f'[{arxiv_id}] Journal ref: {jref}')
else:
jref = None
comments = soup.find('td', class_='tablecell comments')
if comments:
comments = comments.get_text(strip=True)
comments = re.sub(r'Comments:', '', comments)
comments = re.sub(r'\n', '', comments)
comments = re.sub(r' ', '', comments)
print(f'[{arxiv_id}] Comments: {comments}')
else:
comments = None
doi = soup.find('td', class_='tablecell arxivdoi')
if doi:
doi = doi.find('a')
doi = doi.get_text(strip=True)
doi = re.sub(r'DOI:', '', doi)
doi = re.sub(r'\n', '', doi)
doi = re.sub(r' ', '', doi)
print(f'[{arxiv_id}] DOI: {doi}')
else:
doi = None
# Get the date
date_tag = soup.find('div', class_='dateline')
date_string = date_tag.get_text(strip=True)
date_string = re.sub(r' \(v.*\)', '', date_string)
date_match = re.search(r'\[Submitted on (.+)\]', date_string)
if date_match:
date_string = date_match.group(1)
date = datetime.strptime(date_string, '%d %b %Y').date()
else:
date = None
# Download the pdf
pdf_url = f'https://arxiv.org/pdf/{arxiv_id}.pdf'
try:
pdf_response = requests.get(pdf_url)
if pdf_response.status_code != 200:
print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}')
return None
except Exception as e:
print(f'[{arxiv_id}] Error occurred while downloading pdf from {pdf_url}: {e}')
return None
pdf_content = pdf_response.content
pdf_file = ContentFile(pdf_content, name=f'{arxiv_id}.pdf')
# Download the source
source_url = f'https://arxiv.org/e-print/{arxiv_id}'
try:
source_response = requests.get(source_url)
print(f'[{arxiv_id}] Downloading source from {source_url}')
if source_response.status_code != 200:
print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}')
return None
except Exception as e:
print(f'[{arxiv_id}] Error occurred while downloading source from {source_url}: {e}')
return None
source_content = source_response.content
source_tar = ContentFile(source_content, name=f'{arxiv_id}.tar.gz')
paper = ArxivPaper.objects.create(title=title, abstract=abstract, publication_date=date, arxiv_id=arxiv_id, doi=doi,
pdf=pdf_file, primary_subject=prim_subject, journal_ref=jref, comment=comments,
source_tar=source_tar)
# extract the source:
temp_dir = tempfile.mkdtemp()
try:
extract_tar_gz(paper.source_tar.path, temp_dir)
# grab all images from the source:
images = create_image_objects(temp_dir, paper)
for image in images:
paper.images.add(image)
print(f'[{arxiv_id}] Added {len(images)} images')
sources = create_tex_files(temp_dir, paper)
for source in sources:
paper.sources.add(source)
print(f'[{arxiv_id}] Added {len(sources)} sources')
except Exception as e:
print(f'[{arxiv_id}] Error occurred while extracting source: {e}')
# not a fatal exception, some papers do not provide tar.gz files and the source can just be e.g. a pdf
finally:
delete_files(temp_dir)
# Get a screenshot
screenshot_path = get_paper_screenshot_from_pdf(paper.pdf.path)
if screenshot_path:
screenshot = ContentFile(open(screenshot_path, 'rb').read(), name=f'{arxiv_id}.png')
paper.screenshot = screenshot
os.remove(screenshot_path)
# get a summary
try:
summary = get_paper_summary_from_abstract(paper.abstract)
paper.summary = summary
paper.save()
except Exception as e:
print(f"Exception while generating completion: {e}")
paper.delete()
return None
# get number of citations
if google_scholar:
try:
search_query = scholarly.search_pubs(f'"{paper.title}"', patents=False, citations=False)
first_paper_result = next(search_query)
citations = first_paper_result['num_citations']
paper.citations = citations
paper.save()
print(f'[{arxiv_id}] Citations: {citations}')
if citations > 1000:
interesting_paper = True
print(f'[{arxiv_id}] Interesting paper: {citations} citations')
except Exception as e:
print(f'[{arxiv_id}] Could not find paper on Google Scholar')
total_author_citations = 0
for author_name in authors:
# get author if exists: | author = Author.objects.filter(name=author_name).first() | 1 | 2023-12-14 08:23:05+00:00 | 4k |
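The title and abstract extraction inside scrape_paper boils down to the pattern below; this sketch parses an inline HTML fragment instead of a live arXiv page, so the markup is illustrative.

import re
from bs4 import BeautifulSoup

html = '''
<h1 class="title">Title:An Example Paper</h1>
<blockquote class="abstract">Abstract: Something interesting.</blockquote>
'''
soup = BeautifulSoup(html, 'html.parser')
title = re.sub(r'Title:', '', soup.find('h1', class_='title').get_text(strip=True))
abstract = re.sub(r'Abstract:', '',
                  soup.find('blockquote', class_='abstract').get_text(strip=True)).strip()
assert title == 'An Example Paper'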
yanzq95/SGNet | train.py | [
{
"identifier": "Middlebury_dataset",
"path": "data/middlebury_dataloader.py",
"snippet": "class Middlebury_dataset(Dataset):\n \"\"\"RGB-D-D Dataset.\"\"\"\n\n def __init__(self, root_dir, scale=8, transform=None):\n \"\"\"\n Args:\n root_dir (string): Directory with all ... | import os
import torch
import numpy as np
import argparse
import torch.optim as optim
import torch.nn as nn
import logging
import os
from numpy.core.fromnumeric import mean
from models.SGNet import *
from models.common import *
from data.nyu_dataloader import *
from data.rgbdd_dataloader import *
from data.middlebury_dataloader import Middlebury_dataset
from utils import calc_rmse, rgbdd_calc_rmse, midd_calc_rmse
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from tqdm import tqdm
from datetime import datetime | 2,353 |
|
parser = argparse.ArgumentParser()
parser.add_argument('--scale', type=int, default=8, help='scale factor')
parser.add_argument('--lr', default='0.0001', type=float, help='learning rate')
parser.add_argument('--result', default='experiment', help='learning rate')
parser.add_argument('--epoch', default=200, type=int, help='max epoch')
parser.add_argument('--device', default="0", type=str, help='which gpu use')
parser.add_argument("--decay_iterations", type=list, default=[5e4, 1e5, 1.6e5], help="steps to start lr decay")
parser.add_argument("--num_feats", type=int, default=40, help="channel number of the middle hidden layer")
parser.add_argument("--gamma", type=float, default=0.2, help="decay rate of learning rate")
parser.add_argument("--root_dir", type=str, default='/opt/data/share/120106010699/nyu_data', help="root dir of dataset")
parser.add_argument("--batchsize", type=int, default=1, help="batchsize of training dataloader")
opt = parser.parse_args()
print(opt)
os.environ["CUDA_VISIBLE_DEVICES"] = opt.device
s = datetime.now().strftime('%Y%m%d%H%M%S')
dataset_name = opt.root_dir.split('/')[-1]
result_root = '%s/%s-lr_%s-s_%s-%s-b_%s' % (opt.result, s, opt.lr, opt.scale, dataset_name, opt.batchsize)
if not os.path.exists(result_root):
os.mkdir(result_root)
logging.basicConfig(filename='%s/train.log' % result_root, format='%(asctime)s %(message)s', level=logging.INFO)
logging.info(opt)
net = SGNet(num_feats=opt.num_feats, kernel_size=3, scale=opt.scale).cuda()
net_getFre = get_Fre()
net_grad = Get_gradient_nopadding_d()
criterion = nn.L1Loss()
optimizer = optim.Adam(net.parameters(), lr=opt.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.decay_iterations, gamma=opt.gamma)
net.train()
data_transform = transforms.Compose([transforms.ToTensor()])
up = nn.Upsample(scale_factor=opt.scale, mode='bicubic')
if dataset_name == 'nyu_data':
test_minmax = np.load('%s/test_minmax.npy' % opt.root_dir)
train_dataset = NYU_v2_datset(root_dir=opt.root_dir, scale=opt.scale, transform=data_transform, train=True)
test_dataset = NYU_v2_datset(root_dir=opt.root_dir, scale=opt.scale, transform=data_transform, train=False)
if dataset_name == 'RGB-D-D':
train_dataset = NYU_v2_datset(root_dir='/data/SRData/NYU_v2', scale=opt.scale, transform=data_transform, train=True)
test_dataset = RGBDD_Dataset(root_dir=opt.root_dir, scale=opt.scale, downsample='bicubic', transform=data_transform,
train=False)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=opt.batchsize, shuffle=True, num_workers=8)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=8)
max_epoch = opt.epoch
num_train = len(train_dataloader)
best_rmse = 10.0
best_epoch = 0
for epoch in range(max_epoch):
# ---------
# Training
# ---------
net.train()
running_loss = 0.0
t = tqdm(iter(train_dataloader), leave=True, total=len(train_dataloader))
for idx, data in enumerate(t):
batches_done = num_train * epoch + idx
optimizer.zero_grad()
guidance, lr, gt = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda()
out, out_grad = net((guidance, lr))
out_amp, out_pha = net_getFre(out)
gt_amp, gt_pha = net_getFre(gt)
gt_grad = net_grad(gt)
loss_grad1 = criterion(out_grad, gt_grad)
loss_fre_amp = criterion(out_amp, gt_amp)
loss_fre_pha = criterion(out_pha, gt_pha)
loss_fre = 0.5 * loss_fre_amp + 0.5 * loss_fre_pha
loss_spa = criterion(out, gt)
loss = loss_spa + 0.002 * loss_fre + 0.001 * loss_grad1
loss.backward()
optimizer.step()
scheduler.step()
running_loss += loss.data.item()
running_loss_50 = running_loss
if idx % 50 == 0:
running_loss_50 /= 50
t.set_description('[train epoch:%d] loss: %.8f' % (epoch + 1, running_loss_50))
t.refresh()
logging.info('epoch:%d iteration:%d running_loss:%.10f' % (epoch + 1, batches_done + 1, running_loss / num_train))
if (epoch % 2 == 0) and (epoch < 30):
with torch.no_grad():
net.eval()
if dataset_name == 'nyu_data':
rmse = np.zeros(449)
if dataset_name == 'RGB-D-D':
rmse = np.zeros(405)
t = tqdm(iter(test_dataloader), leave=True, total=len(test_dataloader))
for idx, data in enumerate(t):
if dataset_name == 'nyu_data':
guidance, lr, gt = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda()
out, out_grad = net((guidance, lr))
minmax = test_minmax[:, idx]
minmax = torch.from_numpy(minmax).cuda()
rmse[idx] = calc_rmse(gt[0, 0], out[0, 0], minmax)
if dataset_name == 'RGB-D-D':
guidance, lr, gt, max, min = data['guidance'].cuda(), data['lr'].cuda(), data['gt'].cuda(), data[
'max'].cuda(), data['min'].cuda()
out = net((guidance, lr))
minmax = [max, min] | rmse[idx] = rgbdd_calc_rmse(gt[0, 0], out[0, 0], minmax) | 2 | 2023-12-10 04:41:17+00:00 | 4k |
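
A hedged aside on the frequency loss in the row above: the `get_Fre` module itself is not shown here, so the sketch below is only an assumption about what it computes — the common amplitude/phase split of a 2-D FFT, which is consistent with how `loss_fre_amp` and `loss_fre_pha` are weighted in the training loop.

# Sketch only; the real get_Fre in SGNet may differ.
import torch
import torch.nn as nn

class get_Fre(nn.Module):
    def forward(self, x):
        # x: (B, C, H, W) depth map; FFT over the two spatial dims
        fre = torch.fft.fft2(x)
        amp = torch.abs(fre)     # amplitude spectrum
        pha = torch.angle(fre)   # phase spectrum
        return amp, pha

# Mirrors the loop above: loss_fre = 0.5 * L1(amp) + 0.5 * L1(pha),
# then loss = loss_spa + 0.002 * loss_fre + 0.001 * loss_grad1.
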
LSimon95/megatts2 | models/megatts2.py | [
{
"identifier": "MRTE",
"path": "modules/mrte.py",
"snippet": "class MRTE(nn.Module):\n def __init__(\n self, \n mel_bins: int = HIFIGAN_MEL_CHANNELS,\n mel_frames: int = HIFIGAN_HOP_LENGTH,\n attn_dim: int = 512,\n ff_dim: int = 1024,\n ... | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.mrte import MRTE
from modules.vqpe import VQProsodyEncoder
from modules.convnet import ConvNet
from modules.embedding import SinePositionalEmbedding, TokenEmbedding
from modules.transformer import TransformerEncoder, TransformerEncoderLayer
from einops import rearrange | 3,193 |
class MegaVQ(nn.Module):
def __init__(
self,
mrte: MRTE,
|
class MegaVQ(nn.Module):
def __init__(
self,
mrte: MRTE, | vqpe: VQProsodyEncoder, | 1 | 2023-12-10 15:02:54+00:00 | 4k |
ml-stat-Sustech/TorchCP | tests/test_regression.py | [
{
"identifier": "ACI",
"path": "torchcp/regression/predictors/aci.py",
"snippet": "class ACI(SplitPredictor):\n \"\"\"\n Adaptive conformal inference (Gibbs et al., 2021)\n paper: https://arxiv.org/abs/2106.00170\n\n :param model: a pytorch model that can output the values of different quant... | import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from torchcp.regression.predictors import SplitPredictor,CQR,ACI
from torchcp.regression.loss import QuantileLoss
from torchcp.regression import Metrics
from torchcp.utils import fix_randomness
from utils import build_reg_data, build_regression_model | 3,114 |
def train(model, device, epoch, train_data_loader, criterion, optimizer):
for index, (tmp_x, tmp_y) in enumerate(train_data_loader):
outputs = model(tmp_x.to(device))
loss = criterion(outputs, tmp_y.unsqueeze(dim=1).to(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_SplitPredictor():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=1)
X, y = build_reg_data()
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
split_index1 = int(len(indices) * 0.4)
split_index2 = int(len(indices) * 0.6)
part1, part2, part3 = np.split(indices, [split_index1, split_index2])
scalerX = StandardScaler()
scalerX = scalerX.fit(X[part1, :])
train_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part1, :])), torch.from_numpy(y[part1]))
cal_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part2, :])), torch.from_numpy(y[part2]))
test_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part3, :])), torch.from_numpy(y[part3]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
cal_data_loader = torch.utils.data.DataLoader(cal_dataset, batch_size=100, shuffle=False, pin_memory=True)
test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False, pin_memory=True)
epochs = 100
alpha = 0.1
##################################
# Split Conformal Prediction
##################################
print("########################## SplitPredictor ###########################")
model = build_regression_model("NonLinearNet")(X.shape[1], 1, 64, 0.5).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for epoch in range(epochs):
train(model, device, epoch, train_data_loader, criterion, optimizer)
model.eval()
predictor = SplitPredictor(model)
predictor.calibrate(cal_data_loader, alpha)
print(predictor.evaluate(test_data_loader))
def test_time_series():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=2)
X, y = build_reg_data(data_name="synthetic")
num_examples = X.shape[0]
T0 = int(num_examples * 0.4)
train_dataset = TensorDataset(torch.from_numpy(X[:T0, :]), torch.from_numpy(y[:T0]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
alpha = 0.1
quantiles = [alpha / 2, 1 - alpha / 2]
model = build_regression_model("NonLinearNet")(X.shape[1], 2, 64, 0.5).to(device)
|
def train(model, device, epoch, train_data_loader, criterion, optimizer):
for index, (tmp_x, tmp_y) in enumerate(train_data_loader):
outputs = model(tmp_x.to(device))
loss = criterion(outputs, tmp_y.unsqueeze(dim=1).to(device))
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_SplitPredictor():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=1)
X, y = build_reg_data()
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
split_index1 = int(len(indices) * 0.4)
split_index2 = int(len(indices) * 0.6)
part1, part2, part3 = np.split(indices, [split_index1, split_index2])
scalerX = StandardScaler()
scalerX = scalerX.fit(X[part1, :])
train_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part1, :])), torch.from_numpy(y[part1]))
cal_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part2, :])), torch.from_numpy(y[part2]))
test_dataset = TensorDataset(torch.from_numpy(scalerX.transform(X[part3, :])), torch.from_numpy(y[part3]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
cal_data_loader = torch.utils.data.DataLoader(cal_dataset, batch_size=100, shuffle=False, pin_memory=True)
test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False, pin_memory=True)
epochs = 100
alpha = 0.1
##################################
# Split Conformal Prediction
##################################
print("########################## SplitPredictor ###########################")
model = build_regression_model("NonLinearNet")(X.shape[1], 1, 64, 0.5).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for epoch in range(epochs):
train(model, device, epoch, train_data_loader, criterion, optimizer)
model.eval()
predictor = SplitPredictor(model)
predictor.calibrate(cal_data_loader, alpha)
print(predictor.evaluate(test_data_loader))
def test_time_series():
##################################
# Preparing dataset
##################################
device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
fix_randomness(seed=2)
X, y = build_reg_data(data_name="synthetic")
num_examples = X.shape[0]
T0 = int(num_examples * 0.4)
train_dataset = TensorDataset(torch.from_numpy(X[:T0, :]), torch.from_numpy(y[:T0]))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=100, shuffle=True, pin_memory=True)
alpha = 0.1
quantiles = [alpha / 2, 1 - alpha / 2]
model = build_regression_model("NonLinearNet")(X.shape[1], 2, 64, 0.5).to(device) | criterion = QuantileLoss(quantiles) | 3 | 2023-12-06 09:08:41+00:00 | 4k |
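
The calibration step in these tests (`predictor.calibrate(cal_data_loader, alpha)`) is, conceptually, a quantile computation over nonconformity scores. The sketch below is not TorchCP's internal code — it is a minimal stand-alone version of split conformal calibration with absolute-residual scores and the usual finite-sample correction.

import math
import torch

def split_conformal_quantile(preds, targets, alpha=0.1):
    scores = (preds - targets).abs()                        # nonconformity scores
    n = scores.numel()
    level = min(math.ceil((n + 1) * (1 - alpha)) / n, 1.0)  # finite-sample correction
    return torch.quantile(scores, level)

# A new prediction yhat then gets the interval (yhat - q, yhat + q),
# which covers the truth with probability >= 1 - alpha under exchangeability.
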
vintagedave/Fontimize | tests.py | [
{
"identifier": "get_used_characters_in_html",
"path": "fontimize.py",
"snippet": "def get_used_characters_in_html(html : str) -> set[chr]:\n soup = BeautifulSoup(html, 'html.parser')\n text = soup.get_text()\n return get_used_characters_in_str(text)"
},
{
"identifier": "charPair",
... | import os
import unittest
import sys
from unittest.mock import patch
from fontimize import get_used_characters_in_html, charPair, _get_char_ranges, optimise_fonts, optimise_fonts_for_files
from fontTools.ttLib import woff2, TTFont | 3,279 |
class TestGetUsedCharactersInHtml(unittest.TestCase):
def test_empty_html(self):
self.assertEqual(get_used_characters_in_html(''), set(' '))
def test_html_with_no_text(self):
self.assertEqual(get_used_characters_in_html('<html><body></body></html>'), set(' '))
def test_html_with_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_repeated_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World! Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_multiple_spans(self):
self.assertEqual(get_used_characters_in_html('<html><body><span>Hello</span><span>, </span><span>World!</span></body></html>'), set('Hello, World!'))
def test_html_with_multiple_divs(self):
self.assertEqual(get_used_characters_in_html('<html><body><div>Hello</div><div>, </div><div>World!</div></body></html>'), set('Hello, World!'))
def test_html_with_links(self):
self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!'))
def test_html_with_nested_tags(self):
self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!'))
class TestCharPairs(unittest.TestCase):
def test_get_range_with_single_char(self):
self.assertEqual(charPair('a', 'a').get_range(), 'U+0061')
# Note that the second of the pair does not have the "U+" -- this caught me out
# with parse errors inside TTF2Web()
def test_get_range_with_two_chars(self):
self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062')
def test_get_range_with_multiple_chars(self):
self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064')
class TestCharRanges(unittest.TestCase):
def test_empty(self):
self.assertEqual(_get_char_ranges([]), [])
def test_single_char(self):
self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')])
def test_two_sequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')])
def test_two_nonsequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')])
def test_multiple_ranges(self):
self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')])
# Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string
def _count_glyphs_in_font(fontpath):
# with open(fontpath, 'rb') as f:
# wfr = woff2.WOFF2Reader(f)
# cmap = font['cmap']
# return len(cmap.getBestCmap())
# font.flavor = None # Decompress the font data
font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr)
font.flavor = None # Decompress the font data
num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird
return num_glyphs
# Does a named glyph exist in the font?
def _font_contains(fontpath, charname : str) -> bool:
font = TTFont(fontpath)
font.flavor = None # Decompress the font data
return charname in font.getGlyphOrder()
class TestOptimiseFonts(unittest.TestCase):
# Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase
test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz"
def test_optimise_fonts_with_single_font(self):
|
class TestGetUsedCharactersInHtml(unittest.TestCase):
def test_empty_html(self):
self.assertEqual(get_used_characters_in_html(''), set(' '))
def test_html_with_no_text(self):
self.assertEqual(get_used_characters_in_html('<html><body></body></html>'), set(' '))
def test_html_with_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_repeated_text(self):
self.assertEqual(get_used_characters_in_html('<html><body>Hello, World! Hello, World!</body></html>'), set('Hello, World!'))
def test_html_with_multiple_spans(self):
self.assertEqual(get_used_characters_in_html('<html><body><span>Hello</span><span>, </span><span>World!</span></body></html>'), set('Hello, World!'))
def test_html_with_multiple_divs(self):
self.assertEqual(get_used_characters_in_html('<html><body><div>Hello</div><div>, </div><div>World!</div></body></html>'), set('Hello, World!'))
def test_html_with_links(self):
self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!'))
def test_html_with_nested_tags(self):
self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!'))
class TestCharPairs(unittest.TestCase):
def test_get_range_with_single_char(self):
self.assertEqual(charPair('a', 'a').get_range(), 'U+0061')
# Note that the second of the pair does not have the "U+" -- this caught me out
# with parse errors inside TTF2Web()
def test_get_range_with_two_chars(self):
self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062')
def test_get_range_with_multiple_chars(self):
self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064')
class TestCharRanges(unittest.TestCase):
def test_empty(self):
self.assertEqual(_get_char_ranges([]), [])
def test_single_char(self):
self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')])
def test_two_sequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')])
def test_two_nonsequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')])
def test_multiple_ranges(self):
self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')])
# Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string
def _count_glyphs_in_font(fontpath):
# with open(fontpath, 'rb') as f:
# wfr = woff2.WOFF2Reader(f)
# cmap = font['cmap']
# return len(cmap.getBestCmap())
# font.flavor = None # Decompress the font data
font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr)
font.flavor = None # Decompress the font data
num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird
return num_glyphs
# Does a named glyph exist in the font?
def _font_contains(fontpath, charname : str) -> bool:
font = TTFont(fontpath)
font.flavor = None # Decompress the font data
return charname in font.getGlyphOrder()
class TestOptimiseFonts(unittest.TestCase):
# Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase
test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz"
def test_optimise_fonts_with_single_font(self): | result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False) | 3 | 2023-12-07 13:23:46+00:00 | 4k |
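
The tests above fully pin down the behavior of `_get_char_ranges` and `charPair.get_range()`: runs of consecutive code points collapse into one pair, and only the first endpoint carries the "U+" prefix. The following is a hedged re-implementation derived from those tests, not Fontimize's actual source.

class charPair:
    def __init__(self, first, second):
        self.first, self.second = first, second

    def __eq__(self, other):
        return (self.first, self.second) == (other.first, other.second)

    def get_range(self):
        if self.first == self.second:
            return f"U+{ord(self.first):04X}"
        # no "U+" on the second endpoint (see the comment in the tests)
        return f"U+{ord(self.first):04X}-{ord(self.second):04X}"

def _get_char_ranges(chars):
    ranges = []
    for c in sorted(chars):
        if ranges and ord(c) == ord(ranges[-1].second) + 1:
            ranges[-1].second = c          # extend the current run
        else:
            ranges.append(charPair(c, c))  # start a new run
    return ranges
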
wanghao-cst/Omni-VideoAssistant | llava/model/omni_arch.py | [
{
"identifier": "build_vision_tower",
"path": "llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.p... | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn | 3,409 | if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} # weight:torch.Size([4096, 1024]) bias:torch.Size([4096])
# import pdb;pdb.set_trace()
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
# v1.5: mm_projector_weights['model.mm_projector.0.weight'].shape: torch.Size([4096, 1024])
# model.mm_projector.0.bias: torch.Size([4096]); model.mm_projector.2.weight: torch.Size([4096, 4096]); model.mm_projector.2.bias: torch.Size([4096])
if getattr(self, 'frames_conv', None) is None: ## Implement continue finetuning.
# self.frames_attn = MultiheadAttention(256*4096, num_heads)
# self.frames_conv = nn.Conv2d(4096, 4096, kernel_size=(12,1), stride=(10,1)) # b 4096 51 256
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
# self.keyframes_attn = MultiheadAttention(256*4096, num_heads)
# import pdb;pdb.set_trace()
self.config.mm_video_fuser = 'frames_conv'
class OmniMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_frames(self, frames):
frames_features = self.get_model().get_vision_tower()(frames) # torch.Size([276, 256, 1024])
frames_features = self.get_model().mm_projector(frames_features) # torch.Size([276, 256, 4096]) torch.float16
return frames_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, videos
):
vision_tower = self.get_vision_tower()
# import pdb;pdb.set_trace()
# frames_attn = self.get_model().frames_attn
frames_conv = self.get_model().frames_conv
# keyframes_attn = self.get_model().keyframes_attn
# import pdb;pdb.set_trace()
if vision_tower is None or videos is None or input_ids.shape[1] == 1: # False
if past_key_values is not None and vision_tower is not None and videos is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
# videos = [torch.Size([51, 3, 224, 224]), torch.Size([79, 3, 224, 224]), torch.Size([60, 3, 224, 224]), torch.Size([86, 3, 224, 224])]
assert type(videos) is list or videos.ndim == 5 # True
concat_frames = torch.cat([video for video in videos], dim=0) # torch.Size([79, 3, 336, 336])
# import pdb;pdb.set_trace()
frames_features = self.encode_frames(concat_frames) # torch.Size([276, 256, 4096]) torch.Size([79, 576, 4096])
split_sizes = [video.shape[0] for video in videos] # [51, 79, 60, 86]
frames_features = torch.split(frames_features, split_sizes, dim=0) # (torch.Size([51, 256, 4096]), torch.Size([79, 256, 4096]), torch.Size([60, 256, 4096]), torch.Size([86, 256, 4096]))
# import pdb;pdb.set_trace()
# frames_features = [x.flatten(0, 1) for x in frames_features]
key_frames_feature = []
for frame_feature in frames_features:
# import pdb;pdb.set_trace()
frame_feature = frame_feature.unsqueeze(0) # b 51 256 4096
frame_feature = frame_feature.permute(0,2,1,3) # b 256 51 4096
# short video
if frame_feature.shape[2] >= 12:
frame_feature = frames_conv(frame_feature) # torch.Size([1, 256, 4, 4096])
frame_feature = frame_feature.squeeze(0).permute(1,0,2) # torch.Size([4, 256, 4096])
# key_frames_feature.append(frame_feature[:6])
# import pdb;pdb.set_trace()
num_frames = frame_feature.shape[0]
key_frames_feature.append(frame_feature[::max(1,num_frames//5)][:6]) # v1.5 576 patch
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_video_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids): # torch.Size([4, 375])
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # 1 False
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_frames_features = key_frames_feature[cur_video_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
# import pdb;pdb.set_trace()
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0)
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0)
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_video_idx += 1
# import pdb;pdb.set_trace()
# never enter it
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # (tensor([35], device='cuda:0'),)
cur_new_input_embeds = []
if labels is not None: # torch.Size([4, 375])
cur_labels = labels[batch_idx] # torch.Size([375]): -100...labels...-100
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
        while image_token_indices.numel() > 0:  # numel() counts the elements; 1 image token here
# import pdb;pdb.set_trace()
# if cur_video_idx > len(key_frames_feature)-1:
# cur_frames_features = key_frames_feature[-1] # for gradio demo
# else:
cur_frames_features = key_frames_feature[cur_video_idx] # torch.Size([4, 256, 4096])
cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096])
image_token_start = image_token_indices[0] # tensor(35, device='cuda:0')
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_frames_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OmniMetaModel:
def __init__(self, config):
super(OmniMetaModel, self).__init__(config)
# import pdb;pdb.set_trace()
if hasattr(config, "mm_vision_tower"): # train False, v1.5 continue finetune True
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
# import pdb;pdb.set_trace()
if hasattr(config, "mm_video_fuser"):
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1))
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096 for exp1 test uncomment it
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None): # Train
vision_tower = model_args.vision_tower # 'openai/clip-vit-large-patch14'
mm_vision_select_layer = model_args.mm_vision_select_layer # -2
mm_vision_select_feature = model_args.mm_vision_select_feature # patch
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter # '/home/wanghao/weights/llava/llava-pretrain-vicuna-7b-v1.3/mm_projector.bin'
self.config.mm_vision_tower = vision_tower
# import pdb;pdb.set_trace()
# vision_tower = build_vision_tower(model_args)
        if self.get_vision_tower() is None: ## first finetune takes this branch (requires_grad=True); on continue-finetune the pretrained checkpoint already provides it
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
else: ## Implement continue finetuning.
if fsdp is not None and len(fsdp) > 0:
vision_tower = self.vision_tower[0]
else:
vision_tower = self.vision_tower
vision_tower.load_model()
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_hidden_size = vision_tower.hidden_size # 1024
self.config.mm_vision_select_layer = mm_vision_select_layer # -2
self.config.mm_vision_select_feature = mm_vision_select_feature # patch
# self.mm_projector = build_vision_projector(self.config) # 1024->4096
        if getattr(self, 'mm_projector', None) is None: ## first finetune takes this branch (requires_grad=True); on continue-finetune the pretrained checkpoint already provides it
self.mm_projector = build_vision_projector(self.config)
else:
# In case it is frozen by LoRA
for p in self.mm_projector.parameters():
p.requires_grad = True
# import pdb;pdb.set_trace()
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} # weight:torch.Size([4096, 1024]) bias:torch.Size([4096])
# import pdb;pdb.set_trace()
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
# v1.5: mm_projector_weights['model.mm_projector.0.weight'].shape: torch.Size([4096, 1024])
# model.mm_projector.0.bias: torch.Size([4096]); model.mm_projector.2.weight: torch.Size([4096, 4096]); model.mm_projector.2.bias: torch.Size([4096])
if getattr(self, 'frames_conv', None) is None: ## Implement continue finetuning.
# self.frames_attn = MultiheadAttention(256*4096, num_heads)
# self.frames_conv = nn.Conv2d(4096, 4096, kernel_size=(12,1), stride=(10,1)) # b 4096 51 256
# self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096
# self.keyframes_attn = MultiheadAttention(256*4096, num_heads)
# import pdb;pdb.set_trace()
self.config.mm_video_fuser = 'frames_conv'
class OmniMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_frames(self, frames):
frames_features = self.get_model().get_vision_tower()(frames) # torch.Size([276, 256, 1024])
frames_features = self.get_model().mm_projector(frames_features) # torch.Size([276, 256, 4096]) torch.float16
return frames_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, videos
):
vision_tower = self.get_vision_tower()
# import pdb;pdb.set_trace()
# frames_attn = self.get_model().frames_attn
frames_conv = self.get_model().frames_conv
# keyframes_attn = self.get_model().keyframes_attn
# import pdb;pdb.set_trace()
if vision_tower is None or videos is None or input_ids.shape[1] == 1: # False
if past_key_values is not None and vision_tower is not None and videos is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
# videos = [torch.Size([51, 3, 224, 224]), torch.Size([79, 3, 224, 224]), torch.Size([60, 3, 224, 224]), torch.Size([86, 3, 224, 224])]
assert type(videos) is list or videos.ndim == 5 # True
concat_frames = torch.cat([video for video in videos], dim=0) # torch.Size([79, 3, 336, 336])
# import pdb;pdb.set_trace()
frames_features = self.encode_frames(concat_frames) # torch.Size([276, 256, 4096]) torch.Size([79, 576, 4096])
split_sizes = [video.shape[0] for video in videos] # [51, 79, 60, 86]
frames_features = torch.split(frames_features, split_sizes, dim=0) # (torch.Size([51, 256, 4096]), torch.Size([79, 256, 4096]), torch.Size([60, 256, 4096]), torch.Size([86, 256, 4096]))
# import pdb;pdb.set_trace()
# frames_features = [x.flatten(0, 1) for x in frames_features]
key_frames_feature = []
for frame_feature in frames_features:
# import pdb;pdb.set_trace()
frame_feature = frame_feature.unsqueeze(0) # b 51 256 4096
frame_feature = frame_feature.permute(0,2,1,3) # b 256 51 4096
# short video
if frame_feature.shape[2] >= 12:
frame_feature = frames_conv(frame_feature) # torch.Size([1, 256, 4, 4096])
frame_feature = frame_feature.squeeze(0).permute(1,0,2) # torch.Size([4, 256, 4096])
# key_frames_feature.append(frame_feature[:6])
# import pdb;pdb.set_trace()
num_frames = frame_feature.shape[0]
key_frames_feature.append(frame_feature[::max(1,num_frames//5)][:6]) # v1.5 576 patch
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_video_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids): # torch.Size([4, 375])
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # 1 False
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_frames_features = key_frames_feature[cur_video_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
# import pdb;pdb.set_trace()
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0)
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0)
# cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_video_idx += 1
# import pdb;pdb.set_trace()
# never enter it
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # (tensor([35], device='cuda:0'),)
cur_new_input_embeds = []
if labels is not None: # torch.Size([4, 375])
cur_labels = labels[batch_idx] # torch.Size([375]): -100...labels...-100
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
        while image_token_indices.numel() > 0:  # numel() counts the elements; 1 image token here
# import pdb;pdb.set_trace()
# if cur_video_idx > len(key_frames_feature)-1:
# cur_frames_features = key_frames_feature[-1] # for gradio demo
# else:
cur_frames_features = key_frames_feature[cur_video_idx] # torch.Size([4, 256, 4096])
cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096])
image_token_start = image_token_indices[0] # tensor(35, device='cuda:0')
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_frames_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start]) | cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) | 2 | 2023-12-05 08:02:17+00:00 | 4k |
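
A quick shape check on the temporal pooling above: `frames_conv` is a Conv2d applied to a (batch, patches, frames, hidden) layout, so its (12, 1) kernel with (10, 1) stride slides along the frame axis only. Stand-alone sketch — the 51-frame / 4096-dim numbers come from the comments in the row:

import torch
import torch.nn as nn

frames_conv = nn.Conv2d(576, 576, kernel_size=(12, 1), stride=(10, 1))

x = torch.randn(1, 576, 51, 4096)    # (batch, patches, frames, hidden)
y = frames_conv(x)                   # frames out = (51 - 12) // 10 + 1 = 4
print(y.shape)                       # torch.Size([1, 576, 4, 4096])
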
RobertCsordas/moe_attention | layers/transformer/relative_transformer.py | [
{
"identifier": "ActivationFunction",
"path": "layers/transformer/transformer.py",
"snippet": "class TransformerEncoderLayer(torch.nn.Module):\nclass TransformerDecoderLayer(torch.nn.Module):\nclass TransformerDecoderBase(torch.nn.Module):\n class State:\nclass TransformerEncoder(torch.nn.Module):\nc... | from typing import Optional
from .transformer import ActivationFunction
from .multi_head_relative_pos_attention import FixedRelativeMultiheadAttention, AttentionMask
from .multi_head_attention import MultiHeadAttention
from .transformer import Transformer, TransformerEncoderWithLayer, TransformerDecoderWithLayer
import torch
import torch.nn
import torch.nn.functional as F | 3,039 |
class RelativeTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,
head_projection_size: Optional[int] = None, ln_after_attention: bool = True):
super().__init__()
self.ln_after_attention = ln_after_attention
self.self_attn = FixedRelativeMultiheadAttention(
d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,
projection_size=head_projection_size)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
if ln_after_attention:
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,
pos_offset: Optional[int] = None) -> torch.Tensor:
src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)
src = src + self.dropout1(src2)
src = self.norm1(src) if self.ln_after_attention else src
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformerDecoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, drop_expand: bool = True):
super().__init__()
self.self_attn = FixedRelativeMultiheadAttention(d_model, nhead, dropout=attention_dropout)
self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
# Implementation of Feedforward model
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.norm3 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.dropout3 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[AttentionMask] = None,
memory_key_padding_mask: Optional[torch.Tensor] = None,
full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:
assert pos_offset == 0 or tgt_mask is None
tgt2 = self.self_attn(tgt, tgt if full_target is None else full_target, mask=tgt_mask,
pos_offset=pos_offset)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, mask=AttentionMask(memory_key_padding_mask, None))
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformer(Transformer):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: ActivationFunction = F.relu, attention_dropout: float = 0):
super().__init__(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout, activation,
TransformerEncoderWithLayer(RelativeTransformerEncoderLayer),
|
class RelativeTransformerEncoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,
head_projection_size: Optional[int] = None, ln_after_attention: bool = True):
super().__init__()
self.ln_after_attention = ln_after_attention
self.self_attn = FixedRelativeMultiheadAttention(
d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,
projection_size=head_projection_size)
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
if ln_after_attention:
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,
pos_offset: Optional[int] = None) -> torch.Tensor:
src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)
src = src + self.dropout1(src2)
src = self.norm1(src) if self.ln_after_attention else src
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformerDecoderLayer(torch.nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
attention_dropout=0, drop_expand: bool = True):
super().__init__()
self.self_attn = FixedRelativeMultiheadAttention(d_model, nhead, dropout=attention_dropout)
self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
# Implementation of Feedforward model
self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x
self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
self.norm1 = torch.nn.LayerNorm(d_model)
self.norm2 = torch.nn.LayerNorm(d_model)
self.norm3 = torch.nn.LayerNorm(d_model)
self.dropout1 = torch.nn.Dropout(dropout)
self.dropout2 = torch.nn.Dropout(dropout)
self.dropout3 = torch.nn.Dropout(dropout)
self.activation = activation
self.reset_parameters()
def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[AttentionMask] = None,
memory_key_padding_mask: Optional[torch.Tensor] = None,
full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:
assert pos_offset == 0 or tgt_mask is None
tgt2 = self.self_attn(tgt, tgt if full_target is None else full_target, mask=tgt_mask,
pos_offset=pos_offset)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, mask=AttentionMask(memory_key_padding_mask, None))
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
if self.activation is F.relu else 1.0)
torch.nn.init.xavier_uniform_(self.linear2.weight)
class RelativeTransformer(Transformer):
def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
activation: ActivationFunction = F.relu, attention_dropout: float = 0):
super().__init__(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout, activation,
TransformerEncoderWithLayer(RelativeTransformerEncoderLayer), | TransformerDecoderWithLayer(RelativeTransformerDecoderLayer), attention_dropout) | 5 | 2023-12-13 08:45:02+00:00 | 4k |
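
The encoder layer above is a standard post-LN block (attention, residual, LayerNorm, then FFN, residual, LayerNorm) with relative-position attention swapped in. A hedged usage sketch — the (batch, seq, d_model) layout is an assumption, since the attention module's expected input shape is not shown in this row:

import torch
from layers.transformer.relative_transformer import RelativeTransformerEncoderLayer

layer = RelativeTransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=2048)
x = torch.randn(2, 16, 512)   # assumed (batch, seq, d_model) layout
y = layer(x)                  # LN(x + Attn(x)) followed by LN(x + FFN(x))
print(y.shape)                # torch.Size([2, 16, 512])
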
riccardomusmeci/mlx-llm | src/mlx_llm/model/_factory.py | [
{
"identifier": "MODEL_ENTRYPOINTS",
"path": "src/mlx_llm/model/_registry.py",
"snippet": "MODEL_ENTRYPOINTS = {\n \"Phi2\": phi2,\n \"LLaMA-2-7B-chat\": llama_2_7B_chat,\n \"TinyLlama-1.1B-Chat-v0.6\": tiny_llama_chat_v06,\n # \"Mistral-7B-Instruct-v0.1\": mistral_7B_instruct_v01,\n \"Mi... | from ._registry import MODEL_ENTRYPOINTS
from typing import Optional, Tuple, Union
from ._utils import load_weights, load_weights_from_hf
import mlx.nn as nn | 1,641 |
__all__ = ["list_models", "create_model"]
def list_models() -> None:
"""List all available LLM models.
"""
print("Available models:")
for model_name in list(MODEL_ENTRYPOINTS.keys()):
print(f"\t- {model_name}")
def create_model(model_name: str, weights: Union[str, bool] = True, strict: bool = False, verbose: bool = False) -> nn.Module:
"""Create a LLM model.
Example:
```
>>> from mlx_llm.model import create_model
>>> # Create a Phi2 model with no pretrained weights.
        >>> model = create_model('Phi2', weights=False)
>>> # Create a Phi2 model with pretrained weights from HF.
>>> model = create_model('Phi2', weights=True)
>>> # Create a Phi2 model with custom weights.
>>> model = create_model('Phi2', weights="path/to/weights.npz")
```
Args:
model_name (str): model name
weights (Union[str, bool]): if True, load pretrained weights from HF. If str, load weights from the given path. Defaults to True.
strict (bool, optional): whether to strictly enforce that the keys in weights match the keys of the model. Defaults to False.
verbose (bool, optional): whether to print the model summary. Defaults to False.
Returns:
nn.Module: a LLM model
Raises:
ValueError: Unknown model name
"""
if model_name not in MODEL_ENTRYPOINTS:
raise ValueError(f"Unknown model name: {model_name}.")
model = MODEL_ENTRYPOINTS[model_name]()
if weights and isinstance(weights, bool):
model = load_weights_from_hf(
model=model,
model_name=model_name,
strict=strict,
verbose=verbose
)
elif isinstance(weights, str):
|
__all__ = ["list_models", "create_model"]
def list_models() -> None:
"""List all available LLM models.
"""
print("Available models:")
for model_name in list(MODEL_ENTRYPOINTS.keys()):
print(f"\t- {model_name}")
def create_model(model_name: str, weights: Union[str, bool] = True, strict: bool = False, verbose: bool = False) -> nn.Module:
"""Create a LLM model.
Example:
```
>>> from mlx_llm.model import create_model
>>> # Create a Phi2 model with no pretrained weights.
        >>> model = create_model('Phi2', weights=False)
>>> # Create a Phi2 model with pretrained weights from HF.
>>> model = create_model('Phi2', weights=True)
>>> # Create a Phi2 model with custom weights.
>>> model = create_model('Phi2', weights="path/to/weights.npz")
```
Args:
model_name (str): model name
weights (Union[str, bool]): if True, load pretrained weights from HF. If str, load weights from the given path. Defaults to True.
strict (bool, optional): whether to strictly enforce that the keys in weights match the keys of the model. Defaults to False.
verbose (bool, optional): whether to print the model summary. Defaults to False.
Returns:
nn.Module: a LLM model
Raises:
ValueError: Unknown model name
"""
if model_name not in MODEL_ENTRYPOINTS:
raise ValueError(f"Unknown model name: {model_name}.")
model = MODEL_ENTRYPOINTS[model_name]()
if weights and isinstance(weights, bool):
model = load_weights_from_hf(
model=model,
model_name=model_name,
strict=strict,
verbose=verbose
)
elif isinstance(weights, str): | model = load_weights( | 1 | 2023-12-07 16:19:47+00:00 | 4k |
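
A short usage note for the factory above, following its own docstring (model names come from the MODEL_ENTRYPOINTS registry shown in this row):

from mlx_llm.model import list_models, create_model

list_models()                                # prints the registry keys, e.g. Phi2
model = create_model("Phi2", weights=False)  # build without loading any weights
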
xetdata/xetcache | xetcache/xetmemo_kernel_extension.py | [
{
"identifier": "hash_anything",
"path": "xetcache/util.py",
"snippet": "def hash_anything(x):\n return hashlib.sha256(pickle.dumps(x)).hexdigest()"
},
{
"identifier": "probe_memo",
"path": "xetcache/util.py",
"snippet": "def probe_memo(memopath, inputhashstr, key=None):\n \"\"\"\n... | import os
import time
from .util import hash_anything, probe_memo, store_memo
from .config import get_memo_path, get_runtime_threshold
from IPython.core.magic import Magics, magics_class, cell_magic | 1,730 |
@magics_class
class XMemoMagics(Magics):
"""Memoization for data science tasks
%load_ext xetcache
to load the extension
"""
def __init__(self, *args, **kwargs):
print(self.xetmemo.__doc__)
memopath = get_memo_path()
print(f"Memoizing to {memopath}")
super().__init__(*args, **kwargs)
@cell_magic
def xetmemo(self, line, cell):
'''
Usage:
%%xetmemo input=v1,v2 output=v3,v4
Caches the specified output variables each time it is called.
        If called later with the same inputs, the cached value is returned
and not reevaluated. This is persistent across Python runs.
        Any content changes to the input variables or the cell code will
force reevaluation of the cell. Otherwise the outputs will simply be
retrieved from the memo.
This memo is persistent across Python processes and if XetHub is used
see `xetcache.set_xet_project`, can be shared with others.
For performance reasons, only functions which take more than 3
seconds (configurable from config.set_runtime_threshold) will be
cached. "always=True" can be added to the xetmemo arguments to
        ignore the runtime and always cache
%%xetmemo input=v1,v2 output=v3,v4 always=True
Note that inputs can be anything picklable including functions.
A key parameter can be added to group the stored objects together.
Objects stored with one key will not be retrievable with a different
key
%%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1
Also see the `xetcache.xetmemo` decorator for a version that can be
used as a function decorator
'''
# parse the argument list
args = line.strip().split(' ')
inputvars = []
outputvars = []
ip = self.shell
always = False
key = None
for arg in args:
k, v = arg.split('=')
if k == 'input':
inputvars = [x.strip() for x in v.split(',')]
elif k == 'output':
outputvars = [x.strip() for x in v.split(',')]
elif k == 'always':
always = (v.strip() == 'True')
elif k == 'key':
key = v.strip()
else:
raise RuntimeError(f'Unexpected xmemo key type {k}')
# we hash the xetmemo line, and the contents of the cell
# and all the variables in the input line
inputhashes = [hash_anything(line), hash_anything(cell)]
for i in inputvars:
try:
var = ip.ev(i)
except Exception as e:
print(f"Unable to read variable {i}. Error {e}")
return
try:
h = hash_anything(var)
except Exception as e:
print(f"Unable to hash variable {i}. Error {e}")
return
inputhashes.append(h)
# Then we hash the list of hashes and use that as the filename
inputhashstr = hash_anything(inputhashes)
memopath = get_memo_path()
runtime_threshold = get_runtime_threshold()
try:
retrieved_vals = probe_memo(memopath, inputhashstr, key)
if retrieved_vals is not None:
keys = retrieved_vals.keys()
print(f"Retrieving variables {list(keys)}")
for k, v in retrieved_vals.items():
ip.user_ns[k] = v
return
except Exception as e:
print(f"Unable to load from memo from {memopath}: {e}")
print("Executing the cell normally")
start_time = time.time()
ret = ip.run_cell(cell)
elapsed_time = time.time() - start_time
if ret.success and (always or elapsed_time > runtime_threshold):
try:
storedict = {}
for v in outputvars:
if v not in ip.user_ns:
print(f"{v} not found in scope. Error in specification. Not memoizing.")
return
storedict[v] = ip.user_ns[v]
|
@magics_class
class XMemoMagics(Magics):
"""Memoization for data science tasks
%load_ext xetcache
to load the extension
"""
def __init__(self, *args, **kwargs):
print(self.xetmemo.__doc__)
memopath = get_memo_path()
print(f"Memoizing to {memopath}")
super().__init__(*args, **kwargs)
@cell_magic
def xetmemo(self, line, cell):
'''
Usage:
%%xetmemo input=v1,v2 output=v3,v4
Caches the specified output variables each time it is called.
        If called later with the same inputs, the cached value is returned
and not reevaluated. This is persistent across Python runs.
        Any content changes to the input variables or the cell code will
force reevaluation of the cell. Otherwise the outputs will simply be
retrieved from the memo.
This memo is persistent across Python processes and if XetHub is used
see `xetcache.set_xet_project`, can be shared with others.
For performance reasons, only functions which take more than 3
seconds (configurable from config.set_runtime_threshold) will be
cached. "always=True" can be added to the xetmemo arguments to
        ignore the runtime and always cache
%%xetmemo input=v1,v2 output=v3,v4 always=True
Note that inputs can be anything picklable including functions.
A key parameter can be added to group the stored objects together.
Objects stored with one key will not be retrievable with a different
key
%%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1
Also see the `xetcache.xetmemo` decorator for a version that can be
used as a function decorator
'''
# parse the argument list
args = line.strip().split(' ')
inputvars = []
outputvars = []
ip = self.shell
always = False
key = None
for arg in args:
k, v = arg.split('=')
if k == 'input':
inputvars = [x.strip() for x in v.split(',')]
elif k == 'output':
outputvars = [x.strip() for x in v.split(',')]
elif k == 'always':
always = (v.strip() == 'True')
elif k == 'key':
key = v.strip()
else:
raise RuntimeError(f'Unexpected xmemo key type {k}')
# we hash the xetmemo line, and the contents of the cell
# and all the variables in the input line
inputhashes = [hash_anything(line), hash_anything(cell)]
for i in inputvars:
try:
var = ip.ev(i)
except Exception as e:
print(f"Unable to read variable {i}. Error {e}")
return
try:
h = hash_anything(var)
except Exception as e:
print(f"Unable to hash variable {i}. Error {e}")
return
inputhashes.append(h)
# Then we hash the list of hashes and use that as the filename
inputhashstr = hash_anything(inputhashes)
memopath = get_memo_path()
runtime_threshold = get_runtime_threshold()
try:
retrieved_vals = probe_memo(memopath, inputhashstr, key)
if retrieved_vals is not None:
keys = retrieved_vals.keys()
print(f"Retrieving variables {list(keys)}")
for k, v in retrieved_vals.items():
ip.user_ns[k] = v
return
except Exception as e:
print(f"Unable to load from memo from {memopath}: {e}")
print("Executing the cell normally")
start_time = time.time()
ret = ip.run_cell(cell)
elapsed_time = time.time() - start_time
if ret.success and (always or elapsed_time > runtime_threshold):
try:
storedict = {}
for v in outputvars:
if v not in ip.user_ns:
print(f"{v} not found in scope. Error in specification. Not memoizing.")
return
storedict[v] = ip.user_ns[v] | store_memo(memopath, inputhashstr, storedict, key) | 2 | 2023-12-05 21:59:08+00:00 | 4k |
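
The magic above keys its memo on a combined hash of the cell text and every input variable, using the pickle+sha256 `hash_anything` shown in this row. A minimal stand-alone sketch of the same content-hash scheme, with a plain dict standing in for the on-disk/XetHub memo store:

import hashlib, pickle

def hash_anything(x):
    return hashlib.sha256(pickle.dumps(x)).hexdigest()

_memo = {}  # stand-in for the persistent memo path

def memoized(code_text, inputs, compute):
    key = hash_anything([hash_anything(code_text)] +
                        [hash_anything(v) for v in inputs])
    if key not in _memo:
        _memo[key] = compute()   # only evaluated on a cache miss
    return _memo[key]

# memoized("y = x ** 2", [3], lambda: 3 ** 2)  -> 9, cached by content
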