id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
21,000 | from typing import Union, Optional
import torch
import torch.nn as nn
import numpy as np
import inspect
from bert4torch.snippets import take_along_dim, torch_div, sequence_padding, create_position_ids_start_at_padding
from bert4torch.snippets import log_info, log_warn, log_warn_once
from bert4torch.tokenizers import TokenizerBase
from packaging import version
from contextlib import contextmanager
import gc
def repetition_penalty_func(input_ids: torch.LongTensor, scores: torch.FloatTensor, penalty: float) -> torch.FloatTensor:
    """Apply a repetition penalty (CTRL-style) to `scores` in place.

    Logits of tokens already present in `input_ids` are pushed down:
    negative logits are multiplied by `penalty`, positive ones divided by it
    (so penalty > 1 always reduces the probability of repeated tokens).

    :param input_ids: previously generated token ids, shape [batch, seq_len]
    :param scores: next-token logits, shape [batch, vocab]; modified in place
    :param penalty: repetition penalty factor (> 1 discourages repeats)
    :return: the same `scores` tensor, penalized
    """
    seen_logits = scores.gather(1, input_ids)
    penalized = torch.where(seen_logits < 0, seen_logits * penalty, seen_logits / penalty)
    # write the penalized values back in place; scatter_ returns `scores` itself
    return scores.scatter_(1, input_ids, penalized)
21,001 | from bert4torch.models.transformer import Decoder
from bert4torch.snippets import delete_arguments
from bert4torch.layers import MultiHeadAttentionLayer, BertLayer, BlockIdentity
import math
import torch
from torch import nn
import copy
The provided code snippet includes necessary dependencies for implementing the `apply_alibi_pos_emb` function. Write a Python function `def apply_alibi_pos_emb(self, attention_scores, key_layer)` to solve the following problem:
执行alibi相对位置编码,单独拎出来主要是falcon是在+之后再执行attention_scale的
Here is the function:
def apply_alibi_pos_emb(self, attention_scores, key_layer):
    '''Apply the ALiBi relative position bias, then the attention scale.

    Kept as a standalone step because some models (e.g. Falcon) apply the
    1/sqrt(head_size) scaling only *after* the bias has been added.
    '''
    # the bias addition is done in fp32 for numerical stability
    if attention_scores.dtype in (torch.float16, torch.bfloat16):
        attention_scores = attention_scores.to(torch.float32)
    alibi_bias = self.relative_positions_encoding(key_layer)
    biased = attention_scores + alibi_bias
    return biased / math.sqrt(self.attention_head_size)
21,002 | import torch
from torch import nn
from bert4torch.layers import LayerNorm
from bert4torch.snippets import log_warn, load_state_dict_into_meta_model, find_tied_parameters, JsonConfig
from bert4torch.snippets import get_parameter_device, load_checkpoint, save_checkpoint, copytree
import warnings
from typing import Union, Optional
from torch4keras.model import *
from tqdm import tqdm
import gc
import copy
import re
class BERT_BASE(nn.Module):
    """Base class for all BERT-style models.

    Handles configuration bookkeeping, weight initialization, checkpoint
    load/save with checkpoint-name <-> parameter-name mapping, quantization /
    adapter / peft hooks, and the generic
    embeddings -> main layers -> final layers forward pipeline.
    """
    def __init__(
        self,
        vocab_size,                          # vocabulary size
        hidden_size,                         # hidden state dimension
        num_hidden_layers,                   # number of transformer layers
        num_attention_heads,                 # number of attention heads
        intermediate_size,                   # feed-forward hidden dimension
        hidden_act,                          # feed-forward activation
        dropout_rate=None,                   # dropout probability
        attention_probs_dropout_prob=None,   # dropout probability of the attention matrix
        embedding_size=None,                 # embedding dim; falls back to hidden_size
        attention_head_size=None,            # per-head size of V
        attention_key_size=None,             # per-head size of Q, K
        initializer_range=0.02,              # std of weight initialization
        sequence_length=None,                # fixed sequence length, if any
        keep_tokens=None,                    # token ids to keep (vocab pruning)
        compound_tokens=None,                # extra compound tokens extending the embedding
        residual_attention_scores=False,     # add a residual to the attention matrix
        keep_hidden_layers=None,             # ids of hidden layers to keep
        hierarchical_position=None,          # hierarchical position-encoding decomposition
        gradient_checkpoint=False,           # use gradient checkpointing
        output_all_encoded_layers=False,     # return hidden states of all layers
        tie_emb_prj_weight=False,            # tie embedding and lm_head weights
        return_dict=False,                   # return outputs as a dict
        **kwargs
    ):
        super(BERT_BASE, self).__init__()
        # vocab pruning / extension adjusts the effective vocab size
        if keep_tokens is not None:
            vocab_size = len(keep_tokens)
        if compound_tokens is not None:
            vocab_size += len(compound_tokens)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = attention_head_size or self.hidden_size // self.num_attention_heads
        self.attention_key_size = attention_key_size or self.attention_head_size
        self.intermediate_size = intermediate_size
        self.dropout_rate = dropout_rate or 0
        self.attention_probs_dropout_prob = attention_probs_dropout_prob or 0
        self.hidden_act = hidden_act
        self.embedding_size = embedding_size or hidden_size
        self.initializer_range = initializer_range
        self.sequence_length = sequence_length
        self.keep_tokens = keep_tokens
        self.compound_tokens = compound_tokens
        self.attention_bias = None
        self.position_bias = None
        self.attention_scores = None
        self.residual_attention_scores = residual_attention_scores
        self.keep_hidden_layers = set(range(num_hidden_layers)) if keep_hidden_layers is None else set(keep_hidden_layers)
        self.hierarchical_position = hierarchical_position
        self.gradient_checkpoint = gradient_checkpoint
        self.quantized = False
        self.output_all_encoded_layers = output_all_encoded_layers
        # FIX: use .get so add_trainer is optional instead of raising KeyError
        self.add_trainer = kwargs.get('add_trainer', False)
        self.tie_emb_prj_weight = tie_emb_prj_weight
        self.return_dict = return_dict

    def tie_weights(self):
        """Hook for tying embedding / output weights; no-op in the base class."""
        pass

    def gradient_checkpointing_enable(self):
        """Enable gradient checkpointing for the main layers."""
        self.gradient_checkpoint = True

    def enable_input_require_grads(self):
        """Ported from transformers.
        Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
        the model weights fixed.
        """
        def make_inputs_require_grads(module, input, output):
            output.requires_grad_(True)
        self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)

    def disable_input_require_grads(self):
        """Ported from transformers.
        Removes the `_require_grads_hook`.
        """
        self._require_grads_hook.remove()

    def get_kw(self, *args, **kwargs):
        '''Copy the named self.* attributes into kwargs, for convenient pass-through.'''
        for arg in args:
            kwargs[arg] = getattr(self, arg)
        return kwargs

    def args_segmentate(self, inputs, **model_kwargs):
        '''Normalize positional inputs into a list/tuple.'''
        # model([x1, x2]) arrives as ([x1, x2],) through *inputs; unwrap the single element
        if (len(inputs) == 1) and isinstance(inputs[0], (tuple, list)):
            return inputs[0]
        return inputs

    def forward(self, *inputs, **model_kwargs):
        """Generic forward pipeline.
        :param inputs: List[torch.Tensor], default order is [token_ids, segment_ids(opt), position_ids(opt), custom_attention_mask(opt), conditional_input(opt)]
        :return: List[torch.Tensor] or torch.Tensor, default order is [last_hidden_state/all_encoded_layers, pooled_output(opt), mlm_scores(opt), nsp_scores(opt)]
        """
        # supports both model([token_ids, segment_ids]) and model(token_ids, segment_ids)
        inputs = self.args_segmentate(inputs, **model_kwargs)
        # Embedding
        model_kwargs = self.apply_embeddings(*inputs, **model_kwargs)
        # Main
        model_kwargs = self.apply_main_layers(**model_kwargs)
        # Final
        outputs = self.apply_final_layers(**model_kwargs)
        if model_kwargs.get('use_states', False):
            return outputs, model_kwargs
        return outputs

    def predict(self, *inputs, **model_kwargs):
        """Inference entry point: switch to eval mode and run forward.
        :param inputs: List[torch.Tensor], default order is [token_ids, segment_ids(opt), position_ids(opt), custom_attention_mask(opt), conditional_input(opt)]
        :return: List[torch.Tensor] or torch.Tensor, default order is [last_hidden_state/all_encoded_layers, pooled_output(opt), mlm_scores(opt), nsp_scores(opt)]
        """
        if self.training:
            self.eval()
        return self.forward(*inputs, **model_kwargs)

    def init_model_weights(self, module):
        """Initialize the weights of one module (used via self.apply)."""
        if isinstance(module, (nn.Linear, nn.Embedding)) and (module.weight.requires_grad):
            # The TF BERT reference uses a truncated normal; plain normal is fine here and
            # makes no difference when fine-tuning from a pretrained checkpoint.
            # cf https://github.com/pytorch/pytorch/pull/5617
            # Fixed relative position encodings (e.g. Sinusoidal) need no init (requires_grad is False).
            module.weight.data.normal_(mean=0.0, std=self.initializer_range)
        elif isinstance(module, LayerNorm):
            # rmsnorm variants (e.g. T5) have no bias
            if hasattr(module, 'bias') and (module.bias is not None) and module.bias.requires_grad:
                module.bias.data.zero_()
            if hasattr(module, 'weight') and module.weight.requires_grad:
                module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and (module.bias is not None) and (module.bias.requires_grad):
            module.bias.data.zero_()

    def init_meta_weights(self, module):
        '''Materialize meta-device weights; mainly used by quantization.'''
        if hasattr(module, 'weight') and module.weight.device == torch.device('meta'):
            module.to_empty(device='cpu')

    def variable_mapping(self):
        """Mapping from pytorch layer names to checkpoint variable names."""
        return {}

    def load_variable(self, variable=None, old_key=None, new_key=None):
        """Transform a single checkpoint variable; must be overridden by subclasses.
        Signature matches the call in from_pretrained_single."""
        raise NotImplementedError

    def load_embeddings(self, embeddings):
        """Adjust an embedding matrix according to keep_tokens and compound_tokens."""
        if self.keep_tokens is not None:
            embeddings = embeddings[self.keep_tokens]
        if self.compound_tokens is not None:
            ext_embeddings = []
            for item in self.compound_tokens:
                try:
                    # new compound token = mean of its component embeddings
                    ext_embeddings.append(torch.mean(embeddings[item], 0) * torch.ones_like(embeddings[item]))
                except IndexError:
                    # component ids out of range: fall back to the global mean embedding
                    ext_embeddings.append(torch.mean(embeddings, 0, keepdim=True))
                    warnings.warn(f'Initialize ext_embeddings from compound_tokens not in embedding index')
            embeddings = torch.cat([embeddings] + ext_embeddings, 0)
        return embeddings

    def load_trans_ckpt(self, checkpoint: str):
        """Load a checkpoint and apply model-specific transforms.
        1. Supports .safetensors + .bin
        2. Subclasses may override to pre-process weights, e.g. splitting fused qkv
        """
        return load_checkpoint(checkpoint)

    def from_pretrained_single(self, checkpoint: Union[str, os.PathLike] = None, mapping: dict = None, skip_init: bool = False,
                               device_map: dict = None, torch_dtype=None, verbose=1):
        """Load one pretrained weight file, mapping checkpoint names onto model parameters."""
        # load (and possibly transform) the checkpoint file
        ckpt_state_dict = self.load_trans_ckpt(checkpoint)
        # resolve the name mapping
        mapping = mapping or self.variable_mapping()
        model_params = set([i[0] for i in self.named_parameters()])  # trainable parameter names
        # If a name exists in both ckpt and model but not in the preset mapping, map it to itself.
        # This lets extra layers added by subclasses still load from the checkpoint automatically.
        for layer_name in model_params:
            if (layer_name in ckpt_state_dict) and (layer_name not in mapping):
                mapping.update({layer_name: layer_name})
        state_dict_new = {}   # weights re-keyed by new_key
        missing_keys = []     # model - ckpt: old_keys that could not be loaded from this file
        over_keys = set(ckpt_state_dict.keys())  # ckpt - model
        needed_keys = []      # all old_keys the model needs
        model_state_dict = self.state_dict()
        for new_key, old_key in mapping.items():
            # 1. mapping entries absent from the model are ignored
            #    (e.g. with_nsp=False leaves nsp keys in the mapping but not the model)
            if new_key not in model_state_dict:
                continue
            # 2. present in both model and ckpt: load normally
            if old_key in ckpt_state_dict:
                state_dict_new[new_key] = self.load_variable(ckpt_state_dict[old_key], old_key, new_key)
                # drop loaded parameters so over_keys keeps only the unconsumed ones
                if old_key in over_keys:
                    over_keys.remove(old_key)
            # 3. present in model but missing from ckpt
            else:
                missing_keys.append(old_key)
            needed_keys.append(old_key)
        over_keys = list(over_keys)
        del ckpt_state_dict
        gc.collect()
        self._print_mismatch_keys(missing_keys, over_keys, verbose)  # report mismatched keys
        # load the re-keyed weights into the model
        if not skip_init:
            self.load_state_dict(state_dict_new, strict=False)
        else:
            load_state_dict_into_meta_model(self, state_dict_new, device_map=device_map, torch_dtype=torch_dtype)
        del state_dict_new
        gc.collect()
        return missing_keys, over_keys, needed_keys

    def from_pretrained(self, checkpoints: Union[str, os.PathLike, list], mapping: dict = None, skip_init: bool = False,
                        device_map: dict = None, torch_dtype=None, verbose=1):
        """Load pretrained weights from a single file or a list of shard files."""
        # single weight file
        if isinstance(checkpoints, str):
            self.from_pretrained_single(checkpoints, mapping=mapping, skip_init=skip_init,
                                        device_map=device_map, torch_dtype=torch_dtype, verbose=verbose)
        # multiple shard files
        elif isinstance(checkpoints, (tuple, list)):
            all_missing_keys, all_over_keys = [], []
            tqdm_checkpoints = tqdm(checkpoints)
            for checkpoint in tqdm_checkpoints:
                tqdm_checkpoints.set_description(f'Loading {os.path.basename(checkpoint)}')
                missing_keys, over_keys, needed_keys = \
                    self.from_pretrained_single(checkpoint, mapping=mapping, skip_init=skip_init,
                                                device_map=device_map, torch_dtype=torch_dtype, verbose=0)
                all_missing_keys.extend(missing_keys)
                all_over_keys.extend(over_keys)
                if checkpoint == checkpoints[-1]:
                    tqdm_checkpoints.set_description('Loading checkpoint shards')
            # needed_keys depends only on mapping + model, so the last shard's copy is valid for all shards
            all_missing_keys = set(all_missing_keys).difference(set(needed_keys))
            all_over_keys = set(all_over_keys).difference(set(needed_keys))
            self._print_mismatch_keys(all_missing_keys, all_over_keys, verbose)
        else:
            raise ValueError('Args `checkpoint_path` only support `str` or `list(str)` format')

    @staticmethod
    def _print_mismatch_keys(missing_keys, over_keys, verbose):
        """Log mismatched keys.
        FIX: declared @staticmethod — it is called as self._print_mismatch_keys(a, b, c)
        but took no self, which raised TypeError."""
        if verbose != 0:
            for key in missing_keys:  # in model but not in ckpt
                log_warn(f'`{key}` not found in pretrained checkpoints')
        if verbose > 1:
            for key in over_keys:  # in ckpt but not in model
                log_warn(f'`{key}` only exists in pretrained checkpoints but not in model parameters')

    def save_trans_ckpt(self):
        """Transform the state_dict for saving.
        1. Inverse of load_trans_ckpt
        2. Subclasses may override, e.g. merging q,k,v back into a fused qkv
        """
        return self.state_dict()

    def save_pretrained(self, save_path: str, weight_map: dict = None, mapping: dict = None, write_to_disk: bool = True, ignore_tied_parameters=False):
        '''Save the model under pretrained (checkpoint) key names, loadable by transformers.
        1. Applies variable_mapping() in reverse.
        2. Also runs save_trans_ckpt(), e.g. some LLMs merge q,k,v into qkv.
        :param save_path: str, target file or directory path
        :param weight_map: dict, the `weight_map` field of a pytorch_model.bin.index.json for sharded ckpts;
            can be loaded via `from bert4torch.snippets import JsonConfig; weight_map = JsonConfig(config_path).weight_map`
        :param mapping: dict, usually None; a custom mapping may be supplied (rarely needed)
        :param write_to_disk: bool, normally True; False is used by Trainer().save_pretrained to get the dict back
        :param ignore_tied_parameters: bool, drop tied parameters when saving
        '''
        mapping = mapping or self.variable_mapping()
        state_dict = self.save_trans_ckpt()
        if ignore_tied_parameters:
            named_tied_parameters = find_tied_parameters(self)
            # flatten {name: [tied names]} into a single list
            tied_parameters = [name for _, group in named_tied_parameters.items() for name in group]
            log_info(f'Remove tied parameters: {tied_parameters}')
            for tied_parameter in tied_parameters:
                if tied_parameter in state_dict:
                    state_dict.pop(tied_parameter)
        # re-key model parameter names back to checkpoint names
        for k in list(state_dict.keys()):
            state_dict[mapping.get(k, k)] = state_dict.pop(k)
        # save_path is a file iff it ends with an extension
        # FIX: raw string and [a-zA-Z] (the original non-raw '[a-zA-z]' also matched [\]^_` )
        save_dir = None if re.search(r'\.[a-zA-Z0-9]+$', save_path) else save_path
        # copy the non-weight files that sit next to the source checkpoint
        # FIX: only dereference self.checkpoint_path inside the guard (the original
        # accessed it before the hasattr check and crashed when it was absent)
        if write_to_disk and hasattr(self, 'checkpoint_path') and (self.checkpoint_path is not None) and save_dir:
            checkpoint_dir = os.path.dirname(self.checkpoint_path) if os.path.isfile(self.checkpoint_path) else self.checkpoint_path
            copytree(checkpoint_dir, save_dir, ignore_copy_files=[r'\.bin$', r'\.safetensors$'], dirs_exist_ok=True)  # force-overwrites existing files
            # the .index.json describing checkpoint shards
            bin_index_json = [os.path.join(checkpoint_dir, i) for i in os.listdir(checkpoint_dir) if i.endswith('.index.json')]
            bin_index_json = bin_index_json[0] if bin_index_json else ''
            if os.path.exists(bin_index_json):
                weight_map = weight_map or JsonConfig(bin_index_json).get('weight_map')
        # save as a single file
        if weight_map is None:
            if write_to_disk:
                save_checkpoint(state_dict, os.path.join(save_dir, 'pytorch_model.bin') if save_dir else save_path)
            else:
                return state_dict
        # save as multiple shard files
        else:
            ckpt2param = dict()
            for param_name, save_file in weight_map.items():
                if save_file not in ckpt2param:
                    ckpt2param[save_file] = set([param_name])
                else:
                    ckpt2param[save_file].add(param_name)
            for save_file, param_names in ckpt2param.items():
                single_ckpt = {}
                for k in list(state_dict.keys()):
                    if k in param_names:
                        single_ckpt[k] = state_dict.pop(k)
                save_checkpoint(single_ckpt, os.path.join(save_dir or save_path, save_file))

    def apply_embeddings(self, *inputs, **model_kwargs):
        raise NotImplementedError

    def apply_main_layers(self, *inputs, **model_kwargs):
        raise NotImplementedError

    def apply_final_layers(self, *inputs, **model_kwargs):
        raise NotImplementedError

    def apply_on_layer_begin(self, l_i, **model_kwargs):
        '''Hook run on each layer block's input: select this layer's cached kv.'''
        if model_kwargs.get('use_states') is not True:
            return model_kwargs
        if model_kwargs.get('past_key_values') is not None:
            model_kwargs['past_key_value'] = model_kwargs['past_key_values'][l_i]
        if ('encoder_hidden_states' in model_kwargs) and model_kwargs.get('cross_past_key_values') is not None:
            model_kwargs['cross_past_key_value'] = model_kwargs['cross_past_key_values'][l_i]
        return model_kwargs

    def apply_on_layer_end(self, l_i, **model_kwargs):
        '''Hook run on each layer block's output: store this layer's kv cache.'''
        if model_kwargs.get('use_states') is not True:
            return model_kwargs
        if model_kwargs.get('past_key_value') is not None:
            if ('past_key_values' not in model_kwargs) or (model_kwargs.get('past_key_values') is None):
                model_kwargs['past_key_values'] = [None] * self.num_hidden_layers
            model_kwargs['past_key_values'][l_i] = model_kwargs['past_key_value']
        if model_kwargs.get('cross_past_key_value') is not None:
            if ('cross_past_key_values' not in model_kwargs) or (model_kwargs.get('cross_past_key_values') is None):
                model_kwargs['cross_past_key_values'] = [None] * self.num_hidden_layers
            model_kwargs['cross_past_key_values'][l_i] = model_kwargs['cross_past_key_value']
        return model_kwargs

    def compute_attention_bias(self, inputs=None):
        """Per-layer attention bias (overridden by mask mixins)."""
        return self.attention_bias

    def compute_position_bias(self, inputs=None):
        """Per-layer position bias (typically for relative position encodings)."""
        return self.position_bias

    def set_outputs(self, outputs):
        """Set the `output` and `outputs` attributes."""
        if not isinstance(outputs, list):
            outputs = [outputs]
        outputs = outputs[:]
        self.outputs = outputs
        if len(outputs) > 1:
            self.output = outputs
        else:
            self.output = outputs[0]

    def quantize(self, quantization_method, **kwargs):
        '''Quantize the model.'''
        if self.quantized:
            print("Already quantized.")
            return self
        new_kwargs = copy.deepcopy(kwargs)
        if 'model' in new_kwargs:
            new_kwargs.pop('model')
        # chatglm-style quantization
        if quantization_method == 'cpm_kernels':
            from bert4torch.quantization import quantize_cpm_kernels
            self = quantize_cpm_kernels(self, **new_kwargs)
        # load_in_8bit, load_in_4bit
        elif quantization_method in {'load_in_8bit', 'load_in_4bit'}:
            from bert4torch.quantization import quantize_load_in_kbit
            load_in_8bit = quantization_method == 'load_in_8bit'
            load_in_4bit = quantization_method == 'load_in_4bit'
            self = quantize_load_in_kbit(self, load_in_8bit=load_in_8bit, load_in_4bit=load_in_4bit, **new_kwargs)
        else:
            raise ValueError('Please check args `quantization_method`')
        self.quantized = True
        torch.cuda.empty_cache()
        return self

    def add_adapter(self, adapter_method='bottleneck', bottlenect_size=64):
        '''Add adapter layers.'''
        from bert4torch.layers import add_adapter
        self = add_adapter(self, adapter_method, bottlenect_size)
        self.print_trainable_parameters()
        return self

    def get_peft_model(self, peft_config, adapter_name="default"):
        '''HF peft library: https://github.com/huggingface/peft
        The LoraModel interface changed upstream; this targets v0.0.3.
        '''
        import peft
        self.peft_config = {adapter_name: peft_config}
        if isinstance(peft_config, peft.LoraConfig):
            model = peft.LoraModel(self, self.peft_config, adapter_name)
        elif isinstance(peft_config, peft.AdaLoraConfig):
            model = peft.AdaLoraModel(self, self.peft_config, adapter_name)
        else:
            raise ValueError(f'{type(peft_config)} has not been supported')
        # the returned model cannot use torch4keras' trainer directly; re-wrap if needed
        self = add_trainer(model) if self.add_trainer else model
        self.print_trainable_parameters()
        return self

    def print_trainable_parameters(self):
        """Print the number of trainable parameters."""
        print_trainable_parameters(self)

    def device(self) -> torch.device:
        """Device the model parameters live on.
        NOTE(review): upstream usually exposes this as a @property — confirm call sites before changing."""
        return get_parameter_device(self)
The provided code snippet includes necessary dependencies for implementing the `extend_with_base_model` function. Write a Python function `def extend_with_base_model(InputModel)` to solve the following problem:
添加torch4keras的BaseModel, 可以使用.compile, .fit等Trainer的功能
Here is the function:
def extend_with_base_model(InputModel):
    """Mix torch4keras' BaseModel into InputModel so Trainer features (.compile, .fit, ...) become available."""
    class BertBaseModel(InputModel, BERT_BASE, BaseModel):
        """InputModel extended with BERT_BASE plumbing and Trainer capabilities."""
        pass

    return BertBaseModel
21,003 | import torch
from torch import nn
from bert4torch.layers import LayerNorm
from bert4torch.snippets import log_warn, load_state_dict_into_meta_model, find_tied_parameters, JsonConfig
from bert4torch.snippets import get_parameter_device, load_checkpoint, save_checkpoint, copytree
import warnings
from typing import Union, Optional
from torch4keras.model import *
from tqdm import tqdm
import gc
import copy
import re
class LM_Mask(object):
    """Lower-triangular (causal) attention mask, for language models."""
    def compute_attention_bias(self, inputs=None):
        """Build the causal mask by comparing index positions.

        :param inputs: list whose first element is token_ids [batch, seq_len]
        :return: long tensor of shape [1, 1, seq_len, seq_len], broadcastable over batch and heads
        """
        token_ids = inputs[0]
        seq_len = token_ids.shape[1]
        causal = torch.ones(seq_len, seq_len, dtype=torch.long, device=token_ids.device)
        causal = torch.tril(causal, diagonal=0)
        # add broadcast dims for batch and head
        self.attention_bias = causal[None, None, :, :]
        return self.attention_bias
The provided code snippet includes necessary dependencies for implementing the `extend_with_language_model` function. Write a Python function `def extend_with_language_model(InputModel)` to solve the following problem:
添加下三角的Attention Mask(语言模型用)
Here is the function:
def extend_with_language_model(InputModel):
    """Mix the lower-triangular attention mask into InputModel (for language models)."""
    class LanguageModel(LM_Mask, InputModel):
        """Derived model carrying a causal attention mask."""
        def __init__(self, *args, **kwargs):
            # keep a caller-supplied truthy with_mlm, otherwise force it on
            kwargs['with_mlm'] = kwargs.get('with_mlm') or True
            super(LanguageModel, self).__init__(*args, **kwargs)

    return LanguageModel
21,004 | import torch
from torch import nn
from bert4torch.layers import LayerNorm
from bert4torch.snippets import log_warn, load_state_dict_into_meta_model, find_tied_parameters, JsonConfig
from bert4torch.snippets import get_parameter_device, load_checkpoint, save_checkpoint, copytree
import warnings
from typing import Union, Optional
from torch4keras.model import *
from tqdm import tqdm
import gc
import copy
import re
class UniLM_Mask(object):
    """UniLM attention mask, for seq2seq models.

    The source/target partition is given by segment_ids.
    UniLM: https://arxiv.org/abs/1905.03197
    """
    def compute_attention_bias(self, inputs=None):
        """Build the mask from the cumulative sum of segment ids.

        Position j is visible from position i iff cumsum[j] <= cumsum[i]:
        source tokens see only source; target tokens see source plus earlier target.

        :param inputs: list whose second element is segment_ids [batch, seq_len]
        :return: long tensor of shape [batch, 1, seq_len, seq_len]
        """
        segment_ids = inputs[1]
        cum_seg = segment_ids.cumsum(dim=1)
        visible = cum_seg.unsqueeze(1) <= cum_seg.unsqueeze(2)
        self.attention_bias = visible.unsqueeze(1).long()
        return self.attention_bias
The provided code snippet includes necessary dependencies for implementing the `extend_with_unified_language_model` function. Write a Python function `def extend_with_unified_language_model(InputModel)` to solve the following problem:
添加UniLM的Attention Mask(Seq2Seq模型用)
Here is the function:
def extend_with_unified_language_model(InputModel):
    """Mix the UniLM attention mask into InputModel (for seq2seq models)."""
    class UnifiedLanguageModel(UniLM_Mask, InputModel):
        """Derived model carrying the UniLM attention mask.
        UniLM: https://arxiv.org/abs/1905.03197
        """
        def __init__(self, *args, **kwargs):
            # keep a caller-supplied truthy with_mlm, otherwise force it on
            kwargs['with_mlm'] = kwargs.get('with_mlm') or True
            super(UnifiedLanguageModel, self).__init__(*args, **kwargs)

    return UnifiedLanguageModel
21,005 | import torch
import torch.nn.functional as F
import numpy as np
import random
from multiprocessing import Process, Queue
import os
from os import path, listdir
import argparse
import json
import subprocess
import sys
from typing import List, Dict
import itertools
from warnings import warn
from datetime import datetime
import numpy as np
from glob import glob
def run_exp(env, eval_mode:bool, enable_render:bool, train_dir, data_dir, config, flags, eval_flags, common_flags):
    """Run one experiment: train via opt.py as a subprocess, then optionally evaluate/render.

    :param env: environment dict for the subprocesses (carries CUDA_VISIBLE_DEVICES)
    :param eval_mode: if True, also run render_imgs.py evaluation after (or instead of) training
    :param enable_render: if True, additionally render a camera path after evaluation
    :param train_dir: experiment output directory (log, ckpt.npz, psnr files)
    :param data_dir: dataset directory passed to opt.py / render_imgs.py
    :param config: config file path for -c, or '' for none
    :param flags, eval_flags, common_flags: extra CLI flag lists for train / eval / both
    """
    opt_base_cmd = [ "python", "opt.py", "--tune_mode" ]
    if not eval_mode:
        opt_base_cmd += ["--tune_nosave"]
    opt_base_cmd += [
        "-t", train_dir,
        data_dir
    ]
    if config != '':
        opt_base_cmd += ['-c', config]
    log_file_path = path.join(train_dir, 'log')
    # PSNR_FILE_NAME is a module-level constant defined elsewhere in this file — TODO confirm
    psnr_file_path = path.join(train_dir, PSNR_FILE_NAME)
    ckpt_path = path.join(train_dir, 'ckpt.npz')
    # the psnr file doubles as a "done" marker: skip finished experiments
    if path.isfile(psnr_file_path):
        print('! SKIP', train_dir)
        return
    print('********************************************')
    if eval_mode:
        print('EVAL MODE')
    if eval_mode and path.isfile(ckpt_path):
        # checkpoint already exists: skip training, leave opt output empty
        print('! SKIP training because ckpt exists', ckpt_path)
        opt_ret = "" # Silence
    else:
        print('! RUN opt.py -t', train_dir)
        opt_cmd = ' '.join(opt_base_cmd + flags + common_flags)
        print(opt_cmd)
        try:
            opt_ret = subprocess.check_output(opt_cmd, shell=True, env=env).decode(
                sys.stdout.encoding)
        except subprocess.CalledProcessError:
            print('Error occurred while running OPT for exp', train_dir, 'on', env["CUDA_VISIBLE_DEVICES"])
            return
    with open(log_file_path, 'w') as f:
        f.write(opt_ret)
    if eval_mode:
        eval_base_cmd = [
            "python", "render_imgs.py",
            ckpt_path,
            data_dir
        ]
        if config != '':
            eval_base_cmd += ['-c', config]
        psnr_file_path = path.join(train_dir, 'test_renders', 'psnr.txt')
        if not path.exists(psnr_file_path):
            eval_cmd = ' '.join(eval_base_cmd + eval_flags + common_flags)
            print('! RUN render_imgs.py', ckpt_path)
            print(eval_cmd)
            try:
                eval_ret = subprocess.check_output(eval_cmd, shell=True, env=env).decode(
                    sys.stdout.encoding)
            except subprocess.CalledProcessError:
                print('Error occurred while running EVAL for exp', train_dir, 'on', env["CUDA_VISIBLE_DEVICES"])
                return
        else:
            print('! SKIP eval because psnr.txt exists', psnr_file_path)
        if enable_render:
            eval_base_cmd += ['--render_path']
            render_cmd = ' '.join(eval_base_cmd + eval_flags + common_flags)
            try:
                render_ret = subprocess.check_output(render_cmd, shell=True, env=env).decode(
                    sys.stdout.encoding)
            except subprocess.CalledProcessError:
                print('Error occurred while running RENDER for exp', train_dir, 'on', env["CUDA_VISIBLE_DEVICES"])
                return
    else:
        # parse 'eval stats: {...}' dict lines out of the training log
        # HACK: eval() on subprocess output — only safe because opt.py is our own trusted script
        test_stats = [eval(x.split('eval stats:')[-1].strip())
                      for x in opt_ret.split('\n') if
                      x.startswith('eval stats: ')]
        if len(test_stats) == 0:
            print('note: invalid config or crash')
            final_test_psnr = 0.0
        else:
            test_psnrs = [stats['psnr'] for stats in test_stats if 'psnr' in stats.keys()]
            print('final psnrs', test_psnrs[-5:])
            final_test_psnr = test_psnrs[-1]
        # write the final PSNR; also serves as the "done" marker checked above
        with open(psnr_file_path, 'w') as f:
            f.write(str(final_test_psnr))
def process_main(device, eval_mode:bool, enable_render:bool, queue):
    """Worker loop: pin this process to one GPU and consume experiment tasks.

    Terminates when an empty task (poison pill) is pulled off the queue.
    """
    # Set CUDA_VISIBLE_DEVICES programmatically so every child subprocess inherits it
    worker_env = os.environ.copy()
    worker_env["CUDA_VISIBLE_DEVICES"] = str(device)
    while True:
        task = queue.get()
        if len(task) == 0:
            # empty task signals shutdown
            break
        run_exp(worker_env, eval_mode, enable_render, **task)
21,006 | import torch
import torch.nn.functional as F
import numpy as np
import random
from multiprocessing import Process, Queue
import os
from os import path, listdir
import argparse
import json
import subprocess
import sys
from typing import List, Dict
import itertools
from warnings import warn
from datetime import datetime
import numpy as np
from glob import glob
def lin(start, stop, num):
    """Return `num` evenly spaced values from `start` to `stop` (inclusive), as a Python list."""
    grid = np.linspace(start, stop, num)
    return grid.tolist()
21,007 | import torch
import torch.nn.functional as F
import numpy as np
import random
from multiprocessing import Process, Queue
import os
from os import path, listdir
import argparse
import json
import subprocess
import sys
from typing import List, Dict
import itertools
from warnings import warn
from datetime import datetime
import numpy as np
from glob import glob
def randlin(start, stop, num):
lst = np.linspace(start, stop, num + 1)[:-1]
lst += np.random.uniform(low=0.0, high=(lst[1] - lst[0]), size=lst.shape)
return lst.tolist() | null |
21,008 | import torch
import torch.nn.functional as F
import numpy as np
import random
from multiprocessing import Process, Queue
import os
from os import path, listdir
import argparse
import json
import subprocess
import sys
from typing import List, Dict
import itertools
from warnings import warn
from datetime import datetime
import numpy as np
from glob import glob
def loglin(start, stop, num):
    """Return `num` log-uniformly spaced values from `start` to `stop` (inclusive), as a Python list."""
    log_grid = np.linspace(np.log(start), np.log(stop), num)
    return np.exp(log_grid).tolist()
21,009 | import torch
import torch.nn.functional as F
import numpy as np
import random
from multiprocessing import Process, Queue
import os
from os import path, listdir
import argparse
import json
import subprocess
import sys
from typing import List, Dict
import itertools
from warnings import warn
from datetime import datetime
import numpy as np
from glob import glob
def randloglin(start, stop, num):
lst = np.linspace(np.log(start), np.log(stop), num + 1)[:-1]
lst += np.random.uniform(low=0.0, high=(lst[1] - lst[0]), size=lst.shape)
return np.exp(lst).tolist() | null |
21,010 | import torch
import torch.nn.functional as F
import numpy as np
import random
from multiprocessing import Process, Queue
import os
from os import path, listdir
import argparse
import json
import subprocess
import sys
from typing import List, Dict
import itertools
from warnings import warn
from datetime import datetime
import numpy as np
from glob import glob
The provided code snippet includes necessary dependencies for implementing the `create_prodvars` function. Write a Python function `def create_prodvars(variables, noise_stds={})` to solve the following problem:
Create a dict for each setting of variable values (product across lists)
Here is the function:
def create_prodvars(variables, noise_stds=None):
    """
    Create a dict for each setting of variable values
    (product across lists).

    :param variables: dict mapping variable name to a list of values,
        a dict/set (treated as one opaque value), or a string that evaluates
        to a list (e.g. "lin(0, 1, 5)").
    :param noise_stds: optional dict mapping variable name to a Gaussian
        noise std added to that variable's numeric values; names absent
        from the dict get std 0.0 (no perturbation).
    :return: list of dicts, one per combination in the Cartesian product.
    """
    # FIX: mutable default argument replaced by None sentinel
    if noise_stds is None:
        noise_stds = {}

    def auto_list(x):
        # normalize one variable spec into a list of candidate values
        if isinstance(x, list):
            return x
        elif isinstance(x, dict) or isinstance(x, set):
            return [x]
        elif isinstance(x, str):
            # SECURITY: eval of a config-provided generator string (e.g. "lin(0,1,5)");
            # only use with trusted config files
            return eval(x)
        else:
            raise NotImplementedError('variable value must be list of values, or str generator')
    variables = {varname: auto_list(variables[varname]) for varname in variables}
    print('variables (prod)', variables)
    varnames = list(variables.keys())
    noise_stds = np.array([noise_stds.get(varname, 0.0) for varname in varnames])
    # tag each value with its variable index so product tuples map back to names
    variables = [[(i, val) for val in variables[varname]] for i, varname in enumerate(varnames)]
    prodvars = list(itertools.product(*variables))
    noise_vals = np.random.randn(len(prodvars), len(varnames)) * noise_stds
    # only perturb a value when its sampled noise is non-zero
    # (keeps non-numeric values intact for zero-std variables)
    prodvars = [{varnames[i]: ((val + n) if n != 0.0 else val) for (i, val), n in zip(sample, noise_vals_samp)}
                for sample, noise_vals_samp in zip(prodvars, noise_vals)]
    return prodvars
21,011 | import torch
import torch.nn.functional as F
import numpy as np
import random
from multiprocessing import Process, Queue
import os
from os import path, listdir
import argparse
import json
import subprocess
import sys
from typing import List, Dict
import itertools
from warnings import warn
from datetime import datetime
import numpy as np
from glob import glob
def recursive_replace(data, variables):
    """Recursively str.format every string inside a nested list/dict structure.

    Non-string leaves are returned unchanged; containers are rebuilt.
    """
    if isinstance(data, dict):
        return {key: recursive_replace(value, variables) for key, value in data.items()}
    if isinstance(data, list):
        return [recursive_replace(item, variables) for item in data]
    if isinstance(data, str):
        return data.format(**variables)
    return data
21,012 | import torch
import torch.cuda
import torch.optim
import torch.nn.functional as F
import svox2
import json
import imageio
import os
from os import path
import shutil
import gc
import numpy as np
import math
import argparse
import cv2
from util.dataset import datasets
from util.util import Timing, get_expon_lr_func, generate_dirs_equirect, viridis_cmap
from util import config_util
from warnings import warn
from datetime import datetime
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from typing import NamedTuple, Optional, Union
device = "cuda" if torch.cuda.is_available() else "cpu"
summary_writer
args.background_nlayers > 0 and not dset.should_use_background:
warn('Using a background model for dataset type ' + str(type(dset)) + ' which typically does not use background')
dset_test = datasets[args.dataset_type](
args.data_dir, split="test", **config_util.build_data_options(args))
global_start_time = datetime.now()
grid = svox2.SparseGrid(reso=reso_list[reso_id],
center=dset.scene_center,
radius=dset.scene_radius,
use_sphere_bound=dset.use_sphere_bound and not args.nosphereinit,
basis_dim=args.sh_dim,
use_z_order=True,
device=device,
basis_reso=args.basis_reso,
basis_type=svox2.__dict__['BASIS_TYPE_' + args.basis_type.upper()],
mlp_posenc_size=args.mlp_posenc_size,
mlp_width=args.mlp_width,
background_nlayers=args.background_nlayers,
background_reso=args.background_reso)
grid.sh_data.data[:] = 0.0
grid.density_data.data[:] = args.lr_fg_begin_step > 0 args.init_sigma
grid.use_background:
grid.background_data.data[..., -1] = args.init_sigma_bg
# grid.background_data.data[..., :-1] = 0.5 / svox2.utils.SH_C0
optim_basis_mlp =
grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
grid.reinit_learned_bases(init_type='sh')
# grid.reinit_learned_bases(init_type='fourier')
# grid.reinit_learned_bases(init_type='sg', upper_hemi=True)
# grid.basis_data.data.normal_(mean=0.28209479177387814, std=0.001)
elif grid.basis_type == svox2.BASIS_TYPE_MLP:
[
svox2.Camera(c2w.to(device=device),
dset.intrins.get('fx', i),
dset.intrins.get('fy', i),
dset.intrins.get('cx', i),
dset.intrins.get('cy', i),
width=dset.get_image_size(i)[1],
height=dset.get_image_size(i)[0],
ndc_coeffs=dset.ndc_coeffs) for i, c2w in enumerate(dset.c2w)
]
ckpt_path = path.join(args.train_dir, 'ckpt.npz')
lr_sigma_func = get_expon_lr_func(args.lr_sigma, args.lr_sigma_final, args.lr_sigma_delay_steps,
args.lr_sigma_delay_mult, args.lr_sigma_decay_steps)
lr_sh_func = get_expon_lr_func(args.lr_sh, args.lr_sh_final, args.lr_sh_delay_steps,
args.lr_sh_delay_mult, args.lr_sh_decay_steps)
lr_basis_func = get_expon_lr_func(args.lr_basis, args.lr_basis_final, args.lr_basis_delay_steps,
args.lr_basis_delay_mult, args.lr_basis_decay_steps)
lr_sigma_bg_func = get_expon_lr_func(args.lr_sigma_bg, args.lr_sigma_bg_final, args.lr_sigma_bg_delay_steps,
args.lr_sigma_bg_delay_mult, args.lr_sigma_bg_decay_steps)
lr_color_bg_func = get_expon_lr_func(args.lr_color_bg, args.lr_color_bg_final, args.lr_color_bg_delay_steps,
args.lr_color_bg_delay_mult, args.lr_color_bg_decay_steps)
lr_sigma_factor = 1.0
lr_sh_factor = 1.0
lr_basis_factor = 1.0
last_upsamp_step = args.init_iters
args.enable_random:
warn("Randomness is enabled for training (normal for LLFF & scenes with background)")
epoch_id = -1
while True:
dset.shuffle_rays()
epoch_id += 1
epoch_size = dset.rays.origins.size(0)
batches_per_epoch = (epoch_size-1)//args.batch_size+1
# Test
def eval_step():
print('Eval step')
with torch.no_grad():
stats_test = {'psnr' : 0.0, 'mse' : 0.0}
# Standard set
N_IMGS_TO_EVAL = min(20 if epoch_id > 0 else 5, dset_test.n_images)
N_IMGS_TO_SAVE = N_IMGS_TO_EVAL # if not args.tune_mode else 1
img_eval_interval = dset_test.n_images // N_IMGS_TO_EVAL
img_save_interval = (N_IMGS_TO_EVAL // N_IMGS_TO_SAVE)
img_ids = range(0, dset_test.n_images, img_eval_interval)
# Special 'very hard' specular + fuzz set
# img_ids = [2, 5, 7, 9, 21,
# 44, 45, 47, 49, 56,
# 80, 88, 99, 115, 120,
# 154]
# img_save_interval = 1
n_images_gen = 0
for i, img_id in tqdm(enumerate(img_ids), total=len(img_ids)):
c2w = dset_test.c2w[img_id].to(device=device)
cam = svox2.Camera(c2w,
dset_test.intrins.get('fx', img_id),
dset_test.intrins.get('fy', img_id),
dset_test.intrins.get('cx', img_id),
dset_test.intrins.get('cy', img_id),
width=dset_test.get_image_size(img_id)[1],
height=dset_test.get_image_size(img_id)[0],
ndc_coeffs=dset_test.ndc_coeffs)
rgb_pred_test = grid.volume_render_image(cam, use_kernel=True)
rgb_gt_test = dset_test.gt[img_id].to(device=device)
all_mses = ((rgb_gt_test - rgb_pred_test) ** 2).cpu()
i % img_save_interval == 0:
img_pred = rgb_pred_test.cpu()
img_pred.clamp_max_(1.0)
summary_writer.add_image(f'test/image_{img_id:04d}',
img_pred, global_step=gstep_id_base, dataformats='HWC')
args.log_mse_image:
mse_img = all_mses / all_mses.max()
summary_writer.add_image(f'test/mse_map_{img_id:04d}',
mse_img, global_step=gstep_id_base, dataformats='HWC')
if args.log_depth_map:
depth_img = grid.volume_render_depth_image(cam,
args.log_depth_map_use_thresh if
args.log_depth_map_use_thresh else None
)
depth_img = viridis_cmap(depth_img.cpu())
summary_writer.add_image(f'test/depth_map_{img_id:04d}',
depth_img,
global_step=gstep_id_base, dataformats='HWC')
rgb_pred_test = rgb_gt_test = None
mse_num : float = all_mses.mean().item()
psnr = -10.0 * math.log10(mse_num)
math.isnan(psnr):
print('NAN PSNR', i, img_id, mse_num)
assert False
stats_test['mse'] += mse_num
stats_test['psnr'] += psnr
n_images_gen += 1
if grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE or \
grid.basis_type == svox2.BASIS_TYPE_MLP:
EQ_RESO = 256
eq_dirs = generate_dirs_equirect(EQ_RESO * 2, EQ_RESO)
eq_dirs = torch.from_numpy(eq_dirs).to(device=device).view(-1, 3)
grid.basis_type == svox2.BASIS_TYPE_MLP:
sphfuncs = grid._eval_basis_mlp(eq_dirs)
else:
sphfuncs = grid._eval_learned_bases(eq_dirs)
sphfuncs = sphfuncs.view(EQ_RESO, EQ_RESO*2, -1).permute([2, 0, 1]).cpu().numpy()
stats = [(sphfunc.min(), sphfunc.mean(), sphfunc.max())
for sphfunc in sphfuncs]
sphfuncs_cmapped = [viridis_cmap(sphfunc) for sphfunc in sphfuncs]
for im, (minv, meanv, maxv) in zip(sphfuncs_cmapped, stats):
cv2.putText(im, f"{minv=:.4f} {meanv=:.4f} {maxv=:.4f}", (10, 20),
0, 0.5, [255, 0, 0])
sphfuncs_cmapped = np.concatenate(sphfuncs_cmapped, axis=0)
summary_writer.add_image(f'test/spheric',
sphfuncs_cmapped, global_step=gstep_id_base, dataformats='HWC')
# END add spherical map visualization
stats_test['mse'] /= n_images_gen
stats_test['psnr'] /= n_images_gen
for stat_name in stats_test:
summary_writer.add_scalar('test/' + stat_name,
stats_test[stat_name], global_step=gstep_id_base)
summary_writer.add_scalar('epoch_id', float(epoch_id), global_step=gstep_id_base)
print('eval stats:', stats_test)
if epoch_id % max(factor, args.eval_every) == 0: #and (epoch_id > 0 or not args.tune_mode):
args.lr_fg_begin_step > 0 and gstep_id == args.lr_fg_begin_step:
grid.density_data.data[:] = args.init_sigma
lr_sigma =
not args.lr_decay:
lr_sigma = args.lr_sigma * lr_sigma_factor
lr_sh = args.lr_sh * lr_sh_factor
lr_basis = args.lr_basis * lr_basis_factor
batch_end = min(batch_begin + args.batch_size, epoch_size)
batch_origins = dset.rays.origins[batch_begin: batch_end]
batch_dirs = dset.rays.dirs[batch_begin: batch_end]
rgb_gt = dset.rays.gt[batch_begin: batch_end]
rays = svox2.Rays(batch_origins, batch_dirs)
# with Timing("volrend_fused"):
rgb_pred = grid.volume_render_fused(rays, rgb_gt,
beta_loss=args.lambda_beta,
sparsity_loss=args.lambda_sparsity,
randomize=args.enable_random)
# with Timing("loss_comp"):
mse = F.mse_loss(rgb_gt, rgb_pred)
# Stats
mse_num : float =
summary_writer.add_scalar("lr_sh", lr_sh, global_step=gstep_id)
summary_writer.add_scalar("lr_sigma", lr_sigma, global_step=gstep_id)
grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
summary_writer.add_scalar("lr_basis", lr_basis, global_step=gstep_id)
if grid.use_background:
summary_writer.add_scalar("lr_sigma_bg", lr_sigma_bg, global_step=gstep_id)
summary_writer.add_scalar("lr_color_bg", lr_color_bg, global_step=gstep_id)
if args.weight_decay_sh < 1.0:
grid.sh_data.data *= args.weight_decay_sigma
if args.weight_decay_sigma < 1.0:
grid.density_data.data *= args.weight_decay_s
args.lambda_tv > 0.0:
grid.inplace_tv_grad(grid.density_data.grad,
scaling=args.lambda_tv,
sparse_frac=args.tv_sparsity,
logalpha=args.tv_logalpha,
ndc_coeffs=dset.ndc_coeffs,
contiguous=args.tv_contiguous)
if args.lambda_tv_sh > 0.0:
grid.inplace_tv_color_grad(grid.sh_data.grad,
scaling=args.lambda_tv_sh,
sparse_frac=args.tv_sh_sparsity,
ndc_coeffs=dset.ndc_coeffs,
contiguous=args.tv_contiguous)
if args.lambda_tv_lumisphere > 0.0:
grid.inplace_tv_lumisphere_grad(grid.sh_data.grad,
scaling=args.lambda_tv_lumisphere,
dir_factor=args.tv_lumisphere_dir_factor,
sparse_frac=args.tv_lumisphere_sparsity,
ndc_coeffs=dset.ndc_coeffs)
if args.lambda_l2_sh > 0.0:
grid.inplace_l2_color_grad(grid.sh_data.grad,
scaling=args.lambda_l2_sh)
if grid.use_background and (args.lambda_tv_background_sigma > 0.0 or args.lambda_tv_background_color > 0.0):
grid.inplace_tv_background_grad(grid.background_data.grad,
scaling=args.lambda_tv_background_color,
scaling_density=args.lambda_tv_background_sigma,
sparse_frac=args.tv_background_sparsity,
contiguous=args.tv_contiguous)
if args.lambda_tv_basis > 0.0:
tv_basis = grid.tv_basis()
loss_tv_basis =
grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
grid.optim_basis_step(lr_basis, beta=args.rms_beta, optim=args.basis_optim)
elif grid.basis_type == svox2.BASIS_TYPE_MLP:
optim_basis_mlp.step()
optim_basis_mlp.zero_grad()
train_step()
gc.collect()
gstep_id_base +=
args.save_every > 0 and (epoch_id + 1) % max(
factor, args.save_every) == 0 and not args.tune_mode:
print('Saving', ckpt_path)
grid.save(ckpt_path)
if (gstep_id_base - last_upsamp_step) >= args.upsamp_every:
last_upsamp_step = gstep_id_base
print('* Upsampling from', reso_list[reso_id], 'to', reso_list[reso_id + 1])
args.tv_early_only > 0:
print('turning off TV regularization')
args.lambda_tv = 0.0
args.lambda_tv_sh = 0.0
elif args.tv_decay != 1.0:
args.lambda_tv *= args.tv_decay
args.lambda_tv_sh *= args.tv_decay
reso_id += 1
use_sparsify = True
z_reso = reso_list[reso_id]
grid.use_background and reso_id <= 1:
grid.sparsify_background(args.background_density_thresh)
if args.upsample_density_add:
grid.density_data.data[:] += args.upsample_density_add
if factor > 1 and reso_id < len(reso_list) - 1:
print('* Using higher resolution images due to large grid; new factor', factor)
factor //= 2
dset.gen_rays(factor=factor)
dset.shuffle_rays()
if gstep_id_base >= args.n_iters:
print('* Final eval and save')
eval_step()
global_stop_time = datetime.now()
secs = (global_stop_time - global_start_time).total_seconds()
timings_file = open(os.path.join(args.train_dir, 'time_mins.txt'), 'a')
not args.tune_nosave:
grid.save(ckpt_path)
def viridis_cmap(gray: np.ndarray):
    """Map a single-channel image through matplotlib's viridis colormap.

    High values map toward yellow, low values toward blue.

    :param gray: np.ndarray, (H, W) or (H, W, 1), unscaled
    :return: (H, W, 3) float32 with values in [0, 1]
    """
    # Normalize to [0, 1] over the image's own min/max, then drop alpha.
    normalized = plt.Normalize()(gray.squeeze())
    rgba = plt.cm.viridis(normalized)
    rgb = rgba[..., :-1]
    return rgb.astype(np.float32)
def generate_dirs_equirect(w, h):
    """Generate view directions for a ``w`` x ``h`` equirectangular image.

    Pixel centers are mapped to uv coordinates in [-1, 1] x [-1, 1] and then
    converted to 3D directions via ``equirect2xyz``.
    """
    # Pixel-center coordinates, rescaled from [0, w) x [0, h) to [-1, 1].
    cols = (np.arange(w, dtype=np.float32) + 0.5) * (2.0 / w) - 1.0
    rows = (np.arange(h, dtype=np.float32) + 0.5) * (2.0 / h) - 1.0
    u, v = np.meshgrid(cols, rows, indexing="xy")  # pylint: disable=unbalanced-tuple-unpacking
    uv = np.stack([u, v], axis=-1)
    return equirect2xyz(uv)
def eval_step():
    """Render held-out test views with the current grid and log metrics to TensorBoard.

    Renders up to N_IMGS_TO_EVAL evenly spaced test images, accumulates MSE/PSNR,
    and optionally logs per-view MSE maps, depth maps, and (for 3D-texture / MLP
    basis types) an equirectangular visualization of the learned basis functions.

    NOTE(review): relies on enclosing-scope globals (grid, dset_test, summary_writer,
    epoch_id, gstep_id_base, args, device) — confirm it is only invoked from the
    training loop where these are bound.
    """
    # Put in a function to avoid memory leak
    print('Eval step')
    with torch.no_grad():
        stats_test = {'psnr' : 0.0, 'mse' : 0.0}
        # Standard set
        # Evaluate more images (20 vs 5) once past the first epoch.
        N_IMGS_TO_EVAL = min(20 if epoch_id > 0 else 5, dset_test.n_images)
        N_IMGS_TO_SAVE = N_IMGS_TO_EVAL # if not args.tune_mode else 1
        img_eval_interval = dset_test.n_images // N_IMGS_TO_EVAL
        img_save_interval = (N_IMGS_TO_EVAL // N_IMGS_TO_SAVE)
        img_ids = range(0, dset_test.n_images, img_eval_interval)
        # Special 'very hard' specular + fuzz set
        # img_ids = [2, 5, 7, 9, 21,
        #            44, 45, 47, 49, 56,
        #            80, 88, 99, 115, 120,
        #            154]
        # img_save_interval = 1
        n_images_gen = 0
        for i, img_id in tqdm(enumerate(img_ids), total=len(img_ids)):
            c2w = dset_test.c2w[img_id].to(device=device)
            cam = svox2.Camera(c2w,
                               dset_test.intrins.get('fx', img_id),
                               dset_test.intrins.get('fy', img_id),
                               dset_test.intrins.get('cx', img_id),
                               dset_test.intrins.get('cy', img_id),
                               width=dset_test.get_image_size(img_id)[1],
                               height=dset_test.get_image_size(img_id)[0],
                               ndc_coeffs=dset_test.ndc_coeffs)
            rgb_pred_test = grid.volume_render_image(cam, use_kernel=True)
            rgb_gt_test = dset_test.gt[img_id].to(device=device)
            all_mses = ((rgb_gt_test - rgb_pred_test) ** 2).cpu()
            if i % img_save_interval == 0:
                img_pred = rgb_pred_test.cpu()
                img_pred.clamp_max_(1.0)
                summary_writer.add_image(f'test/image_{img_id:04d}',
                        img_pred, global_step=gstep_id_base, dataformats='HWC')
                if args.log_mse_image:
                    mse_img = all_mses / all_mses.max()
                    summary_writer.add_image(f'test/mse_map_{img_id:04d}',
                            mse_img, global_step=gstep_id_base, dataformats='HWC')
                if args.log_depth_map:
                    depth_img = grid.volume_render_depth_image(cam,
                                args.log_depth_map_use_thresh if
                                args.log_depth_map_use_thresh else None
                            )
                    depth_img = viridis_cmap(depth_img.cpu())
                    summary_writer.add_image(f'test/depth_map_{img_id:04d}',
                            depth_img,
                            global_step=gstep_id_base, dataformats='HWC')
            # Drop references so the rendered tensors can be freed before the next view.
            rgb_pred_test = rgb_gt_test = None
            mse_num : float = all_mses.mean().item()
            psnr = -10.0 * math.log10(mse_num)
            if math.isnan(psnr):
                print('NAN PSNR', i, img_id, mse_num)
                assert False
            stats_test['mse'] += mse_num
            stats_test['psnr'] += psnr
            n_images_gen += 1
        if grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE or \
           grid.basis_type == svox2.BASIS_TYPE_MLP:
            # Add spherical map visualization
            EQ_RESO = 256
            eq_dirs = generate_dirs_equirect(EQ_RESO * 2, EQ_RESO)
            eq_dirs = torch.from_numpy(eq_dirs).to(device=device).view(-1, 3)
            if grid.basis_type == svox2.BASIS_TYPE_MLP:
                sphfuncs = grid._eval_basis_mlp(eq_dirs)
            else:
                sphfuncs = grid._eval_learned_bases(eq_dirs)
            sphfuncs = sphfuncs.view(EQ_RESO, EQ_RESO*2, -1).permute([2, 0, 1]).cpu().numpy()
            stats = [(sphfunc.min(), sphfunc.mean(), sphfunc.max())
                     for sphfunc in sphfuncs]
            sphfuncs_cmapped = [viridis_cmap(sphfunc) for sphfunc in sphfuncs]
            # Stamp min/mean/max onto each basis image before stacking them vertically.
            for im, (minv, meanv, maxv) in zip(sphfuncs_cmapped, stats):
                cv2.putText(im, f"{minv=:.4f} {meanv=:.4f} {maxv=:.4f}", (10, 20),
                            0, 0.5, [255, 0, 0])
            sphfuncs_cmapped = np.concatenate(sphfuncs_cmapped, axis=0)
            summary_writer.add_image(f'test/spheric',
                    sphfuncs_cmapped, global_step=gstep_id_base, dataformats='HWC')
            # END add spherical map visualization
        stats_test['mse'] /= n_images_gen
        stats_test['psnr'] /= n_images_gen
        for stat_name in stats_test:
            summary_writer.add_scalar('test/' + stat_name,
                                      stats_test[stat_name], global_step=gstep_id_base)
        summary_writer.add_scalar('epoch_id', float(epoch_id), global_step=gstep_id_base)
        print('eval stats:', stats_test)
21,013 | import torch
import torch.cuda
import torch.optim
import torch.nn.functional as F
import svox2
import json
import imageio
import os
from os import path
import shutil
import gc
import numpy as np
import math
import argparse
import cv2
from util.dataset import datasets
from util.util import Timing, get_expon_lr_func, generate_dirs_equirect, viridis_cmap
from util import config_util
from warnings import warn
from datetime import datetime
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from typing import NamedTuple, Optional, Union
summary_writer
args.background_nlayers > 0 and not dset.should_use_background:
warn('Using a background model for dataset type ' + str(type(dset)) + ' which typically does not use background')
dset_test = datasets[args.dataset_type](
args.data_dir, split="test", **config_util.build_data_options(args))
global_start_time = datetime.now()
grid = svox2.SparseGrid(reso=reso_list[reso_id],
center=dset.scene_center,
radius=dset.scene_radius,
use_sphere_bound=dset.use_sphere_bound and not args.nosphereinit,
basis_dim=args.sh_dim,
use_z_order=True,
device=device,
basis_reso=args.basis_reso,
basis_type=svox2.__dict__['BASIS_TYPE_' + args.basis_type.upper()],
mlp_posenc_size=args.mlp_posenc_size,
mlp_width=args.mlp_width,
background_nlayers=args.background_nlayers,
background_reso=args.background_reso)
grid.sh_data.data[:] = 0.0
grid.density_data.data[:] = args.lr_fg_begin_step > 0 args.init_sigma
grid.use_background:
grid.background_data.data[..., -1] = args.init_sigma_bg
# grid.background_data.data[..., :-1] = 0.5 / svox2.utils.SH_C0
optim_basis_mlp =
grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
grid.reinit_learned_bases(init_type='sh')
# grid.reinit_learned_bases(init_type='fourier')
# grid.reinit_learned_bases(init_type='sg', upper_hemi=True)
# grid.basis_data.data.normal_(mean=0.28209479177387814, std=0.001)
elif grid.basis_type == svox2.BASIS_TYPE_MLP:
optim_basis_mlp = torch.optim.Adam(
grid.basis_mlp.parameters(),
lr=args.lr_basis
)
grid.requires_grad_(True)
config_util.setup_render_opts(grid.opt, args)
print('Render options', grid.opt)
gstep_id_base = 0
resample_cameras = [
svox2.Camera(c2w.to(device=device),
dset.intrins.get('fx', i),
dset.intrins.get('fy', i),
dset.intrins.get('cx', i),
dset.intrins.get('cy', i),
width=dset.get_image_size(i)[1],
height=dset.get_image_size(i)[0],
ndc_coeffs=dset.ndc_coeffs) for i, c2w in enumerate(dset.c2w)
]
ckpt_path = path.join(args.train_dir, 'ckpt.npz')
lr_sigma_func = get_expon_lr_func(args.lr_sigma, args.lr_sigma_final, args.lr_sigma_delay_steps,
args.lr_sigma_delay_mult, args.lr_sigma_decay_steps)
lr_sh_func = get_expon_lr_func(args.lr_sh, args.lr_sh_final, args.lr_sh_delay_steps,
args.lr_sh_delay_mult, args.lr_sh_decay_steps)
lr_basis_func = get_expon_lr_func(args.lr_basis, args.lr_basis_final, args.lr_basis_delay_steps,
args.lr_basis_delay_mult, args.lr_basis_decay_steps)
lr_sigma_bg_func = get_expon_lr_func(args.lr_sigma_bg, args.lr_sigma_bg_final, args.lr_sigma_bg_delay_steps,
args.lr_sigma_bg_delay_mult, args.lr_sigma_bg_decay_steps)
lr_color_bg_func = get_expon_lr_func(args.lr_color_bg, args.lr_color_bg_final, args.lr_color_bg_delay_steps,
args.lr_color_bg_delay_mult, args.lr_color_bg_decay_steps)
lr_sigma_factor = 1.0
lr_sh_factor = 1.0
lr_basis_factor = 1.0
last_upsamp_step = args.init_iters
args.enable_random:
warn("Randomness is enabled for training (normal for LLFF & scenes with background)")
epoch_id = -1
while True:
dset.shuffle_rays()
epoch_id += 1
epoch_size = dset.rays.origins.size(0)
batches_per_epoch = (epoch_size-1)//args.batch_size+1
# Test
def eval_step():
print('Eval step')
with torch.no_grad():
stats_test = {'psnr' : 0.0, 'mse' : 0.0}
# Standard set
N_IMGS_TO_EVAL = min(20 if epoch_id > 0 else 5, dset_test.n_images)
N_IMGS_TO_SAVE = N_IMGS_TO_EVAL # if not args.tune_mode else 1
img_eval_interval = dset_test.n_images // N_IMGS_TO_EVAL
img_save_interval = (N_IMGS_TO_EVAL // N_IMGS_TO_SAVE)
img_ids = range(0, dset_test.n_images, img_eval_interval)
# Special 'very hard' specular + fuzz set
# img_ids = [2, 5, 7, 9, 21,
# 44, 45, 47, 49, 56,
# 80, 88, 99, 115, 120,
# 154]
# img_save_interval = 1
n_images_gen = 0
for i, img_id in tqdm(enumerate(img_ids), total=len(img_ids)):
c2w = dset_test.c2w[img_id].to(device=device)
cam = svox2.Camera(c2w,
dset_test.intrins.get('fx', img_id),
dset_test.intrins.get('fy', img_id),
dset_test.intrins.get('cx', img_id),
dset_test.intrins.get('cy', img_id),
width=dset_test.get_image_size(img_id)[1],
height=dset_test.get_image_size(img_id)[0],
ndc_coeffs=dset_test.ndc_coeffs)
rgb_pred_test = grid.volume_render_image(cam, use_kernel=True)
rgb_gt_test = dset_test.gt[img_id].to(device=device)
all_mses = ((rgb_gt_test - rgb_pred_test) ** 2).cpu()
summary_writer.add_image(f'test/image_{img_id:04d}',
img_pred, global_step=gstep_id_base, dataformats='HWC')
args.log_mse_image:
mse_img = all_mses / all_mses.max()
summary_writer.add_image(f'test/mse_map_{img_id:04d}',
mse_img, global_step=gstep_id_base, dataformats='HWC')
if args.log_depth_map:
depth_img = grid.volume_render_depth_image(cam,
args.log_depth_map_use_thresh if
args.log_depth_map_use_thresh else None
)
depth_img = viridis_cmap(depth_img.cpu())
summary_writer.add_image(f'test/depth_map_{img_id:04d}',
depth_img,
global_step=gstep_id_base, dataformats='HWC')
rgb_pred_test = rgb_gt_test = None
mse_num : float =
math.isnan(psnr):
print('NAN PSNR', i, img_id, mse_num)
assert False
stats_test['mse'] += mse_num
stats_test['psnr'] += psnr
n_images_gen += 1
if grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE or \
grid.basis_type == svox2.BASIS_TYPE_MLP:
grid.basis_type == svox2.BASIS_TYPE_MLP:
sphfuncs = grid._eval_basis_mlp(eq_dirs)
else:
sphfuncs = grid._eval_learned_bases(eq_dirs)
sphfuncs = sphfuncs.view(EQ_RESO, EQ_RESO*2, -1).permute([2, 0, 1]).cpu().numpy()
stats = [(sphfunc.min(), sphfunc.mean(), sphfunc.max())
for sphfunc in sphfuncs]
sphfuncs_cmapped = [viridis_cmap(sphfunc) for sphfunc in sphfuncs]
for im, (minv, meanv, maxv) in zip(sphfuncs_cmapped, stats):
cv2.putText(im, f"{minv=:.4f} {meanv=:.4f} {maxv=:.4f}", (10, 20),
0, 0.5, [255, 0, 0])
sphfuncs_cmapped = np.concatenate(sphfuncs_cmapped, axis=0)
summary_writer.add_image(f'test/spheric',
sphfuncs_cmapped, global_step=gstep_id_base, dataformats='HWC')
# END add spherical map visualization
stats_test['mse'] /= n_images_gen
stats_test['psnr'] /= n_images_gen
for stat_name in stats_test:
summary_writer.add_scalar('test/' + stat_name,
stats_test[stat_name], global_step=gstep_id_base)
summary_writer.add_scalar('epoch_id', float(epoch_id), global_step=gstep_id_base)
print('eval stats:', stats_test)
if epoch_id % max(factor, args.eval_every) == 0: #and (epoch_id > 0 or not args.tune_mode):
iter_id + gstep_id_base
args.lr_fg_begin_step > 0 and gstep_id == args.lr_fg_begin_step:
grid.density_data.data[:] = args.init_sigma
lr_sigma = lr_sigma_func(gstep_id) * lr_sigma_factor
lr_sh = lr_sh_func(gstep_id) * lr_sh_factor
lr_basis = lr_basis_func(gstep_id - args.lr_basis_begin_step) * lr_basis_factor
lr_sigma_bg = lr_sigma_bg_func(gstep_id - args.lr_basis_begin_step) * lr_basis_factor
lr_color_bg = lr_color_bg_func(gstep_id - args.lr_basis_begin_step) * lr_basis_factor
not args.lr_decay:
lr_sigma = args.lr_sigma * lr_sigma_factor
lr_sh = args.lr_sh * lr_sh_factor
lr_basis = args.lr_basis * lr_basis_factor
batch_end = min(batch_begin + args.batch_size, epoch_size)
batch_origins = dset.rays.origins[batch_begin: batch_end]
batch_dirs = dset.rays.dirs[batch_begin: batch_end]
rgb_gt = dset.rays.gt[batch_begin: batch_end]
rays = svox2.Rays(batch_origins, batch_dirs)
# with Timing("volrend_fused"):
rgb_pred = grid.volume_render_fused(rays, rgb_gt,
beta_loss=args.lambda_beta,
sparsity_loss=args.lambda_sparsity,
randomize=args.enable_random)
# with Timing("loss_comp"):
mse = F.mse_loss(rgb_gt, rgb_pred)
# Stats
mse_num : float = mse.detach().item()
psnr = -10.0 * math.log10(mse_num)
stats['mse'] += mse_num
stats['psnr'] += psnr
stats['invsqr_mse'] += 1.0 / mse_num ** 2
(iter_id + 1) % args.print_every == 0:
pbar.set_description(f'epoch {epoch_id} psnr={psnr:.2f}')
for stat_name in stats:
stat_val = stats[stat_name] / args.print_every
summary_writer.add_scalar(stat_name, stat_val, global_step=gstep_id)
stats[stat_name] = 0.0
summary_writer.add_scalar("lr_sh", lr_sh, global_step=gstep_id)
summary_writer.add_scalar("lr_sigma", lr_sigma, global_step=gstep_id)
grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
summary_writer.add_scalar("lr_basis", lr_basis, global_step=gstep_id)
if grid.use_background:
summary_writer.add_scalar("lr_sigma_bg", lr_sigma_bg, global_step=gstep_id)
summary_writer.add_scalar("lr_color_bg", lr_color_bg, global_step=gstep_id)
if args.weight_decay_sh < 1.0:
grid.sh_data.data *= args.weight_decay_sigma
if args.weight_decay_sigma < 1.0:
grid.density_data.data *= args.weight_decay_s
args.lambda_tv > 0.0:
grid.inplace_tv_grad(grid.density_data.grad,
scaling=args.lambda_tv,
sparse_frac=args.tv_sparsity,
logalpha=args.tv_logalpha,
ndc_coeffs=dset.ndc_coeffs,
contiguous=args.tv_contiguous)
if args.lambda_tv_sh > 0.0:
grid.inplace_tv_color_grad(grid.sh_data.grad,
scaling=args.lambda_tv_sh,
sparse_frac=args.tv_sh_sparsity,
ndc_coeffs=dset.ndc_coeffs,
contiguous=args.tv_contiguous)
if args.lambda_tv_lumisphere > 0.0:
grid.inplace_tv_lumisphere_grad(grid.sh_data.grad,
scaling=args.lambda_tv_lumisphere,
dir_factor=args.tv_lumisphere_dir_factor,
sparse_frac=args.tv_lumisphere_sparsity,
ndc_coeffs=dset.ndc_coeffs)
if args.lambda_l2_sh > 0.0:
grid.inplace_l2_color_grad(grid.sh_data.grad,
scaling=args.lambda_l2_sh)
if grid.use_background and (args.lambda_tv_background_sigma > 0.0 or args.lambda_tv_background_color > 0.0):
grid.inplace_tv_background_grad(grid.background_data.grad,
scaling=args.lambda_tv_background_color,
scaling_density=args.lambda_tv_background_sigma,
sparse_frac=args.tv_background_sparsity,
contiguous=args.tv_contiguous)
if args.lambda_tv_basis > 0.0:
tv_basis = grid.tv_basis()
loss_tv_basis = tv_basis * args.lambda_tv_basis
loss_tv_basis.backward(
gstep_id >= args.lr_fg_begin_step:
grid.optim_density_step(lr_sigma, beta=args.rms_beta, optim=args.sigma_optim)
grid.optim_sh_step(lr_sh, beta=args.rms_beta, optim=args.sh_optim)
if grid.use_background:
grid.optim_background_step(lr_sigma_bg, lr_color_bg, beta=args.rms_beta, optim=args.bg_optim)
if gstep_id >= args.lr_basis_begin_ste
grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
grid.optim_basis_step(lr_basis, beta=args.rms_beta, optim=args.basis_optim)
elif grid.basis_type == svox2.BASIS_TYPE_MLP:
optim_basis_mlp.step()
optim_basis_mlp.zero_grad()
train_step()
gc.collect()
gstep_id_base += batches_per_epoch
args.save_every > 0 and (epoch_id + 1) % max(
factor, args.save_every) == 0 and not args.tune_mode:
print('Saving', ckpt_path)
grid.save(ckpt_path)
if (gstep_id_base - last_upsamp_step) >= args.upsamp_every:
last_upsamp_step = gstep_id_base
print('* Upsampling from', reso_list[reso_id], 'to', reso_list[reso_id + 1])
args.tv_early_only > 0:
print('turning off TV regularization')
args.lambda_tv = 0.0
args.lambda_tv_sh = 0.0
elif args.tv_decay != 1.0:
args.lambda_tv *= args.tv_decay
args.lambda_tv_sh *= args.tv_decay
reso_id += 1
use_sparsify = True
z_reso = reso_list[reso_id]
grid.use_background and reso_id <= 1:
grid.sparsify_background(args.background_density_thresh)
if args.upsample_density_add:
grid.density_data.data[:] += args.upsample_density_add
if factor > 1 and reso_id < len(reso_list) - 1:
print('* Using higher resolution images due to large grid; new factor', factor)
factor //= 2
dset.gen_rays(factor=factor)
dset.shuffle_rays()
if gstep_id_base >= args.n_iters:
print('* Final eval and save')
eval_step()
global_stop_time = datetime.now()
secs = (global_stop_time - global_start_time).total_seconds()
timings_file = open(os.path.join(args.train_dir, 'time_mins.txt'), 'a')
not args.tune_nosave:
grid.save(ckpt_path)
def train_step():
    """Run one epoch of optimization over the shuffled training rays.

    For each mini-batch: compute scheduled per-parameter learning rates, do a
    fused volume-render + MSE backward pass, log stats, apply weight decay and
    TV/sparsity regularizer gradients, then take manual SGD/RMSprop steps on
    the grid (and step the basis-MLP optimizer when applicable).

    NOTE(review): reads/updates enclosing-scope state (grid, dset, args,
    epoch_size, batches_per_epoch, gstep_id_base, lr_* schedules,
    summary_writer) — call only from the training loop.
    """
    print('Train step')
    pbar = tqdm(enumerate(range(0, epoch_size, args.batch_size)), total=batches_per_epoch)
    stats = {"mse" : 0.0, "psnr" : 0.0, "invsqr_mse" : 0.0}
    for iter_id, batch_begin in pbar:
        gstep_id = iter_id + gstep_id_base
        # Re-initialize foreground density exactly when its training begins.
        if args.lr_fg_begin_step > 0 and gstep_id == args.lr_fg_begin_step:
            grid.density_data.data[:] = args.init_sigma
        # Scheduled learning rates (overridden below when lr_decay is off).
        lr_sigma = lr_sigma_func(gstep_id) * lr_sigma_factor
        lr_sh = lr_sh_func(gstep_id) * lr_sh_factor
        lr_basis = lr_basis_func(gstep_id - args.lr_basis_begin_step) * lr_basis_factor
        lr_sigma_bg = lr_sigma_bg_func(gstep_id - args.lr_basis_begin_step) * lr_basis_factor
        lr_color_bg = lr_color_bg_func(gstep_id - args.lr_basis_begin_step) * lr_basis_factor
        if not args.lr_decay:
            lr_sigma = args.lr_sigma * lr_sigma_factor
            lr_sh = args.lr_sh * lr_sh_factor
            lr_basis = args.lr_basis * lr_basis_factor

        # Slice this mini-batch out of the (pre-shuffled) flat ray arrays.
        batch_end = min(batch_begin + args.batch_size, epoch_size)
        batch_origins = dset.rays.origins[batch_begin: batch_end]
        batch_dirs = dset.rays.dirs[batch_begin: batch_end]
        rgb_gt = dset.rays.gt[batch_begin: batch_end]
        rays = svox2.Rays(batch_origins, batch_dirs)

        # Fused render + backward: gradients are accumulated inside the kernel.
        rgb_pred = grid.volume_render_fused(rays, rgb_gt,
                beta_loss=args.lambda_beta,
                sparsity_loss=args.lambda_sparsity,
                randomize=args.enable_random)
        # MSE here is for logging only; the backward already happened above.
        mse = F.mse_loss(rgb_gt, rgb_pred)

        # Stats
        mse_num : float = mse.detach().item()
        psnr = -10.0 * math.log10(mse_num)
        stats['mse'] += mse_num
        stats['psnr'] += psnr
        stats['invsqr_mse'] += 1.0 / mse_num ** 2
        if (iter_id + 1) % args.print_every == 0:
            # Log averaged stats, then reset the accumulators.
            pbar.set_description(f'epoch {epoch_id} psnr={psnr:.2f}')
            for stat_name in stats:
                stat_val = stats[stat_name] / args.print_every
                summary_writer.add_scalar(stat_name, stat_val, global_step=gstep_id)
                stats[stat_name] = 0.0
            summary_writer.add_scalar("lr_sh", lr_sh, global_step=gstep_id)
            summary_writer.add_scalar("lr_sigma", lr_sigma, global_step=gstep_id)
            if grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
                summary_writer.add_scalar("lr_basis", lr_basis, global_step=gstep_id)
            if grid.use_background:
                summary_writer.add_scalar("lr_sigma_bg", lr_sigma_bg, global_step=gstep_id)
                summary_writer.add_scalar("lr_color_bg", lr_color_bg, global_step=gstep_id)

            # BUGFIX: the decay factors were swapped — sh_data was scaled by
            # weight_decay_sigma and density_data by weight_decay_sh.
            if args.weight_decay_sh < 1.0:
                grid.sh_data.data *= args.weight_decay_sh
            if args.weight_decay_sigma < 1.0:
                grid.density_data.data *= args.weight_decay_sigma

        # Apply TV/Sparsity regularizers (added directly to the grids' .grad).
        if args.lambda_tv > 0.0:
            grid.inplace_tv_grad(grid.density_data.grad,
                    scaling=args.lambda_tv,
                    sparse_frac=args.tv_sparsity,
                    logalpha=args.tv_logalpha,
                    ndc_coeffs=dset.ndc_coeffs,
                    contiguous=args.tv_contiguous)
        if args.lambda_tv_sh > 0.0:
            grid.inplace_tv_color_grad(grid.sh_data.grad,
                    scaling=args.lambda_tv_sh,
                    sparse_frac=args.tv_sh_sparsity,
                    ndc_coeffs=dset.ndc_coeffs,
                    contiguous=args.tv_contiguous)
        if args.lambda_tv_lumisphere > 0.0:
            grid.inplace_tv_lumisphere_grad(grid.sh_data.grad,
                    scaling=args.lambda_tv_lumisphere,
                    dir_factor=args.tv_lumisphere_dir_factor,
                    sparse_frac=args.tv_lumisphere_sparsity,
                    ndc_coeffs=dset.ndc_coeffs)
        if args.lambda_l2_sh > 0.0:
            grid.inplace_l2_color_grad(grid.sh_data.grad,
                    scaling=args.lambda_l2_sh)
        if grid.use_background and (args.lambda_tv_background_sigma > 0.0 or args.lambda_tv_background_color > 0.0):
            grid.inplace_tv_background_grad(grid.background_data.grad,
                    scaling=args.lambda_tv_background_color,
                    scaling_density=args.lambda_tv_background_sigma,
                    sparse_frac=args.tv_background_sparsity,
                    contiguous=args.tv_contiguous)
        if args.lambda_tv_basis > 0.0:
            tv_basis = grid.tv_basis()
            loss_tv_basis = tv_basis * args.lambda_tv_basis
            loss_tv_basis.backward()

        # Manual SGD/rmsprop step on the grid parameters.
        if gstep_id >= args.lr_fg_begin_step:
            grid.optim_density_step(lr_sigma, beta=args.rms_beta, optim=args.sigma_optim)
            grid.optim_sh_step(lr_sh, beta=args.rms_beta, optim=args.sh_optim)
        if grid.use_background:
            grid.optim_background_step(lr_sigma_bg, lr_color_bg, beta=args.rms_beta, optim=args.bg_optim)
        if gstep_id >= args.lr_basis_begin_step:
            if grid.basis_type == svox2.BASIS_TYPE_3D_TEXTURE:
                grid.optim_basis_step(lr_basis, beta=args.rms_beta, optim=args.basis_optim)
            elif grid.basis_type == svox2.BASIS_TYPE_MLP:
                optim_basis_mlp.step()
                optim_basis_mlp.zero_grad()
21,014 | import os
import os.path as osp
from typing import NamedTuple, List
import argparse
import random
class Dir(NamedTuple):
    """A directory entry: its name (prefix or full path) plus accepted file extensions."""
    name: str  # directory prefix to match (e.g. "pose"), or the joined path once matched
    valid_exts: List[str]  # accepted file extensions, each with a leading dot
dirs, dir_idx = list_filter_dirs(args.root_dir)
def list_filter_dirs(base):
    """Scan *base* for known data subdirectories (pose/feature/rgb/...).

    Returns (dirs, dir_idx): a list of Dir entries pointing at the matching
    subdirectories (with their accepted file extensions), and the index in
    that list of the directory named exactly "pose".
    """
    subdirs = [entry for entry in os.listdir(base)
               if osp.isdir(osp.join(base, entry))]
    img_exts = [".png", ".jpg", ".jpeg", ".gif", ".tif", ".tiff", ".bmp"]
    dep_exts = [".exr", ".pfm", ".png", ".npy"]
    # Recognized directory-name prefixes, in priority order.
    prefixes = [
        Dir(name="pose", valid_exts=[".txt"]),
        Dir(name="poses", valid_exts=[".txt"]),
        Dir(name="feature", valid_exts=[".npz"]),
        Dir(name="rgb", valid_exts=img_exts),
        Dir(name="images", valid_exts=img_exts),
        Dir(name="image", valid_exts=img_exts),
        Dir(name="c2w", valid_exts=img_exts),
        Dir(name="depths", valid_exts=dep_exts),
    ]
    matched = []
    pose_idx = 0
    for prefix in prefixes:
        for entry in subdirs:
            if not entry.startswith(prefix.name):
                continue
            if entry == "pose":
                # Remember where the canonical pose dir lands in the output.
                pose_idx = len(matched)
            matched.append(Dir(name=osp.join(base, entry),
                               valid_exts=prefix.valid_exts))
    return matched, pose_idx
21,015 | import os
import os.path as osp
import click
from typing import NamedTuple, List
import argparse
class Dir(NamedTuple):
    # A recognized data subdirectory: its path (or name prefix) plus the
    # file extensions considered valid inside it.
    name: str
    valid_exts: List[str]
# NOTE(review): this call precedes the definition of list_filter_dirs and
# references `args`, which is undefined at this point -- extraction artifact.
dirs, dir_idx = list_filter_dirs(args.root_dir)
def list_filter_dirs(base):
    """Scan *base* for known data subdirectories.

    Returns (dirs, dir_idx): Dir entries for every subdirectory whose name
    starts with a recognized prefix (pose/feature/rgb/images/depths),
    together with the index of the directory named exactly "pose".
    """
    candidates = [d for d in os.listdir(base) if osp.isdir(osp.join(base, d))]
    img_exts = [".png", ".jpg", ".jpeg", ".gif", ".tif", ".tiff", ".bmp"]
    dep_exts = [".exr", ".pfm", ".png", ".npy"]
    known = [
        Dir(name="pose", valid_exts=[".txt"]),
        Dir(name="feature", valid_exts=[".npz"]),
        Dir(name="rgb", valid_exts=img_exts),
        Dir(name="images", valid_exts=img_exts),
        Dir(name="depths", valid_exts=dep_exts),
    ]
    found = []
    pose_index = 0
    for spec in known:
        for cand in candidates:
            if not cand.startswith(spec.name):
                continue
            if cand == "pose":
                # The pose directory's position in the result is reported.
                pose_index = len(found)
            found.append(Dir(name=osp.join(base, cand),
                             valid_exts=spec.valid_exts))
    return found, pose_index
21,016 | import sys
import os
from os import path
import warnings
import numpy as np
import math
from argparse import ArgumentParser
from nerfvis import Scene
from scipy.spatial.transform import Rotation
def align_umeyama(model, data, known_scale=False, yaw_only=False):
    """Implementation of the paper: S. Umeyama, Least-Squares Estimation
    of Transformation Parameters Between Two Point Patterns,
    IEEE Trans. Pattern Anal. Mach. Intell., vol. 13, no. 4, 1991.

    model = s * R * data + t

    Input:
    model -- first trajectory (nx3), numpy array type
    data -- second trajectory (nx3), numpy array type
    known_scale -- if True, fix the scale factor to s = 1
    yaw_only -- if True, restrict the rotation to one about the z axis

    Output:
    s -- scale factor (scalar)
    R -- rotation matrix (3x3)
    t -- translation vector (3x1)
    """
    # subtract mean
    mu_M = model.mean(0)
    mu_D = data.mean(0)
    model_zerocentered = model - mu_M
    data_zerocentered = data - mu_D
    n = np.shape(model)[0]

    # correlation
    C = 1.0/n*np.dot(model_zerocentered.transpose(), data_zerocentered)
    sigma2 = 1.0/n*np.multiply(data_zerocentered, data_zerocentered).sum()
    # BUGFIX: was np.linalg.linalg.svd -- a private alias that was removed
    # in NumPy 2.0; np.linalg.svd is the identical public entry point.
    U_svd, D_svd, V_svd = np.linalg.svd(C)
    D_svd = np.diag(D_svd)
    V_svd = np.transpose(V_svd)
    S = np.eye(3)

    # If the best orthogonal fit would be a reflection, flip one axis so the
    # result is a proper rotation (det(R) = +1).
    if(np.linalg.det(U_svd)*np.linalg.det(V_svd) < 0):
        S[2, 2] = -1

    if yaw_only:
        # assumes get_best_yaw / rot_z helpers are defined elsewhere in this
        # module -- TODO confirm (not visible in this chunk)
        rot_C = np.dot(data_zerocentered.transpose(), model_zerocentered)
        theta = get_best_yaw(rot_C)
        R = rot_z(theta)
    else:
        R = np.dot(U_svd, np.dot(S, np.transpose(V_svd)))

    if known_scale:
        s = 1
    else:
        s = 1.0/sigma2*np.trace(np.dot(D_svd, S))

    t = mu_M-s*np.dot(R, mu_D)

    return s, R, t
The provided code snippet includes necessary dependencies for implementing the `align_procrustes_rt` function. Write a Python function `def align_procrustes_rt(t_a : np.ndarray, q_a : np.ndarray, t_ref : np.ndarray, use_first_k : int = 1000000, want_transform : bool = False)` to solve the following problem:
Align translation + rotation :param t_a: camera translations to align (N, 3) :param q_a: camera rotations to align (xyz axis-angle, xyzw quaternion, or rotation matrix) (N, {3, 4, 9}) :param t_ref: reference camera translations (N, 3) :param use_first_k: int, if set, uses only first k number of cameras to align :param want_transform: bool, if set, returns transform function instead of transformed points :return: if want_transform == False: t (N, 3), q (N, {3, 4, 9}) similarity-transformed version of camera poses, aligned to ref else: function which given points, applies the aligning transform
Here is the function:
def align_procrustes_rt(t_a : np.ndarray, q_a : np.ndarray,
                        t_ref : np.ndarray,
                        use_first_k : int = 1000000,
                        want_transform : bool = False):
    """
    Align translation + rotation
    :param t_a: camera translations to align (N, 3)
    :param q_a: camera rotations to align (xyz axis-angle, xyzw quaternion, or rotation matrix) (N, {3, 4, 9})
    :param t_ref: reference camera translations (N, 3)
    :param use_first_k: int, if set, uses only first k number of cameras to align
    :param want_transform: bool, if set, returns transform function instead of transformed points
    :return:
        if want_transform == False:
            t (N, 3), q (N, {3, 4, 9}) similarity-transformed version of camera poses, aligned to ref
        else: function which given points, applies the aligning transform
    """
    assert t_ref.shape[0] == t_a.shape[0]
    # Umeyama similarity fit (scale s, rotation R, translation t) using only
    # the first use_first_k camera translations.
    s, R, t = align_umeyama(t_ref[:use_first_k], t_a[:use_first_k])

    # # Advanced alignment
    # n_points = t_a.shape[0]
    # z = np.zeros((n_points, 3))
    # z[:, -1] = 0.05
    # t_a_aug = t_a + quaternion_rotate_vector_np(q_a, z) / s
    # t_ref_aug = t_ref + quaternion_rotate_vector_np(q_ref, z)
    #
    # _, R, t = align_umeyama(np.concatenate([t_ref, t_ref_aug], axis=0), np.concatenate([t_a * s, t_a_aug * s], axis=0), known_scale=True)

    def transform(t_b : np.ndarray, q_b : np.ndarray):
        # Apply the similarity transform to positions, and left-compose the
        # aligning rotation with each camera rotation.
        # NOTE(review): Rotation.from_matrix(q_b) only works if q_b holds
        # rotation matrices, despite the docstring also allowing quaternions
        # and axis-angle -- confirm against callers.
        t_align = s * t_b @ R.T + t
        Ra = Rotation.from_matrix(R)
        q_align = (Ra * Rotation.from_matrix(q_b)).as_matrix()
        return t_align, q_align
    return transform if want_transform else transform(t_a, q_a)
21,017 | import sys
import os
from os import path
import warnings
import numpy as np
import math
from argparse import ArgumentParser
from nerfvis import Scene
from scipy.spatial.transform import Rotation
The provided code snippet includes necessary dependencies for implementing the `get_image_size` function. Write a Python function `def get_image_size(path : str)` to solve the following problem:
Get image size without loading it
Here is the function:
def get_image_size(path : str):
    """
    Get image size without loading the pixel data.

    :param path: path to an image file readable by PIL
    :return: (width, height) tuple
    """
    from PIL import Image
    # Use a context manager so the underlying file handle is closed
    # promptly: Image.open is lazy and would otherwise keep it open,
    # leaking descriptors when called over many images.
    with Image.open(path) as im:
        return im.size  # W, H
21,018 | import sys
import os
from os import path
import warnings
import numpy as np
import math
from argparse import ArgumentParser
from nerfvis import Scene
from scipy.spatial.transform import Rotation
def sort_key(x):
    """Sort key that ignores a single-character "N_" split prefix, if any.

    E.g. "0_img.png" sorts as "img.png"; names without the prefix sort as-is.
    """
    has_split_prefix = len(x) > 2 and x[1] == "_"
    return x[2:] if has_split_prefix else x
21,019 | import os
import os.path as osp
import numpy as np
import struct
import collections
import argparse
import shutil
def qvec2rotmat(qvec):
    """Convert a (w, x, y, z) quaternion into a 3x3 rotation matrix.

    Assumes *qvec* is unit-norm (no normalization is performed).
    """
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    row0 = [1 - 2 * y ** 2 - 2 * z ** 2,
            2 * x * y - 2 * w * z,
            2 * z * x + 2 * w * y]
    row1 = [2 * x * y + 2 * w * z,
            1 - 2 * x ** 2 - 2 * z ** 2,
            2 * y * z - 2 * w * x]
    row2 = [2 * z * x - 2 * w * y,
            2 * y * z + 2 * w * x,
            1 - 2 * x ** 2 - 2 * y ** 2]
    return np.array([row0, row1, row2])
21,020 | import os
import os.path as osp
import numpy as np
import struct
import collections
import argparse
import shutil
# COLMAP camera record: intrinsic model name and its parameter vector.
Camera = collections.namedtuple("Camera", ["id", "model", "width", "height", "params"])
# COLMAP 3D point record: position/color/reprojection error plus its track
# (which images observe it, and at which 2D keypoint index).
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]
)
class Image(BaseImage):
    # Thin wrapper over the COLMAP image record (BaseImage is defined
    # elsewhere in this file); adds rotation-matrix access.
    def qvec2rotmat(self):
        # Convert this image's stored (w, x, y, z) quaternion into a 3x3
        # rotation matrix via the module-level helper.
        return qvec2rotmat(self.qvec)
# Lookup from COLMAP numeric model id -> camera-model descriptor.
# NOTE(review): CAMERA_MODELS is defined elsewhere in this file (not
# visible in this chunk).
CAMERA_MODEL_IDS = dict(
    [(camera_model.model_id, camera_model) for camera_model in CAMERA_MODELS]
)
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read *num_bytes* from a binary file and unpack them with struct.

    :param fid: binary file object positioned at the bytes to read
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of read and unpacked values.
    """
    fmt = endian_character + format_char_sequence
    raw = fid.read(num_bytes)
    return struct.unpack(fmt, raw)
def read_colmap_sparse(sparse_path):
    """Load a COLMAP sparse reconstruction from cameras.bin/points3D.bin/images.bin.

    Only single-camera SIMPLE_PINHOLE / SIMPLE_RADIAL models are supported.
    Returns (cameras, images, points3D) as lists of Camera/Image/Point3D
    records; each image's point3D_ids are remapped from COLMAP's sparse ids
    to dense indices into the returned points3D list.
    """
    # --- cameras.bin: one record per camera (model id, size, params) ---
    cameras = []
    with open(osp.join(sparse_path, "cameras.bin"), "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        assert num_cameras == 1, "Only supports single camera"
        for _ in range(num_cameras):
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ"
            )
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            assert model_name in ["SIMPLE_PINHOLE", "SIMPLE_RADIAL"], \
                "Only SIMPLE_PINHOLE/SIMPLE_RADIAL supported"
            width = camera_properties[2]
            height = camera_properties[3]
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(
                fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params
            )
            cameras.append(
                Camera(
                    id=camera_id,
                    model=model_name,
                    width=width,
                    height=height,
                    params=np.array(params),
                )
            )
        assert len(cameras) == num_cameras
    # --- points3D.bin: xyz/rgb/error plus the observation track ---
    points3D_idmap = {}
    points3D = []
    with open(osp.join(sparse_path, "points3D.bin"), "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for i in range(num_points):
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd"
            )
            point3D_id = binary_point_line_properties[0]
            # Map COLMAP's sparse point id -> dense index in points3D.
            points3D_idmap[point3D_id] = i
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
                0
            ]
            track_elems = read_next_bytes(
                fid,
                num_bytes=8 * track_length,
                format_char_sequence="ii" * track_length,
            )
            # Track alternates (image_id, point2D_idx) pairs.
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D.append(
                Point3D(
                    id=point3D_id,
                    xyz=xyz,
                    rgb=rgb,
                    error=error,
                    image_ids=image_ids,
                    point2D_idxs=point2D_idxs,
                )
            )
    # --- images.bin: pose (qvec/tvec), name, and 2D keypoints ---
    images = []
    with open(osp.join(sparse_path, "images.bin"), "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi"
            )
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":  # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
                0
            ]
            x_y_id_s = read_next_bytes(
                fid,
                num_bytes=24 * num_points2D,
                format_char_sequence="ddq" * num_points2D,
            )
            xys = np.column_stack(
                [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))]
            )
            # Drop unmatched keypoints (id -1) and remap to dense indices.
            point3D_ids = list(map(int, x_y_id_s[2::3]))
            point3D_ids = [points3D_idmap[x] for x in point3D_ids if x >= 0]
            point3D_ids = np.array(point3D_ids)
            images.append(
                Image(
                    id=image_id,
                    qvec=qvec,
                    tvec=tvec,
                    camera_id=camera_id,
                    name=image_name,
                    xys=xys,
                    point3D_ids=point3D_ids,
                )
            )
    return cameras, images, points3D
21,021 | import os
import collections
import numpy as np
import struct
import argparse
def read_cameras_text(path):
    """Parse a COLMAP cameras.txt file into {camera_id: Camera}.

    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasText(const std::string& path)
        void Reconstruction::ReadCamerasText(const std::string& path)
    """
    cameras = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Skip blank lines and '#' comments; each data line is:
            # CAMERA_ID MODEL WIDTH HEIGHT PARAMS[]
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                camera_id = int(elems[0])
                model = elems[1]
                width = int(elems[2])
                height = int(elems[3])
                params = np.array(tuple(map(float, elems[4:])))
                cameras[camera_id] = Camera(id=camera_id, model=model,
                                            width=width, height=height,
                                            params=params)
    return cameras
def read_cameras_binary(path_to_model_file):
    """Parse a COLMAP cameras.bin file into {camera_id: Camera}.

    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):
            # Fixed header: camera_id(i), model_id(i), width(Q), height(Q).
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # Variable-length double params; count depends on the model.
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
                                     format_char_sequence="d"*num_params)
            cameras[camera_id] = Camera(id=camera_id,
                                        model=model_name,
                                        width=width,
                                        height=height,
                                        params=np.array(params))
        assert len(cameras) == num_cameras
    return cameras
def read_images_text(path):
    """Parse a COLMAP images.txt file into {image_id: Image}.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesText(const std::string& path)
        void Reconstruction::WriteImagesText(const std::string& path)
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            if len(line) > 0 and line[0] != "#":
                # First line of each record: IMAGE_ID QW QX QY QZ TX TY TZ
                # CAMERA_ID NAME.
                elems = line.split()
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # Second line: (X, Y, POINT3D_ID) triples for the keypoints.
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images
def read_images_binary(path_to_model_file):
    """Parse a COLMAP images.bin file into {image_id: Image}.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # image_id(i), qvec(4d), tvec(3d), camera_id(i).
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # NUL-terminated image name, read one byte at a time.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # (x, y, point3D_id) triples for each observed 2D keypoint.
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id, qvec=qvec, tvec=tvec,
                camera_id=camera_id, name=image_name,
                xys=xys, point3D_ids=point3D_ids)
    return images
def read_points3D_text(path):
    """Parse a COLMAP points3D.txt file into {point3D_id: Point3D}.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)
    """
    points3D = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Each data line: POINT3D_ID X Y Z R G B ERROR
            # followed by alternating (IMAGE_ID, POINT2D_IDX) track pairs.
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                point3D_id = int(elems[0])
                xyz = np.array(tuple(map(float, elems[1:4])))
                rgb = np.array(tuple(map(int, elems[4:7])))
                error = float(elems[7])
                image_ids = np.array(tuple(map(int, elems[8::2])))
                point2D_idxs = np.array(tuple(map(int, elems[9::2])))
                points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
                                               error=error, image_ids=image_ids,
                                               point2D_idxs=point2D_idxs)
    return points3D
def read_points3D_binary(path_to_model_file):
    """Parse a COLMAP points3D.bin file into {point3D_id: Point3D}.

    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    points3D = {}
    with open(path_to_model_file, "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_points):
            # id(Q), xyz(3d), rgb(3B), error(d).
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3D_id = binary_point_line_properties[0]
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            # Variable-length track of (image_id, point2D_idx) int pairs.
            track_length = read_next_bytes(
                fid, num_bytes=8, format_char_sequence="Q")[0]
            track_elems = read_next_bytes(
                fid, num_bytes=8*track_length,
                format_char_sequence="ii"*track_length)
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D[point3D_id] = Point3D(
                id=point3D_id, xyz=xyz, rgb=rgb,
                error=error, image_ids=image_ids,
                point2D_idxs=point2D_idxs)
    return points3D
def detect_model_format(path, ext):
    """Return True iff cameras/images/points3D files with *ext* exist in *path*.

    Prints the detected extension when all three files are present.
    """
    required = ("cameras", "images", "points3D")
    if all(os.path.isfile(os.path.join(path, name + ext)) for name in required):
        print("Detected model format: '" + ext + "'")
        return True
    return False
def read_model(path, ext=""):
    """Load a COLMAP model (cameras, images, points3D) from *path*.

    When *ext* is empty, auto-detects ".bin" vs ".txt"; if neither format
    is found, prints a message and returns None.
    """
    if ext == "":
        # try to detect the extension automatically
        if detect_model_format(path, ".bin"):
            ext = ".bin"
        elif detect_model_format(path, ".txt"):
            ext = ".txt"
        else:
            print("Provide model format: '.bin' or '.txt'")
            return

    if ext == ".txt":
        readers = (read_cameras_text, read_images_text, read_points3D_text)
    else:
        readers = (read_cameras_binary, read_images_binary, read_points3D_binary)
    cameras = readers[0](os.path.join(path, "cameras" + ext))
    images = readers[1](os.path.join(path, "images" + ext))
    points3D = readers[2](os.path.join(path, "points3D" + ext))
    return cameras, images, points3D
21,022 | import os
import collections
import numpy as np
import struct
import argparse
def write_cameras_text(cameras, path):
def write_cameras_binary(cameras, path_to_model_file):
def write_images_text(images, path):
def write_images_binary(images, path_to_model_file):
def write_points3D_text(points3D, path):
def write_points3D_binary(points3D, path_to_model_file):
def write_model(cameras, images, points3D, path, ext=".bin"):
    """Write a COLMAP model to *path*, in text or binary format per *ext*."""
    if ext == ".txt":
        writers = (write_cameras_text, write_images_text, write_points3D_text)
    else:
        writers = (write_cameras_binary, write_images_binary, write_points3D_binary)
    writers[0](cameras, os.path.join(path, "cameras" + ext))
    writers[1](images, os.path.join(path, "images" + ext))
    writers[2](points3D, os.path.join(path, "points3D" + ext))
    return cameras, images, points3D
21,023 | import os
import collections
import numpy as np
import struct
import argparse
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a (w, x, y, z) quaternion.

    Uses the symmetric-eigenvector method: the quaternion is the eigenvector
    of K for its largest eigenvalue. The sign is normalized so w >= 0.
    """
    rxx, ryx, rzx, rxy, ryy, rzy, rxz, ryz, rzz = R.flat
    # Build only the lower triangle -- np.linalg.eigh reads just that part.
    K = np.zeros((4, 4))
    K[0, 0] = rxx - ryy - rzz
    K[1, 0] = ryx + rxy
    K[1, 1] = ryy - rxx - rzz
    K[2, 0] = rzx + rxz
    K[2, 1] = rzy + ryz
    K[2, 2] = rzz - rxx - ryy
    K[3, 0] = ryz - rzy
    K[3, 1] = rzx - rxz
    K[3, 2] = rxy - ryx
    K[3, 3] = rxx + ryy + rzz
    K /= 3.0
    eigvals, eigvecs = np.linalg.eigh(K)
    qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    # Canonical sign: keep the scalar part non-negative.
    return -qvec if qvec[0] < 0 else qvec
21,024 | import os
import shutil
from glob import glob
import json
import numpy as np
from PIL import Image
import argparse
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(data_dir : str, out_data_dir : str)` to solve the following problem:
Convert Instant-NGP (modified NeRF) data to NSVF :param data_dir: the dataset dir (NeRF-NGP format) to convert :param out_data_dir: output dataset directory NSVF
Here is the function:
def convert(data_dir : str, out_data_dir : str):
    """
    Convert Instant-NGP (modified NeRF) data to NSVF
    :param data_dir: the dataset dir (NeRF-NGP format) to convert
    :param out_data_dir: output dataset directory NSVF
    """
    images_dir_name = os.path.join(out_data_dir, "images")
    pose_dir_name = os.path.join(out_data_dir, "pose")
    os.makedirs(images_dir_name, exist_ok=True)
    os.makedirs(pose_dir_name, exist_ok=True)

    def get_subdir(name):
        # Map a transforms json filename to its split's image subdirectory.
        if name.endswith("_train.json"):
            return "train"
        elif name.endswith("_val.json"):
            return "val"
        elif name.endswith("_test.json"):
            return "test"
        return ""

    def get_out_prefix(name):
        # NSVF filename prefixes: 0_ = train, 1_ = val, 2_ = test.
        if name.endswith("_train.json"):
            return "0_"
        elif name.endswith("_val.json"):
            return "1_"
        elif name.endswith("_test.json"):
            return "2_"
        return ""

    jsons = {
        x: (get_subdir(x), get_out_prefix(x))
        for x in glob(os.path.join(data_dir, "*.json"))
    }
    # OpenGL -> OpenCV
    cam_trans = np.diag(np.array([1.0, -1.0, -1.0, 1.0]))
    # World-axis permutation applied on the left of each pose.
    # fmt: off
    world_trans = np.array(
        [
            [0.0, -1.0, 0.0, 0.0],
            [0.0, 0.0, -1.0, 0.0],
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )
    # fmt: on
    assert len(jsons) > 0, f"No jsons found in {data_dir}, can't convert"
    cnt = 0
    example_fpath = None
    tj = {}
    for tj_path, (tj_subdir, tj_out_prefix) in jsons.items():
        with open(tj_path, "r") as f:
            tj = json.load(f)
        if "frames" not in tj:
            print(f"No frames in json {tj_path}, skipping")
            continue
        for frame in tj["frames"]:
            # Try direct relative path (used in newer NGP datasets)
            fpath = os.path.join(data_dir, frame["file_path"])
            if not os.path.isfile(fpath):
                # Legacy path (NeRF)
                fpath = os.path.join(
                    data_dir, tj_subdir, os.path.basename(frame["file_path"]) + ".png"
                )
            example_fpath = fpath
            if not os.path.isfile(fpath):
                print("Could not find image:", frame["file_path"], "(this may be ok)")
                continue
            ext = os.path.splitext(fpath)[1]
            c2w = np.array(frame["transform_matrix"])
            c2w = world_trans @ c2w @ cam_trans  # To OpenCV
            image_fname = tj_out_prefix + f"{cnt:05d}"
            pose_path = os.path.join(pose_dir_name, image_fname + ".txt")
            # Save 4x4 OpenCV C2W pose
            np.savetxt(pose_path, c2w)
            # Copy images
            new_fpath = os.path.join(images_dir_name, image_fname + ext)
            shutil.copyfile(fpath, new_fpath)
            cnt += 1
    assert len(tj) > 0, f"No valid jsons found in {data_dir}, can't convert"
    # Intrinsics come from the last json read; fall back to the image size
    # on disk when w/h are not recorded.
    w = tj.get("w")
    h = tj.get("h")
    if w is None or h is None:
        assert example_fpath is not None
        # Pose not available so load a image and get the size
        w, h = Image.open(example_fpath).size
    fx = float(0.5 * w / np.tan(0.5 * tj["camera_angle_x"]))
    if "camera_angle_y" in tj:
        fy = float(0.5 * h / np.tan(0.5 * tj["camera_angle_y"]))
    else:
        fy = fx
    cx = tj.get("cx", w * 0.5)
    cy = tj.get("cy", h * 0.5)
    intrin_mtx = np.array([
        [fx, 0.0, cx, 0.0],
        [0.0, fy, cy, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    # Write intrinsics
    np.savetxt(os.path.join(out_data_dir, "intrinsics.txt"), intrin_mtx)
21,025 | import cv2
import moviepy
import moviepy.editor
import numpy
import argparse
import os
import random
import shutil
import sys
import tempfile
import torch
import torchvision
import glob
import numpy as np
from tqdm import tqdm
from warnings import warn
def compute_poses(vid_root, args, overwrite=False):
    """Derive LLFF-style poses_bounds.npy for one video directory.

    Reads the COLMAP sparse model under <vid_root>/sparse and writes
    poses_bounds.npy next to it, unless the file exists and *overwrite*
    is False.
    """
    vid_name = os.path.basename(vid_root)
    colmap_dir = os.path.join(vid_root, 'sparse')
    pose_fn = os.path.join(vid_root, 'poses_bounds.npy')
    if not os.path.exists(pose_fn) or overwrite:
        print(f'poses: {vid_name}')
        # poses, pts3d, perm = load_colmap_data2(colmap_dir)
        # if poses is not None:
        #     save_poses(colmap_dir, poses, pts3d, perm)
        # load_colmap_data is defined elsewhere in this project; a None
        # save_arr indicates the reconstruction could not be loaded.
        poses, pts3d, perm, save_arr = load_colmap_data(colmap_dir)
        if save_arr is not None:
            np.save(pose_fn, save_arr)
21,026 | import cv2
import moviepy
import moviepy.editor
import numpy
import argparse
import os
import random
import shutil
import sys
import tempfile
import torch
import torchvision
import glob
import numpy as np
from tqdm import tqdm
from warnings import warn
def generate_masks(vid_root, args, overwrite=False):
    """Generate per-frame foreground masks with Mask R-CNN.

    Writes one 8-bit mask per frame to <vid_root>/<args.mask_output>:
    masked-out (dynamic) pixels are 0, static pixels 255. Existing masks are
    kept unless *overwrite* is True.

    :param vid_root: video directory containing <args.image_input> frames
    :param args: parsed CLI args (mask_output, image_input)
    :param overwrite: if True, regenerate masks that already exist
    """
    print('compute masks')
    vid_name = os.path.basename(vid_root)
    masks_dir = os.path.join(vid_root, args.mask_output)
    os.makedirs(masks_dir, exist_ok=True)
    frames_dir = os.path.join(vid_root, args.image_input)
    os.makedirs(frames_dir, exist_ok=True)

    # NOTE: `device` is a module-level global defined elsewhere in this file.
    maskrnn_model = torchvision.models.detection.maskrcnn_resnet50_fpn(
        pretrained=True).to(device).eval()

    files = sorted(
        glob.glob(os.path.join(vid_root, args.image_input, '*.jpg')) +
        glob.glob(os.path.join(vid_root, args.image_input, '*.png')))
    for file_ind, file in enumerate(tqdm(files, desc=f'masks: {vid_name}')):
        fn_ext = os.path.basename(file)
        frame_fn = f'{frames_dir}/{fn_ext}'
        out_mask_fn = f'{masks_dir}/{fn_ext}.png'
        # BUGFIX: the `overwrite` parameter was previously ignored here,
        # so overwrite=True could never regenerate existing masks.
        if os.path.exists(out_mask_fn) and not overwrite:
            continue
        im = cv2.imread(frame_fn)
        # Start from an all-foreground (value 1.0) mask; detections zero
        # out the dynamic regions.
        humans_tens = torch.FloatTensor(im.shape[0], im.shape[1]).fill_(1.0).to(device)
        # NOTE(review): indexing channels [2, 0, 1] of a BGR frame yields
        # (R, B, G), not RGB -- looks suspicious but preserved as-is.
        obj_predictions = maskrnn_model(
            [torch.FloatTensor(im.transpose(2, 0, 1) / 255.0)[[2, 0, 1], :, :].to(device)])[0]
        for mask_ind in range(obj_predictions['masks'].size(0)):
            if obj_predictions['scores'][mask_ind].item() > 0.5:
                # Class ids 1/31/32/48 plus 18 (dog) -- presumably COCO
                # categories for people, pets and carried items; confirm
                # against the COCO id table.
                if obj_predictions['labels'][mask_ind].item() == 1:
                    humans_tens[obj_predictions['masks'][mask_ind, 0, :, :] > 0.5] = 0.0
                elif obj_predictions['labels'][mask_ind].item() == 31:
                    humans_tens[obj_predictions['masks'][mask_ind, 0, :, :] > 0.5] = 0.0
                elif obj_predictions['labels'][mask_ind].item() == 32:
                    humans_tens[obj_predictions['masks'][mask_ind, 0, :, :] > 0.5] = 0.0
                elif obj_predictions['labels'][mask_ind].item() == 48:
                    humans_tens[obj_predictions['masks'][mask_ind, 0, :, :] > 0.5] = 0.0
                # dog
                elif obj_predictions['labels'][mask_ind].item() == 18:
                    humans_tens[obj_predictions['masks'][mask_ind, 0, :, :] > 0.5] = 0.0
        # Erode the static region so mask borders stay conservative.
        mask_np = cv2.erode(
            src=humans_tens.cpu().numpy(),
            kernel=numpy.ones([3, 3], numpy.float32),
            anchor=(-1, -1),
            iterations=16,
            borderType=cv2.BORDER_DEFAULT)
        mask_np = (mask_np * 255.0).clip(0.0, 255.0).astype(numpy.uint8)
        cv2.imwrite(filename=out_mask_fn, img=mask_np)
def resize_frames(vid_root, args, overwrite=False):
    """Resize input frames to fit within args.max_width x args.max_height.

    Writes sequentially numbered PNGs to <vid_root>/<args.images_resized>
    and returns the downscale factor of the last resized image (1.0 when
    nothing needed resizing).

    :param vid_root: video directory
    :param args: parsed CLI args (image_input, images_resized,
        max_width, max_height)
    :param overwrite: if True, re-write output frames that already exist.
        BUGFIX: `overwrite` used to be an undefined free variable in this
        function, raising NameError whenever an output frame existed; it is
        now a proper keyword parameter (default False preserves the
        skip-existing behavior).
    """
    vid_name = os.path.basename(vid_root)
    frames_dir = os.path.join(vid_root, args.images_resized)
    os.makedirs(frames_dir, exist_ok=True)

    files = sorted(
        glob.glob(os.path.join(vid_root, args.image_input, '*.jpg')) +
        glob.glob(os.path.join(vid_root, args.image_input, '*.png')))
    print('Resizing images ...')
    factor = 1.0
    for file_ind, file in enumerate(tqdm(files, desc=f'imresize: {vid_name}')):
        out_frame_fn = f'{frames_dir}/{file_ind:05}.png'

        # skip if the output frame already exists
        if os.path.exists(out_frame_fn) and not overwrite:
            continue

        im = cv2.imread(file)

        # resize if too big
        if im.shape[1] > args.max_width or im.shape[0] > args.max_height:
            factor = max(im.shape[1] / args.max_width, im.shape[0] / args.max_height)
            dsize = (int(im.shape[1] / factor), int(im.shape[0] / factor))
            im = cv2.resize(src=im, dsize=dsize, interpolation=cv2.INTER_AREA)

        cv2.imwrite(out_frame_fn, im)
    return factor
def run_colmap(vid_root, args, factor, overwrite=False):
    """Run COLMAP feature extraction, matching, mapping (and optionally
    undistortion) on the resized frames of one video directory.

    :param vid_root: video directory (contains <args.images_resized>)
    :param args: parsed CLI args (noradial, use_masks, known_intrin,
        fix_intrin, do_sequential, colmap_root, images_resized,
        undistorted_output)
    :param factor: downscale factor applied to the frames; used to rescale
        known intrinsics to the resized resolution
    :param overwrite: if True, redo the undistortion output even if present
    """
    max_num_matches = 132768
    overlap_frames = 75  # only used with sequential matching

    os.makedirs(os.path.join(vid_root, 'sparse'), exist_ok=True)
    extractor_cmd = f'''
    colmap feature_extractor \
        --database_path={vid_root}/database.db \
        --image_path={vid_root}/{args.images_resized}\
        --ImageReader.single_camera=1 \
        --ImageReader.default_focal_length_factor=0.69388 \
        --SiftExtraction.peak_threshold=0.004 \
        --SiftExtraction.max_num_features=8192 \
        --SiftExtraction.edge_threshold=16'''
    if args.noradial:
        extractor_cmd += ' --ImageReader.camera_model=SIMPLE_PINHOLE'
    else:
        extractor_cmd += ' --ImageReader.camera_model=SIMPLE_RADIAL'
    if args.use_masks:
        # BUGFIX: this string previously lacked the f prefix, so the literal
        # text "{vid_root}/masks" was passed to COLMAP instead of the path.
        extractor_cmd += f' --ImageReader.mask_path={vid_root}/masks'

    known_intrin = False
    if args.known_intrin:
        intrin_path = os.path.join(vid_root, 'intrinsics.txt')
        if os.path.isfile(intrin_path):
            known_intrin = True
            print('Using known intrinsics')
            intrins = np.loadtxt(intrin_path)
            # Rescale the intrinsics to the resized image resolution.
            focal = (intrins[0, 0] + intrins[1, 1]) * 0.5 / factor
            cx, cy = intrins[0, 2] / factor, intrins[1, 2] / factor
            # f cx cy
            if args.noradial:
                extractor_cmd += f' --ImageReader.camera_params "{focal:.10f},{cx:.10f},{cy:.10f}"'
            else:
                extractor_cmd += f' --ImageReader.camera_params "{focal:.10f},{cx:.10f},{cy:.10f},0.0"'
        else:
            print('--known-intrin given but intrinsics.txt does not exist in data')

    os.system(extractor_cmd)

    if not args.do_sequential:
        os.system(f'''
    colmap exhaustive_matcher \
        --database_path={vid_root}/database.db \
        --SiftMatching.multiple_models=0 \
        --SiftMatching.max_ratio=0.8 \
        --SiftMatching.max_error=4.0 \
        --SiftMatching.max_distance=0.7 \
        --SiftMatching.max_num_matches={max_num_matches}''')
    else:
        warn("Using sequential matcher, which may be worse")
        os.system(f'''
    colmap sequential_matcher \
        --database_path={vid_root}/database.db \
        --SiftMatching.multiple_models=0 \
        --SiftMatching.max_num_matches={max_num_matches} \
        --SequentialMatching.overlap={overlap_frames} \
        --SequentialMatching.quadratic_overlap=0 \
        --SequentialMatching.loop_detection=1 \
        --SequentialMatching.vocab_tree_path={args.colmap_root}/vocab_tree_flickr100K_words256K.bin'''
                  )
    mapper_cmd = f'''
    colmap mapper \
        --database_path={vid_root}/database.db \
        --image_path={vid_root}/{args.images_resized} \
        --output_path={vid_root}/sparse '''
    if known_intrin and args.fix_intrin:
        # Keep the provided intrinsics fixed during bundle adjustment.
        mapper_cmd += f''' \
        --Mapper.ba_refine_focal_length=0 \
        --Mapper.ba_refine_principal_point=0 \
        --Mapper.ba_refine_extra_params=0 '''
    os.system(mapper_cmd)

    if not args.noradial:
        print("Warning: I've found the undistorter to work very poorly, substantially reducing quality.")
        print("A potential (fairly easy) improvement is to support OPENCV camera model in the codebase, "
              "and without doing undistorting.")
        undist_dir = os.path.join(vid_root, args.undistorted_output)
        if not os.path.exists(undist_dir) or overwrite:
            os.makedirs(undist_dir, exist_ok=True)
            os.system(f'''
    colmap image_undistorter \
        --input_path={vid_root}/sparse/0 \
        --image_path={vid_root}/{args.images_resized} \
        --output_path={vid_root} \
        --output_type=COLMAP''')
def render_movie(vid_root, args):
    """Render a debug movie overlaying reprojected COLMAP points on frames."""
    vid_name = os.path.basename(os.path.abspath(vid_root))
    files = sorted(glob.glob(os.path.join(vid_root, args.image_input , '*.png')) + glob.glob(os.path.join(vid_root, args.image_input , '*.jpg')))
    movie_fn = os.path.join(vid_root, f'{vid_name}_debug.mp4')
    # if os.path.exists(movie_fn):
    #     print(f'{movie_fn} exists, skipping')
    #     return
    if not os.path.exists(os.path.join(vid_root, 'sparse', '0')):
        print(f'{vid_name} colmap model does not exist')
        return
    debug_dir = os.path.join(vid_root, 'debug', 'frames')
    os.makedirs(debug_dir, exist_ok=True)
    # read_colmap is defined elsewhere in this file/project; returns
    # per-frame camera dicts and the reconstructed 3D points.
    obj_cameras, obj_points = read_colmap(
        os.path.join(vid_root, 'sparse', '0'))
    for file_idx, file in enumerate(tqdm(files, desc=f'render: {vid_name}')):
        fn = os.path.basename(file)
        im = cv2.imread(file)
        if fn in obj_cameras:
            obj_camera = obj_cameras[fn]
            # Undistort using the camera-model-specific coefficients.
            if obj_camera['model']=='SIMPLE_RADIAL':
                im = cv2.undistort(
                    src=im,
                    cameraMatrix=obj_camera['npyIntrinsics'],
                    distCoeffs=(obj_camera['dblRadial'], obj_camera['dblRadial'], 0.0, 0.0))
            elif obj_camera['model']=='SIMPLE_PINHOLE':
                im = cv2.undistort(
                    src=im,
                    cameraMatrix=obj_camera['npyIntrinsics'],
                    distCoeffs=(0.0,0.0,0.0,0.0))
            # Project each 3D point observed by this camera and draw it.
            for obj_point in [obj_points[int_point] for int_point in obj_camera['intPoints']]:
                npyPoint = numpy.append(obj_point['npyLocation'], 1.0)
                npyPoint = numpy.matmul(obj_camera['npyIntrinsics'],
                                        numpy.matmul(obj_camera['npyExtrinsics'], npyPoint))
                # Skip points at/behind the camera plane.
                if npyPoint[2] < 0.0000001: continue
                intX, intY = int(round(npyPoint[0] / npyPoint[2])), int(
                    round(npyPoint[1] / npyPoint[2]))
                if intX not in range(im.shape[1]) or intY not in range(im.shape[0]):
                    continue
                cv2.circle(img=im, center=(intX, intY), radius=1, color=(255, 0, 255), thickness=2)
        output_fn = f'{debug_dir}/{file_idx:05}.png'
        cv2.imwrite(filename=output_fn, img=im)
    # write movie
    ffmpeg_params = [
        '-crf', '5', '-pix_fmt', 'yuv420p', '-vf', 'pad=width=ceil(iw/2)*2:height=ceil(ih/2)*2'
    ]
    moviepy.editor.ImageSequenceClip(
        sequence=debug_dir, fps=25).write_videofile(
            movie_fn, ffmpeg_params=ffmpeg_params)
def preprocess(vid_root, args):
    """Prepare one video folder for reconstruction.

    Gathers loose image files into <vid_root>/<args.image_input>, resizes the
    frames, optionally generates masks, runs COLMAP, and (with args.debug)
    renders a debug movie.
    """
    print(f'processing: {vid_root}')
    frames_dir = os.path.join(vid_root, args.image_input)
    if not os.path.exists(frames_dir):
        entries = os.listdir(vid_root)
        os.makedirs(frames_dir)
        print(f'Moving images to {frames_dir}')
        for entry in entries:
            src_path = os.path.join(vid_root, entry)
            if not os.path.isfile(src_path):
                continue
            # Only move recognized image files; everything else stays put.
            if os.path.splitext(entry)[1].upper() in ('.PNG', '.JPG', '.JPEG', '.EXR'):
                os.rename(src_path, os.path.join(frames_dir, entry))
    overwrite = True
    factor = resize_frames(vid_root, args)
    # colmap
    if args.use_masks:
        generate_masks(vid_root, args, overwrite=overwrite)
    run_colmap(vid_root, args, factor, overwrite=overwrite)
    if args.debug:
        render_movie(vid_root, args)
21,027 | import numpy as np
import os
import imageio
def ptstocam(pts, c2w):
    """Transform world-space points into the camera frame of ``c2w``.

    :param pts: (..., 3) world-space points
    :param c2w: camera-to-world matrix, at least (3, 4)
    :return: (..., 3) points expressed in camera coordinates
    """
    rot = c2w[:3, :3]
    rel = pts - c2w[:3, 3]
    return np.matmul(rot.T, rel[..., np.newaxis])[..., 0]
21,028 | import numpy as np
import os
import imageio
def normalize(x):
    """Return ``x`` scaled to unit Euclidean length."""
    length = np.linalg.norm(x)
    return x / length
def viewmatrix(z, up, pos):
    """Build a (3, 4) camera-to-world matrix looking along ``z``.

    Columns are [right, up, forward, position]; the supplied ``up`` hint is
    re-orthogonalized against the forward direction.
    """
    forward = normalize(z)
    right = normalize(np.cross(up, forward))
    true_up = normalize(np.cross(forward, right))
    return np.stack([right, true_up, forward, pos], 1)
def render_path_axis(c2w, up, ax, rad, focal, N):
    """Generate N poses sweeping the camera +/- ``rad`` along axis ``ax`` of
    ``c2w`` while keeping it aimed at a point ``focal`` units ahead."""
    render_poses = []
    center = c2w[:, 3]
    hwf = c2w[:, 4:5]  # kept only for the commented-out variant below
    sweep = c2w[:, ax] * rad
    target = center - focal * c2w[:, 2]
    for step in np.linspace(-1.0, 1.0, N + 1)[:-1]:
        eye = center + step * sweep
        fwd = normalize(eye - target)
        # render_poses.append(np.concatenate([viewmatrix(fwd, up, eye), hwf], 1))
        render_poses.append(viewmatrix(fwd, up, eye))
    return render_poses
21,029 | import numpy as np
import os
import imageio
def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True):
def normalize(x):
def poses_avg(poses):
def render_path_spiral(c2w, up, rads, focal, zrate, rots, N):
def recenter_poses(poses):
def spherify_poses(poses, bds):
def load_llff_data(
    basedir,
    factor=None,
    recenter=True,
    bd_factor=0.75,
    spherify=False,
    # path_zflat=False,
    split_train_val=8,
    render_style="",
):
    """Load poses and depth bounds for an LLFF-style capture.

    Images themselves are not loaded (``load_imgs=False``). Returns
    (reference_depth, reference_view_id, render_poses, poses, intrinsic),
    where ``render_poses`` is a spiral (or spherified) novel-view path and
    ``reference_view_id`` indexes the training pose closest to the average
    camera.
    """
    # poses, bds, imgs = _load_data(basedir, factor=factor) # factor=8 downsamples original imgs by 8x
    poses, bds, intrinsic = _load_data(
        basedir, factor=factor, load_imgs=False
    ) # factor=8 downsamples original imgs by 8x
    print("Loaded LLFF data", basedir, bds.min(), bds.max())
    # Correct rotation matrix ordering and move variable dim to axis 0
    # poses [R | T] [3, 4, images]
    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
    # poses [3, 4, images] --> [images, 3, 4]
    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
    # imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
    # images = imgs
    bds = np.moveaxis(bds, -1, 0).astype(np.float32)
    # Rescale if bd_factor is provided
    sc = 1.0 if bd_factor is None else 1.0 / (bds.min() * bd_factor)
    poses[:, :3, 3] *= sc
    bds *= sc
    if recenter:
        poses = recenter_poses(poses)
    if spherify:
        poses, render_poses, bds = spherify_poses(poses, bds)
    else:
        c2w = poses_avg(poses)
        print("recentered", c2w.shape)
        ## Get spiral
        # Get average pose
        up = normalize(poses[:, :3, 1].sum(0))
        close_depth, inf_depth = -1, -1
        # Find a reasonable "focus depth" for this dataset
        # if os.path.exists(os.path.join(basedir, "planes_spiral.txt")):
        #     with open(os.path.join(basedir, "planes_spiral.txt"), "r") as fi:
        #         data = [float(x) for x in fi.readline().split(" ")]
        #         dmin, dmax = data[:2]
        #         close_depth = dmin * 0.9
        #         inf_depth = dmax * 5.0
        # elif os.path.exists(os.path.join(basedir, "planes.txt")):
        #     with open(os.path.join(basedir, "planes.txt"), "r") as fi:
        #         data = [float(x) for x in fi.readline().split(" ")]
        #         if len(data) == 3:
        #             dmin, dmax, invz = data
        #         elif len(data) == 4:
        #             dmin, dmax, invz, _ = data
        #         close_depth = dmin * 0.9
        #         inf_depth = dmax * 5.0
        # NOTE(review): with the planes.txt reads above commented out,
        # prev_close/prev_inf are always -1, which makes the "shiny" clamp
        # below set inf_depth back to -1 -- confirm whether the planes-file
        # loading was meant to stay enabled.
        prev_close, prev_inf = close_depth, inf_depth
        if close_depth < 0 or inf_depth < 0 or render_style == "llff":
            close_depth, inf_depth = bds.min() * 0.9, bds.max() * 5.0
        if render_style == "shiny":
            close_depth, inf_depth = bds.min() * 0.9, bds.max() * 5.0
            if close_depth < prev_close:
                close_depth = prev_close
            if inf_depth > prev_inf:
                inf_depth = prev_inf
        # Harmonic-style blend of the near/far depths gives the focus depth.
        dt = 0.75
        mean_dz = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth))
        focal = mean_dz
        # Get radii for spiral path
        tt = poses[:, :3, 3]  # ptstocam(poses[:3,3,:].T, c2w).T
        rads = np.percentile(np.abs(tt), 90, 0)
        c2w_path = c2w
        N_views = 120
        N_rots = 2
        # if path_zflat:
        #     # zloc = np.percentile(tt, 10, 0)[2]
        #     zloc = -close_depth * 0.1
        #     c2w_path[:3, 3] = c2w_path[:3, 3] + zloc * c2w_path[:3, 2]
        #     rads[2] = 0.0
        #     N_rots = 1
        #     N_views /= 2
        render_poses = render_path_spiral(
            c2w_path, up, rads, focal, zrate=0.5, rots=N_rots, N=N_views
        )
    render_poses = np.array(render_poses).astype(np.float32)
    # reference_view_id should stay in train set only
    validation_ids = np.arange(poses.shape[0])
    validation_ids[::split_train_val] = -1
    validation_ids = validation_ids < 0
    train_ids = np.logical_not(validation_ids)
    train_poses = poses[train_ids]
    train_bds = bds[train_ids]
    c2w = poses_avg(train_poses)
    # Pick the training view whose center is closest to the average camera.
    dists = np.sum(np.square(c2w[:3, 3] - train_poses[:, :3, 3]), -1)
    reference_view_id = np.argmin(dists)
    reference_depth = train_bds[reference_view_id]
    print(reference_depth)
    return (
        reference_depth,
        reference_view_id,
        render_poses,
        poses,
        intrinsic
    )
21,030 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
The provided code snippet includes necessary dependencies for implementing the `get_expon_lr_func` function. Write a Python function `def get_expon_lr_func( lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000 )` to solve the following problem:
Continuous learning rate decay function. Adapted from JaxNeRF The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. :param conf: config subtree 'lr' or similar :param max_steps: int, the number of steps during optimization. :return HoF which takes step as input
Here is the function:
def get_expon_lr_func(
    lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
):
    """
    Continuous learning-rate decay schedule (adapted from JaxNeRF).

    The returned function maps a step number to a rate that equals lr_init at
    step 0 and lr_final at max_steps, log-linearly interpolated in between
    (i.e. exponential decay). When lr_delay_steps > 0 the rate is additionally
    scaled by a smooth warmup that starts at lr_init * lr_delay_mult and eases
    back to the nominal schedule by step lr_delay_steps.

    :param max_steps: int, total number of optimization steps.
    :return: callable step -> learning rate.
    """
    def schedule(step):
        if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
            # Negative step (or an all-zero schedule) disables the parameter.
            return 0.0
        if lr_delay_steps > 0:
            # A kind of reverse cosine decay.
            warm = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
                0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
            )
        else:
            warm = 1.0
        frac = np.clip(step / max_steps, 0, 1)
        decayed = np.exp(np.log(lr_init) * (1 - frac) + np.log(lr_final) * frac)
        return warm * decayed

    return schedule
21,031 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
The provided code snippet includes necessary dependencies for implementing the `save_img` function. Write a Python function `def save_img(img: np.ndarray, path: str)` to solve the following problem:
Save an image to disk. Image should have values in [0,1].
Here is the function:
def save_img(img: np.ndarray, path: str):
    """Save an image to disk. Image should have values in [0,1]."""
    # Quantize to 8-bit after clipping into the valid range.
    quantized = np.array((np.clip(img, 0.0, 1.0) * 255.0).astype(np.uint8))
    # OpenCV writes BGR, so convert from the RGB layout used in-memory.
    bgr = cv2.cvtColor(quantized, cv2.COLOR_RGB2BGR)
    cv2.imwrite(path, bgr)
21,032 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
The provided code snippet includes necessary dependencies for implementing the `xyz2equirect` function. Write a Python function `def xyz2equirect(bearings, rows, cols)` to solve the following problem:
Convert ray direction vectors into equirectangular pixel coordinates. Inverse of equirect2xyz. Taken from Vickie Ye
Here is the function:
def xyz2equirect(bearings, rows, cols):
    """
    Map unit ray-direction vectors to equirectangular pixel coordinates.
    Inverse of equirect2xyz.
    Taken from Vickie Ye
    """
    latitude = np.arcsin(bearings[..., 1])
    longitude = np.arctan2(bearings[..., 0], bearings[..., 2])
    u = cols * (0.5 + longitude / 2 / np.pi)
    v = rows * (0.5 - latitude / np.pi)
    return np.stack([u, v], axis=-1)
21,033 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
class Rays:
def to(self, *args, **kwargs):
def __getitem__(self, key):
def __len__(self):
def select_or_shuffle_rays(rays_init : Rays,
                           permutation: int = False,
                           epoch_size: Optional[int] = None,
                           device: Union[str, torch.device] = "cpu"):
    """Pick one epoch's worth of rays from ``rays_init``.

    With ``permutation`` truthy the rays are drawn without replacement (a
    shuffled prefix); otherwise they are sampled uniformly with replacement.
    The selection happens on CPU; the result is moved to ``device``.
    """
    total = rays_init.origins.size(0)
    count = total if epoch_size is None else epoch_size
    if permutation:
        print(" Shuffling rays")
        chosen = torch.randperm(total, device='cpu')[:count]
    else:
        print(" Selecting random rays")
        chosen = torch.randint(total, (count,), device='cpu')
    return rays_init[chosen].to(device=device)
21,034 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
The provided code snippet includes necessary dependencies for implementing the `compute_ssim` function. Write a Python function `def compute_ssim( img0, img1, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03, return_map=False, )` to solve the following problem:
Computes SSIM from two images. This function was modeled after tf.image.ssim, and should produce comparable output. Args: img0: torch.tensor. An image of size [..., width, height, num_channels]. img1: torch.tensor. An image of size [..., width, height, num_channels]. max_val: float > 0. The maximum magnitude that `img0` or `img1` can have. filter_size: int >= 1. Window size. filter_sigma: float > 0. The bandwidth of the Gaussian used for filtering. k1: float > 0. One of the SSIM dampening parameters. k2: float > 0. One of the SSIM dampening parameters. return_map: Bool. If True, will cause the per-pixel SSIM "map" to returned Returns: Each image's mean SSIM, or a tensor of individual values if `return_map`.
Here is the function:
def compute_ssim(
    img0,
    img1,
    max_val=1.0,
    filter_size=11,
    filter_sigma=1.5,
    k1=0.01,
    k2=0.03,
    return_map=False,
):
    """Compute SSIM between two images (modeled after tf.image.ssim).

    Args:
        img0: torch.Tensor of shape [..., width, height, num_channels].
        img1: torch.Tensor of the same shape.
        max_val: maximum possible magnitude of a pixel value (> 0).
        filter_size: Gaussian window size (>= 1).
        filter_sigma: bandwidth of the Gaussian window (> 0).
        k1, k2: SSIM dampening parameters.
        return_map: if True, return the per-pixel SSIM map.

    Returns:
        Mean SSIM per image, or the per-pixel map when ``return_map`` is set.
    """
    device = img0.device
    width, height, num_channels = img0.size()[-3:]
    # Collapse leading dims into a batch and go channels-first for conv2d.
    img0 = img0.view(-1, width, height, num_channels).permute(0, 3, 1, 2)
    img1 = img1.view(-1, width, height, num_channels).permute(0, 3, 1, 2)

    # 1-D Gaussian kernel, normalized to sum to one.
    half = filter_size // 2
    shift = (2 * half - filter_size + 1) / 2
    taps = ((torch.arange(filter_size, device=device) - half + shift) / filter_sigma) ** 2
    kernel = torch.exp(-0.5 * taps)
    kernel /= torch.sum(kernel)

    col_kernel = kernel.view(1, 1, -1, 1).repeat(num_channels, 1, 1, 1)
    row_kernel = kernel.view(1, 1, 1, -1).repeat(num_channels, 1, 1, 1)

    def blur(z):
        # Separable blur: horizontal pass, then vertical (cheaper than 2-D conv).
        z = F.conv2d(z, row_kernel, padding=[0, half], groups=num_channels)
        return F.conv2d(z, col_kernel, padding=[half, 0], groups=num_channels)

    mu0 = blur(img0)
    mu1 = blur(img1)
    mu00 = mu0 * mu0
    mu11 = mu1 * mu1
    mu01 = mu0 * mu1
    sigma00 = blur(img0 ** 2) - mu00
    sigma11 = blur(img1 ** 2) - mu11
    sigma01 = blur(img0 * img1) - mu01

    # Clip to valid statistics: variances must be non-negative and the
    # covariance magnitude is bounded by sqrt(var0 * var1).
    sigma00 = torch.clamp(sigma00, min=0.0)
    sigma11 = torch.clamp(sigma11, min=0.0)
    sigma01 = torch.sign(sigma01) * torch.min(
        torch.sqrt(sigma00 * sigma11), torch.abs(sigma01)
    )

    c1 = (k1 * max_val) ** 2
    c2 = (k2 * max_val) ** 2
    numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
    denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
    ssim_map = numer / denom
    ssim = torch.mean(ssim_map.reshape([-1, num_channels * width * height]), dim=-1)
    return ssim_map if return_map else ssim
21,035 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
class Rays:
    """Container for a batch of rays with their ground-truth colors.

    NOTE(review): fields are bare annotations and ``to``/``__getitem__``
    construct ``Rays(origins, dirs, gt)`` positionally, so upstream this is
    presumably a @dataclass whose decorator was stripped here -- confirm.
    """
    origins: Union[torch.Tensor, List[torch.Tensor]]  # per-ray origin points
    dirs: Union[torch.Tensor, List[torch.Tensor]]     # per-ray directions
    gt: Union[torch.Tensor, List[torch.Tensor]]       # per-ray target values

    def to(self, *args, **kwargs):
        """Return a new Rays with every tensor moved/cast via Tensor.to."""
        origins = self.origins.to(*args, **kwargs)
        dirs = self.dirs.to(*args, **kwargs)
        gt = self.gt.to(*args, **kwargs)
        return Rays(origins, dirs, gt)

    def __getitem__(self, key):
        """Index/slice all three tensors in lockstep and return a new Rays."""
        origins = self.origins[key]
        dirs = self.dirs[key]
        gt = self.gt[key]
        return Rays(origins, dirs, gt)

    def __len__(self):
        # Number of rays = first dimension of the origins tensor.
        return self.origins.size(0)
def equirect2xyz(uv, rows, cols):
    """
    Convert equirectangular pixel coordinates to unit direction vectors,
    inverse of xyz2equirect.
    Taken from Vickie Ye
    Args:
        uv: np.ndarray [..., 2] pixel-space x, y coordinates
            (x in [0, cols), y in [0, rows); the original docstring claimed
            [-1, 1], but the scaling below expects pixels)
    Returns:
        xyz: np.ndarray [..., 3] unit vectors
    """
    longitude = (uv[..., 0] * (1.0 / cols) - 0.5) * (2 * np.pi)
    latitude = -(uv[..., 1] * (1.0 / rows) - 0.5) * np.pi
    cos_lat = np.cos(latitude)
    return np.stack(
        [
            cos_lat * np.sin(longitude),
            np.sin(latitude),
            cos_lat * np.cos(longitude),
        ],
        axis=-1,
    )
The provided code snippet includes necessary dependencies for implementing the `generate_rays` function. Write a Python function `def generate_rays(w, h, focal, camtoworlds, equirect=False)` to solve the following problem:
Generate perspective camera rays. Principal point is at center. Args: w: int image width h: int image heigth focal: float real focal length camtoworlds: jnp.ndarray [B, 4, 4] c2w homogeneous poses equirect: if true, generates spherical rays instead of pinhole Returns: rays: Rays a namedtuple(origins [B, 3], directions [B, 3], viewdirs [B, 3])
Here is the function:
def generate_rays(w, h, focal, camtoworlds, equirect=False):
    """
    Generate perspective camera rays. Principal point is at center.
    Args:
        w: int image width
        h: int image heigth
        focal: float real focal length
        camtoworlds: jnp.ndarray [B, 4, 4] c2w homogeneous poses
        equirect: if true, generates spherical rays instead of pinhole
    Returns:
        rays: Rays a namedtuple(origins [B, 3], directions [B, 3], viewdirs [B, 3])
    """
    # Pixel grid over the full image.
    x, y = np.meshgrid(  # pylint: disable=unbalanced-tuple-unpacking
        np.arange(w, dtype=np.float32),  # X-Axis (columns)
        np.arange(h, dtype=np.float32),  # Y-Axis (rows)
        indexing="xy",
    )
    if equirect:
        uv = np.stack([x * (2.0 / w) - 1.0, y * (2.0 / h) - 1.0], axis=-1)
        # NOTE(review): equirect2xyz as defined elsewhere takes (uv, rows,
        # cols) in pixel space; this single-argument call would raise
        # TypeError, so the equirect path looks stale/broken -- confirm the
        # intended equirect2xyz signature before relying on it.
        camera_dirs = equirect2xyz(uv)
    else:
        # Pinhole directions in camera space: x right, y up, looking down -z.
        camera_dirs = np.stack(
            [
                (x - w * 0.5) / focal,
                -(y - h * 0.5) / focal,
                -np.ones_like(x),
            ],
            axis=-1,
        )
    # camera_dirs = camera_dirs / np.linalg.norm(camera_dirs, axis=-1, keepdims=True)
    # Rotate the shared per-pixel directions into world space for each camera.
    c2w = camtoworlds[:, None, None, :3, :3]
    camera_dirs = camera_dirs[None, Ellipsis, None]
    directions = np.matmul(c2w, camera_dirs)[Ellipsis, 0]
    origins = np.broadcast_to(
        camtoworlds[:, None, None, :3, -1], directions.shape
    )
    norms = np.linalg.norm(directions, axis=-1, keepdims=True)
    viewdirs = directions / norms
    # NOTE(review): assumes a Rays type with fields (origins, directions,
    # viewdirs); the Rays defined elsewhere in this chunk has (origins, dirs,
    # gt) -- verify which Rays definition is actually in scope here.
    rays = Rays(
        origins=origins, directions=directions, viewdirs=viewdirs
    )
    return rays
21,036 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
The provided code snippet includes necessary dependencies for implementing the `similarity_from_cameras` function. Write a Python function `def similarity_from_cameras(c2w)` to solve the following problem:
Get a similarity transform to normalize dataset from c2w (OpenCV convention) cameras :param c2w: (N, 4) :return T (4,4) , scale (float)
Here is the function:
def similarity_from_cameras(c2w):
    """
    Get a similarity transform to normalize dataset
    from c2w (OpenCV convention) cameras
    :param c2w: (N, 4)
    :return T (4,4) , scale (float)
    """
    positions = c2w[:, :3, 3]
    rotations = c2w[:, :3, :3]

    # (1) Rotate the world so that z+ is the up axis.
    # Estimate the up axis by averaging the camera up axes.
    cam_ups = np.sum(rotations * np.array([0, -1.0, 0]), axis=-1)
    mean_up = np.mean(cam_ups, axis=0)
    mean_up /= np.linalg.norm(mean_up)
    target_up = np.array([0.0, -1.0, 0.0])

    cos_angle = (target_up * mean_up).sum()
    axis = np.cross(mean_up, target_up)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    if cos_angle > -1:
        # Rodrigues-style rotation aligning mean_up onto target_up.
        R_align = np.eye(3) + K + (K @ K) * 1 / (1 + cos_angle)
    else:
        # Unlikely 180-degree case (data already has y+ up): flip about x.
        R_align = np.array([[-1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0]])
        # R_align = np.eye(3) # DEBUG

    rotations = (R_align @ rotations)
    fwds = np.sum(rotations * np.array([0, 0.0, 1.0]), axis=-1)
    positions = (R_align @ positions[..., None])[..., 0]

    # (2) Recenter the scene using camera center rays: take the point on each
    # forward ray closest to the origin, then the median for robustness.
    closest = positions + (fwds * -positions).sum(-1)[:, None] * fwds
    shift = -np.median(closest, axis=0)
    # shift = -np.mean(positions, axis=0) # DEBUG

    transform = np.eye(4)
    transform[:3, 3] = shift
    transform[:3, :3] = R_align

    # (3) Rescale so the median camera distance from the new origin is 1.
    scale = 1.0 / np.median(np.linalg.norm(positions + shift, axis=-1))
    return transform, scale
21,037 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
The provided code snippet includes necessary dependencies for implementing the `jiggle_and_interp_poses` function. Write a Python function `def jiggle_and_interp_poses(poses : torch.Tensor, n_inter: int, noise_std : float=0.0)` to solve the following problem:
For generating a novel trajectory close to known trajectory :param poses: torch.Tensor (B, 4, 4) :param n_inter: int, number of views to interpolate in total :param noise_std: float, default 0
Here is the function:
def jiggle_and_interp_poses(poses : torch.Tensor,
                            n_inter: int,
                            noise_std : float=0.0):
    """
    For generating a novel trajectory close to known trajectory
    :param poses: torch.Tensor (B, 4, 4)
    :param n_inter: int, number of views to interpolate in total
    :param noise_std: float, default 0
    """
    num_in = poses.size(0)
    src = poses.cpu().numpy().copy()
    rots_in = Rotation.from_matrix(src[:, :3, :3])
    centers = src[:, :3, 3]
    # Optional positional jitter before fitting the splines.
    centers += np.random.randn(*centers.shape) * noise_std
    quats_in = rots_in.as_quat()
    knots = np.arange(num_in, dtype=np.float32)
    samples = np.linspace(knots[0], knots[-1], n_inter, dtype=np.float32)
    # Spline the quaternions componentwise, then renormalize to unit length.
    quats_out: np.ndarray = CubicSpline(knots, quats_in)(samples)
    quats_out = quats_out / np.linalg.norm(quats_out, axis=-1)[..., None]
    centers_out = CubicSpline(knots, centers)(samples)
    rot_out = Rotation.from_quat(quats_out).as_matrix()
    # Reassemble homogeneous 4x4 poses: [R | t] over a constant bottom row.
    top = np.concatenate([rot_out, centers_out[..., None]], axis=-1)
    bottom = np.array([[0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
    bottom = bottom[None].repeat(top.shape[0], 0)
    out = np.concatenate([top, bottom], axis=-2)
    return torch.from_numpy(out).to(device=poses.device, dtype=poses.dtype)
21,038 | import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
def _trans_t(t):
return np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, t],
[0, 0, 0, 1],
],
dtype=np.float32,
)
def _rot_phi(phi):
return np.array(
[
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi), np.cos(phi), 0],
[0, 0, 0, 1],
],
dtype=np.float32,
)
def _rot_theta(th):
return np.array(
[
[np.cos(th), 0, -np.sin(th), 0],
[0, 1, 0, 0],
[np.sin(th), 0, np.cos(th), 0],
[0, 0, 0, 1],
],
dtype=np.float32,
)
The provided code snippet includes necessary dependencies for implementing the `pose_spherical` function. Write a Python function `def pose_spherical(theta : float, phi : float, radius : float, offset : Optional[np.ndarray]=None, vec_up : Optional[np.ndarray]=None)` to solve the following problem:
Generate spherical rendering poses, from NeRF. Forgive the code horror :return: r (3,), t (3,)
Here is the function:
def pose_spherical(theta : float, phi : float, radius : float, offset : Optional[np.ndarray]=None,
                   vec_up : Optional[np.ndarray]=None):
    """
    Generate a spherical rendering pose, from NeRF. Forgive the code horror
    :return: (4, 4) c2w pose at the given spherical angles/radius
    """
    pose = _trans_t(radius)
    pose = _rot_phi(phi / 180.0 * np.pi) @ pose
    pose = _rot_theta(theta / 180.0 * np.pi) @ pose
    # Swap axes into the NeRF world convention.
    swap = np.array(
        [[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
        dtype=np.float32,
    )
    pose = swap @ pose
    if vec_up is not None:
        # Re-orient so the requested (normalized) up vector becomes the pole.
        vec_up = vec_up / np.linalg.norm(vec_up)
        basis_a = np.array([vec_up[0], -vec_up[2], vec_up[1]])
        basis_b = np.cross(vec_up, basis_a)
        frame = np.eye(4, 4, dtype=np.float32)
        frame[:3, 0] = basis_a
        frame[:3, 1] = basis_b
        frame[:3, 2] = vec_up
        pose = frame @ pose
    # Flip y/z to OpenCV-style camera axes.
    pose = pose @ np.diag(np.array([1, -1, -1, 1], dtype=np.float32))
    if offset is not None:
        pose[:3, 3] += offset
    return pose
21,039 | from scipy.spatial.transform import Rotation
import struct
import json
import glob
import copy
import numpy as np
import os
import torch
import torch.nn.functional as F
from collections import deque
from tqdm import tqdm
import imageio
import cv2
from .util import Rays, Intrin
from .dataset_base import DatasetBase
from .load_llff import load_llff_data
from typing import Union, Optional
from svox2.utils import convert_to_ndc
def readCameraDeepview(dataset):
    """Read DeepView rig metadata from <dataset>/models.json.

    Returns (cams, imgs): intrinsics keyed by camera id (taken from the first
    rig frame only, since they are constant per camera), and per-image
    extrinsics keyed by relative path.
    """
    cams = {}
    imgs = {}
    with open(os.path.join(dataset, "models.json"), "r") as fi:
        rig_frames = json.load(fi)
    for frame_idx, frame in enumerate(rig_frames):
        for cam_info in frame:
            img_id = cam_info["relative_path"]
            cam_id = img_id.split("/")[0]
            rotation = (
                Rotation.from_rotvec(np.float32(cam_info["orientation"]))
                .as_matrix()
                .astype(np.float32)
            )
            position = np.array([cam_info["position"]], dtype="f").reshape(3, 1)
            if frame_idx == 0:
                cams[cam_id] = {
                    "width": int(cam_info["width"]),
                    "height": int(cam_info["height"]),
                    "fx": cam_info["focal_length"],
                    "fy": cam_info["focal_length"] * cam_info["pixel_aspect_ratio"],
                    "px": cam_info["principal_point"][0],
                    "py": cam_info["principal_point"][1],
                }
            imgs[img_id] = {
                "camera_id": cam_id,
                "r": rotation,                       # world -> camera rotation
                "t": -np.matmul(rotation, position), # world -> camera translation
                "R": rotation.transpose(),           # camera -> world rotation
                "center": position,                  # camera center in world space
                "path": cam_info["relative_path"],
            }
    return cams, imgs
21,040 | from scipy.spatial.transform import Rotation
import struct
import json
import glob
import copy
import numpy as np
import os
import torch
import torch.nn.functional as F
from collections import deque
from tqdm import tqdm
import imageio
import cv2
from .util import Rays, Intrin
from .dataset_base import DatasetBase
from .load_llff import load_llff_data
from typing import Union, Optional
from svox2.utils import convert_to_ndc
def readImagesBinary(path):
    """Parse a COLMAP ``images.bin`` file into per-image extrinsics.

    :param path: path to the binary images file.
    :return: dict mapping image_id -> dict with world->camera rotation ``r``
        and translation ``t``, camera->world rotation ``R``, camera ``center``,
        and the relative image ``path``.
    """
    images = {}
    # 'with' guarantees the handle is closed even if parsing fails
    # (the original opened/closed manually and leaked on error).
    with open(path, "rb") as f:
        num_reg_images = struct.unpack("Q", f.read(8))[0]
        for _ in range(num_reg_images):
            image_id = struct.unpack("I", f.read(4))[0]
            qv = np.fromfile(f, np.double, 4)  # quaternion, scalar-first (COLMAP storage)
            tv = np.fromfile(f, np.double, 3)
            camera_id = struct.unpack("I", f.read(4))[0]
            name = ""
            name_char = -1
            while name_char != b"\x00":  # NUL-terminated image name
                name_char = f.read(1)
                if name_char != b"\x00":
                    name += name_char.decode("ascii")
            num_points2D = struct.unpack("Q", f.read(8))[0]
            # Skip the 2-D observations (x, y, point3D id) -- not needed here.
            # (Also: the original reused the outer loop variable `i` here.)
            for _ in range(num_points2D):
                f.read(8 * 2)  # for x and y
                f.read(8)  # for point3d id
            # from_quat takes scalar last. as_matrix() replaces as_dcm(),
            # which was removed in SciPy 1.6 and crashed this loader.
            r = Rotation.from_quat([qv[1], qv[2], qv[3], qv[0]]).as_matrix().astype(np.float32)
            t = tv.astype(np.float32).reshape(3, 1)
            R = np.transpose(r)
            center = -R @ t
            images[image_id] = {
                "camera_id": camera_id,
                "r": r,
                "t": t,
                "R": R,
                "center": center,
                "path": "dense/images/" + name,
            }
    return images
21,041 | from scipy.spatial.transform import Rotation
import struct
import json
import glob
import copy
import numpy as np
import os
import torch
import torch.nn.functional as F
from collections import deque
from tqdm import tqdm
import imageio
import cv2
from .util import Rays, Intrin
from .dataset_base import DatasetBase
from .load_llff import load_llff_data
from typing import Union, Optional
from svox2.utils import convert_to_ndc
def readCamerasBinary(path):
    """Parse a COLMAP ``cameras.bin`` file, assuming pinhole-style cameras
    with exactly four parameters (fx, fy, cx, cy).

    :param path: path to the binary cameras file.
    :return: dict camera_id -> {width, height, fx, fy, px, py}.
    """
    cams = {}
    # 'with' closes the handle even on a parse error (the original leaked it).
    with open(path, "rb") as f:
        num_cameras = struct.unpack("Q", f.read(8))[0]
        for _ in range(num_cameras):
            camera_id = struct.unpack("I", f.read(4))[0]
            # model_id is consumed to advance the stream but otherwise assumed
            # to describe a 4-parameter pinhole model.
            struct.unpack("i", f.read(4))
            width = struct.unpack("Q", f.read(8))[0]
            height = struct.unpack("Q", f.read(8))[0]
            fx, fy, px, py = struct.unpack("4d", f.read(32))
            cams[camera_id] = {
                "width": width,
                "height": height,
                "fx": fx,
                "fy": fy,
                "px": px,
                "py": py,
            }
    return cams
21,042 | from scipy.spatial.transform import Rotation
import struct
import json
import glob
import copy
import numpy as np
import os
import torch
import torch.nn.functional as F
from collections import deque
from tqdm import tqdm
import imageio
import cv2
from .util import Rays, Intrin
from .dataset_base import DatasetBase
from .load_llff import load_llff_data
from typing import Union, Optional
from svox2.utils import convert_to_ndc
def buildCamera(W, H, fx, fy, cx, cy):
    """Pack intrinsics into the camera dict layout used by the loaders
    (width/height coerced to int, focal lengths and principal point to float)."""
    keys = ("width", "height", "fx", "fy", "px", "py")
    casts = (int, int, float, float, float, float)
    values = (W, H, fx, fy, cx, cy)
    return {k: cast(v) for k, cast, v in zip(keys, casts, values)}
21,043 | from scipy.spatial.transform import Rotation
import struct
import json
import glob
import copy
import numpy as np
import os
import torch
import torch.nn.functional as F
from collections import deque
from tqdm import tqdm
import imageio
import cv2
from .util import Rays, Intrin
from .dataset_base import DatasetBase
from .load_llff import load_llff_data
from typing import Union, Optional
from svox2.utils import convert_to_ndc
def nerf_pose_to_ours(cam):
    """Convert a NeRF-convention c2w pose matrix to this codebase's convention.

    Flips the y/z components of the camera center and the corresponding signs
    of the rotation, then derives the world->camera inverse.

    :param cam: (4, 4) (or (3, 4)) NeRF camera-to-world pose.
    :return: (R, center, r, t) -- camera->world rotation and position, plus
        world->camera rotation and translation.

    Fix over the original: the sign flips used to be applied through views
    into ``cam`` (``R[1:, 0] *= -1`` etc.), silently mutating the caller's
    array in place; we now work on copies.
    """
    R = cam[:3, :3].copy()                 # copy so sign flips don't leak into `cam`
    center = cam[:3, 3].reshape([3, 1]).copy()
    center[1:] *= -1                       # flip y and z of the camera position
    R[1:, 0] *= -1                         # flip handedness of the rotation
    R[0, 1:] *= -1
    r = np.transpose(R)
    t = -r @ center
    return R, center, r, t
def buildNerfPoses(poses, images_path=None):
    """Convert an array of NeRF-convention poses into the per-index dict
    layout used by the loaders (camera_id is always 0; an optional image
    path is attached when ``images_path`` is given)."""
    output = {}
    for idx in range(poses.shape[0]):
        R, center, r, t = nerf_pose_to_ours(poses[idx].astype(np.float32))
        entry = {"camera_id": 0, "r": r, "t": t, "R": R, "center": center}
        if images_path is not None:
            entry["path"] = images_path[idx]
        output[idx] = entry
    return output
21,044 | import torch
import argparse
from util.dataset import datasets
import json
# Registry of dataset loaders, keyed by the --dataset_type CLI choice.
# NOTE(review): the loader classes are imported elsewhere; 'auto' presumably
# sniffs the on-disk format -- confirm in auto_dataset.
datasets = {
    'nerf': NeRFDataset,
    'llff': LLFFDataset,
    'nsvf': NSVFDataset,
    'co3d': CO3DDataset,
    'auto': auto_dataset
}
def define_common_args(parser : argparse.ArgumentParser):
    """Register CLI arguments shared across scripts: data-loading and render options."""
    parser.add_argument('data_dir', type=str)
    parser.add_argument('--config', '-c',
                        type=str,
                        default=None,
                        help="Config yaml file (will override args)")
    group = parser.add_argument_group("Data loading")
    group.add_argument('--dataset_type',
                       choices=list(datasets.keys()) + ["auto"],
                       default="auto",
                       help="Dataset type (specify type or use auto)")
    group.add_argument('--scene_scale',
                       type=float,
                       default=None,
                       help="Global scene scaling (or use dataset default)")
    group.add_argument('--scale',
                       type=float,
                       default=None,
                       help="Image scale, e.g. 0.5 for half resolution (or use dataset default)")
    group.add_argument('--seq_id',
                       type=int,
                       default=1000,
                       help="Sequence ID (for CO3D only)")
    group.add_argument('--epoch_size',
                       type=int,
                       default=12800,
                       help="Pseudo-epoch size in term of batches (to be consistent across datasets)")
    group.add_argument('--white_bkgd',
                       type=bool,
                       default=True,
                       help="Whether to use white background (ignored in some datasets)")
    group.add_argument('--llffhold',
                       type=int,
                       default=8,
                       help="LLFF holdout every")
    group.add_argument('--normalize_by_bbox',
                       type=bool,
                       default=False,
                       help="Normalize by bounding box in bbox.txt, if available (NSVF dataset only); precedes normalize_by_camera")
    group.add_argument('--data_bbox_scale',
                       type=float,
                       default=1.2,
                       help="Data bbox scaling (NSVF dataset only)")
    group.add_argument('--cam_scale_factor',
                       type=float,
                       default=0.95,
                       help="Camera autoscale factor (NSVF/CO3D dataset only)")
    group.add_argument('--normalize_by_camera',
                       type=bool,
                       default=True,
                       help="Normalize using cameras, assuming a 360 capture (NSVF dataset only); only used if not normalize_by_bbox")
    group.add_argument('--perm', action='store_true', default=False,
                       help='sample by permutation of rays (true epoch) instead of '
                       'uniformly random rays')
    group = parser.add_argument_group("Render options")
    group.add_argument('--step_size',
                       type=float,
                       default=0.5,
                       help="Render step size (in voxel size units)")
    group.add_argument('--sigma_thresh',
                       type=float,
                       default=1e-8,
                       help="Skips voxels with sigma < this")
    group.add_argument('--stop_thresh',
                       type=float,
                       default=1e-7,
                       help="Ray march stopping threshold")
    group.add_argument('--background_brightness',
                       type=float,
                       default=1.0,
                       help="Brightness of the infinite background")
    group.add_argument('--renderer_backend', '-B',
                       choices=['cuvol', 'svox1', 'nvol'],
                       default='cuvol',
                       help="Renderer backend")
    group.add_argument('--random_sigma_std',
                       type=float,
                       default=0.0,
                       help="Random Gaussian std to add to density values (only if enable_random)")
    group.add_argument('--random_sigma_std_background',
                       type=float,
                       default=0.0,
                       help="Random Gaussian std to add to density values for BG (only if enable_random)")
    group.add_argument('--near_clip',
                       type=float,
                       default=0.00,
                       help="Near clip distance (in world space distance units, only for FG)")
    group.add_argument('--use_spheric_clip',
                       action='store_true',
                       default=False,
                       help="Use spheric ray clipping instead of voxel grid AABB "
                       "(only for FG; changes near_clip to mean 1-near_intersection_radius; "
                       "far intersection is always at radius 1)")
    group.add_argument('--enable_random',
                       action='store_true',
                       default=False,
                       help="Random Gaussian std to add to density values")
    group.add_argument('--last_sample_opaque',
                       action='store_true',
                       default=False,
                       help="Last sample has +1e9 density (used for LLFF)") | null
21,045 | import torch
import argparse
from util.dataset import datasets
import json
The provided code snippet includes necessary dependencies for implementing the `build_data_options` function. Write a Python function `def build_data_options(args)` to solve the following problem:
Arguments to pass as kwargs to the dataset constructor
Here is the function:
def build_data_options(args):
    """
    Arguments to pass as kwargs to the dataset constructor
    """
    return {
        'dataset_type': args.dataset_type,
        'seq_id': args.seq_id,
        # epoch_size is measured in batches; convert to rays using batch_size
        # (falls back to 5000 when the script defines no --batch_size arg)
        'epoch_size': args.epoch_size * args.__dict__.get('batch_size', 5000),
        'scene_scale': args.scene_scale,
        'scale': args.scale,
        'white_bkgd': args.white_bkgd,
        'hold_every': args.llffhold,
        'normalize_by_bbox': args.normalize_by_bbox,
        'data_bbox_scale': args.data_bbox_scale,
        'cam_scale_factor': args.cam_scale_factor,
        'normalize_by_camera': args.normalize_by_camera,
        'permutation': args.perm
    } | Arguments to pass as kwargs to the dataset constructor
21,046 | import torch
import argparse
from util.dataset import datasets
import json
The provided code snippet includes necessary dependencies for implementing the `maybe_merge_config_file` function. Write a Python function `def maybe_merge_config_file(args, allow_invalid=False)` to solve the following problem:
Load json config file if specified and merge the arguments
Here is the function:
def maybe_merge_config_file(args, allow_invalid=False):
    """
    Load json config file if specified and merge the arguments

    :param args: argparse namespace; updated in place from args.config (JSON)
    :param allow_invalid: if False, raise ValueError on keys not present on args
    """
    if args.config is not None:
        with open(args.config, "r") as config_file:
            configs = json.load(config_file)
        invalid_args = list(set(configs.keys()) - set(dir(args)))
        if invalid_args and not allow_invalid:
            raise ValueError(f"Invalid args {invalid_args} in {args.config}.")
        # Config-file values override any CLI-provided values
        args.__dict__.update(configs) | Load json config file if specified and merge the arguments
21,047 | import torch
import argparse
from util.dataset import datasets
import json
The provided code snippet includes necessary dependencies for implementing the `setup_render_opts` function. Write a Python function `def setup_render_opts(opt, args)` to solve the following problem:
Pass render arguments to the SparseGrid renderer options
Here is the function:
def setup_render_opts(opt, args):
    """
    Pass render arguments to the SparseGrid renderer options

    Copies each render-related CLI argument onto the renderer options
    object `opt` (mutated in place).
    """
    opt.step_size = args.step_size
    opt.sigma_thresh = args.sigma_thresh
    opt.stop_thresh = args.stop_thresh
    opt.background_brightness = args.background_brightness
    opt.backend = args.renderer_backend
    opt.random_sigma_std = args.random_sigma_std
    opt.random_sigma_std_background = args.random_sigma_std_background
    opt.last_sample_opaque = args.last_sample_opaque
    opt.near_clip = args.near_clip
    opt.use_spheric_clip = args.use_spheric_clip | Pass render arguments to the SparseGrid renderer options
21,048 | from .nerf_dataset import NeRFDataset
from .llff_dataset import LLFFDataset
from .nsvf_dataset import NSVFDataset
from .co3d_dataset import CO3DDataset
from os import path
# NOTE(review): the class/method declarations below are signature-only stubs
# (bodies elided by the snippet extraction). They document the dataset
# constructors' interfaces but are not executable as-is.
class NeRFDataset(DatasetBase):
    def __init__(
        self,
        root,
        split,
        epoch_size : Optional[int] = None,
        device: Union[str, torch.device] = "cpu",
        scene_scale: Optional[float] = None,
        factor: int = 1,
        scale : Optional[float] = None,
        permutation: bool = True,
        white_bkgd: bool = True,
        n_images = None,
        **kwargs
    ): # Give warning
class LLFFDataset(DatasetBase):
    def __init__(
        self,
        root : str,
        split : str,
        epoch_size : Optional[int] = None,
        device: Union[str, torch.device] = "cpu",
        permutation: bool = True,
        factor: int = 1,
        ref_img: str="",
        scale : Optional[float]=1.0/4.0, # 4x downsample
        dmin : float=-1,
        dmax : int=-1,
        invz : int= 0,
        transform=None,
        render_style="",
        hold_every=8,
        offset=250,
        **kwargs
    ):
    def _load_images(self):
    def gen_rays(self, factor=1):
class NSVFDataset(DatasetBase):
    def __init__(
        self,
        root,
        split,
        epoch_size : Optional[int] = None,
        device: Union[str, torch.device] = "cpu",
        scene_scale: Optional[float] = None, # Scene scaling
        factor: int = 1, # Image scaling (on ray gen; use gen_rays(factor) to dynamically change scale)
        scale : Optional[float] = 1.0, # Image scaling (on load)
        permutation: bool = True,
        white_bkgd: bool = True,
        normalize_by_bbox: bool = False,
        data_bbox_scale : float = 1.1, # Only used if normalize_by_bbox
        cam_scale_factor : float = 0.95,
        normalize_by_camera: bool = True,
        **kwargs
    ):
    def sort_key(x):
    def look_for_dir(cands, required=True):
class CO3DDataset(DatasetBase):
    def __init__(
        self,
        root,
        split,
        seq_id : Optional[int] = None,
        epoch_size : Optional[int] = None,
        permutation: bool = True,
        device: Union[str, torch.device] = "cpu",
        max_image_dim: int = 800,
        max_pose_dist: float = 5.0,
        cam_scale_factor: float = 0.95,
        hold_every=8,
        **kwargs,
    ):
    def load_sequence(self, sequence_id : int):
    def gen_rays(self, factor=1):
def auto_dataset(root : str, *args, **kwargs):
    """Detect the dataset flavor under `root` by marker files and construct it.

    Checks, in order: CO3D, LLFF (poses_bounds.npy), NeRF/Blender
    (transforms*.json); otherwise falls back to the extended NSVF loader.
    """
    if path.isfile(path.join(root, 'apple', 'eval_batches_multisequence.json')):
        print("Detected CO3D dataset")
        return CO3DDataset(root, *args, **kwargs)
    elif path.isfile(path.join(root, 'poses_bounds.npy')):
        print("Detected LLFF dataset")
        return LLFFDataset(root, *args, **kwargs)
    elif path.isfile(path.join(root, 'transforms.json')) or \
            path.isfile(path.join(root, 'transforms_train.json')):
        print("Detected NeRF (Blender) dataset")
        return NeRFDataset(root, *args, **kwargs)
    else:
        print("Defaulting to extended NSVF dataset")
        return NSVFDataset(root, *args, **kwargs) | null
21,049 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
def inthroot(x : int, n : int):
    """Return the exact integer n-th root of x (binary search), or None if
    x <= 0 or x is not a perfect n-th power."""
    if x <= 0:
        return None
    lo, hi = 1, x
    while lo <= hi:
        # Overflow-safe midpoint (harmless in Python, kept for clarity)
        mi = lo + (hi - lo) // 2
        p = mi ** n
        if p == x:
            return mi
        elif p > x:
            hi = mi - 1
        else:
            lo = mi + 1
    return None | null
21,050 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
def _get_c_extension():
    """Try to load the compiled CUDA extension; return it or None.

    Returns None (with a warning) when svox2.csrc is missing or lacks the
    expected `sample_grid` symbol, so callers can fall back to slow paths.
    """
    from warnings import warn
    try:
        import svox2.csrc as _C
        if not hasattr(_C, "sample_grid"):
            _C = None
    # Deliberate best-effort: any import failure degrades to the slow path
    except:
        _C = None
    if _C is None:
        warn("CUDA extension svox2.csrc could not be loaded! " +
             "Operations will be slow.\n" +
             "Please do not import svox in the svox2 source directory.")
    return _C | null
21,051 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
def _unexpand_bits(v):
v &= 0x49249249
v = (v | (v >> 2)) & 0xc30c30c3
v = (v | (v >> 4)) & 0xf00f00f
v = (v | (v >> 8)) & 0xff0000ff
v = (v | (v >> 16)) & 0x0000ffff
return v
def inv_morton_code_3(code):
    """Decode a 3D Morton (Z-order) code into its x, y, z components."""
    # x occupies bit positions 2, 5, 8, ...; y positions 1, 4, 7, ...; z 0, 3, 6, ...
    x = _unexpand_bits(code >> 2)
    y = _unexpand_bits(code >> 1)
    z = _unexpand_bits(code)
    return x, y, z | null
21,052 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
# NOTE(review): signature-only stubs — bodies elided by the snippet extraction.
def is_pow2(x : int):
def morton_code_3(x, y, z):
def gen_morton(D, device='cpu', dtype=torch.long):
    """Generate the (D, D, D) grid of 3D Morton codes; D must be a power of 2."""
    assert is_pow2(D), "Morton code requires power of 2 reso"
    arr = torch.arange(D, device=device, dtype=dtype)
    # NOTE(review): meshgrid without indexing= relies on the legacy 'ij'
    # default; confirm on newer torch versions (deprecation warning).
    X, Y, Z = torch.meshgrid(arr, arr, arr)
    mort = morton_code_3(X, Y, Z)
    return mort | null
21,053 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
# Precomputed real spherical-harmonic polynomial coefficients for degrees
# 0 through 4; consumed by eval_sh_bases below.
SH_C0 = 0.28209479177387814
SH_C1 = 0.4886025119029199
SH_C2 = [
    1.0925484305920792,
    -1.0925484305920792,
    0.31539156525252005,
    -1.0925484305920792,
    0.5462742152960396
]
SH_C3 = [
    -0.5900435899266435,
    2.890611442640554,
    -0.4570457994644658,
    0.3731763325901154,
    -0.4570457994644658,
    1.445305721320277,
    -0.5900435899266435
]
SH_C4 = [
    2.5033429417967046,
    -1.7701307697799304,
    0.9461746957575601,
    -0.6690465435572892,
    0.10578554691520431,
    -0.6690465435572892,
    0.47308734787878004,
    -1.7701307697799304,
    0.6258357354491761,
]
The provided code snippet includes necessary dependencies for implementing the `eval_sh_bases` function. Write a Python function `def eval_sh_bases(basis_dim : int, dirs : torch.Tensor)` to solve the following problem:
Evaluate spherical harmonics bases at unit directions, without taking linear combination. At each point, the final result may then be obtained through simple multiplication. :param basis_dim: int SH basis dim. Currently, 1-25 square numbers supported :param dirs: torch.Tensor (..., 3) unit directions :return: torch.Tensor (..., basis_dim)
Here is the function:
def eval_sh_bases(basis_dim : int, dirs : torch.Tensor):
    """
    Evaluate spherical harmonics bases at unit directions,
    without taking linear combination.
    At each point, the final result may then be
    obtained through simple multiplication.
    :param basis_dim: int SH basis dim. Currently, 1-25 square numbers supported
    :param dirs: torch.Tensor (..., 3) unit directions
    :return: torch.Tensor (..., basis_dim)
    """
    result = torch.empty((*dirs.shape[:-1], basis_dim), dtype=dirs.dtype, device=dirs.device)
    result[..., 0] = SH_C0
    if basis_dim > 1:
        x, y, z = dirs.unbind(-1)
        result[..., 1] = -SH_C1 * y;
        result[..., 2] = SH_C1 * z;
        result[..., 3] = -SH_C1 * x;
        if basis_dim > 4:
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result[..., 4] = SH_C2[0] * xy;
            result[..., 5] = SH_C2[1] * yz;
            result[..., 6] = SH_C2[2] * (2.0 * zz - xx - yy);
            result[..., 7] = SH_C2[3] * xz;
            result[..., 8] = SH_C2[4] * (xx - yy);
            if basis_dim > 9:
                result[..., 9] = SH_C3[0] * y * (3 * xx - yy);
                result[..., 10] = SH_C3[1] * xy * z;
                result[..., 11] = SH_C3[2] * y * (4 * zz - xx - yy);
                result[..., 12] = SH_C3[3] * z * (2 * zz - 3 * xx - 3 * yy);
                result[..., 13] = SH_C3[4] * x * (4 * zz - xx - yy);
                result[..., 14] = SH_C3[5] * z * (xx - yy);
                result[..., 15] = SH_C3[6] * x * (xx - 3 * yy);
                if basis_dim > 16:
                    result[..., 16] = SH_C4[0] * xy * (xx - yy);
                    result[..., 17] = SH_C4[1] * yz * (3 * xx - yy);
                    result[..., 18] = SH_C4[2] * xy * (7 * zz - 1);
                    result[..., 19] = SH_C4[3] * yz * (7 * zz - 3);
                    result[..., 20] = SH_C4[4] * (zz * (35 * zz - 30) + 3);
                    result[..., 21] = SH_C4[5] * xz * (7 * zz - 3);
                    result[..., 22] = SH_C4[6] * (xx - yy) * (7 * zz - 1);
                    result[..., 23] = SH_C4[7] * xz * (xx - 3 * yy);
                    result[..., 24] = SH_C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy));
    return result | Evaluate spherical harmonics bases at unit directions, without taking linear combination. At each point, the final result may the be obtained through simple multiplication. :param basis_dim: int SH basis dim. Currently, 1-25 square numbers supported :param dirs: torch.Tensor (..., 3) unit directions :return: torch.Tensor (..., basis_dim)
21,054 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
@dataclass
class CubemapCoord:
    """A (batched) texel coordinate on a cubemap.

    Constructed positionally as ``CubemapCoord(ax, ori, u, v)`` (see
    dir_to_cubemap_coord), which requires the @dataclass-generated __init__
    — the decorator was missing here.
    """
    ax : torch.Tensor   # major-axis index of the face (component of xyz with largest |.|)
    ori : torch.Tensor  # orientation along that axis: 1 if positive, else 0
    u : torch.Tensor    # first in-face coordinate
    v : torch.Tensor    # second in-face coordinate
    def query_in(self, cubemap : torch.Tensor):
        """Look up these coordinates in a cubemap shaped (6, R, R, C) or (B, 6, R, R, C)."""
        face = self.ax * 2 + self.ori  # face index in [0, 6)
        if cubemap.ndim == 4:
            return cubemap[face, self.u, self.v]
        else:
            # Batched cubemap: one face lookup per batch element
            return cubemap[torch.arange(face.size(0), device=face.device), face, self.u, self.v]
    def clone(self):
        """Deep-copy all coordinate tensors."""
        return CubemapCoord(
            self.ax.clone(),
            self.ori.clone(),
            self.u.clone(),
            self.v.clone())
The provided code snippet includes necessary dependencies for implementing the `dir_to_cubemap_coord` function. Write a Python function `def dir_to_cubemap_coord(xyz : torch.Tensor, face_reso : int, eac : bool = True) -> CubemapCoord` to solve the following problem:
Convert a direction on a sphere (not necessarily normalized) :param xyz: direction (not necessarily normalized) :param face_reso: int, resolution of cubemap face :param eac: bool, if true (default) then uses equi-angular cubemaps (EAC) instead of standard cubemap; see https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/ :return: CubemapCoord
Here is the function:
def dir_to_cubemap_coord(xyz : torch.Tensor,
                         face_reso : int,
                         eac : bool = True) -> CubemapCoord:
    """
    Convert a direction on a sphere (not necessarily normalized)
    :param xyz: direction (not necessarily normalized)
    :param face_reso: int, resolution of cubemap face
    :param eac: bool, if true (default) then uses equi-angular cubemaps (EAC)
                instead of standard cubemap; see
                https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/
    :return: CubemapCoord
    """
    xyz = xyz.float()
    # Major axis = component with largest magnitude; scale so that |xyz[ax]| == 1
    maxv, ax = torch.max(torch.abs(xyz), dim=-1)
    xyz = xyz * (1.0 / maxv.unsqueeze(-1))
    if eac:
        # Equi-angular remap of the in-face coordinates
        xyz_eac = torch.atan(xyz) * (4 / math.pi)
    else:
        xyz_eac = xyz
    arr = torch.arange(ax.size(0))
    # Bit tricks selecting the two non-major axes for the in-face (u, v) coords
    ud = (ax ^ 1) & 1
    vd = (ax ^ 2) & 2
    u_eac = xyz_eac[arr, ud]
    v_eac = xyz_eac[arr, vd]
    ori = (xyz_eac[arr, ax] >= 0).long()
    # Map [-1, 1] -> [-0.5, face_reso - 0.5] (texel centers)
    u = ((u_eac + 1) * face_reso - 1.0) * 0.5
    v = ((v_eac + 1) * face_reso - 1.0) * 0.5
    return CubemapCoord(ax, ori, u, v) | Convert a direction on a sphere (not necessarily normalized) :param xyz: direction (not necessarily normalized) :param face_reso: int, resolution of cubemap face :param eac: bool, if true (default) then uses equi-angular cubemaps (EAC) instead of standard cubemap; see https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/ :return: CubemapCoord
21,055 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
@dataclass
class CubemapCoord:
    """A (batched) texel coordinate on a cubemap.

    Constructed positionally as ``CubemapCoord(ax, ori, u, v)``, which
    requires the @dataclass-generated __init__ — the decorator was missing.
    """
    ax : torch.Tensor   # major-axis index of the face
    ori : torch.Tensor  # orientation along that axis: 1 if positive, else 0
    u : torch.Tensor    # first in-face coordinate
    v : torch.Tensor    # second in-face coordinate
    def query_in(self, cubemap : torch.Tensor):
        """Look up these coordinates in a cubemap shaped (6, R, R, C) or (B, 6, R, R, C)."""
        face = self.ax * 2 + self.ori  # face index in [0, 6)
        if cubemap.ndim == 4:
            return cubemap[face, self.u, self.v]
        else:
            # Batched cubemap: one face lookup per batch element
            return cubemap[torch.arange(face.size(0), device=face.device), face, self.u, self.v]
    def clone(self):
        """Deep-copy all coordinate tensors."""
        return CubemapCoord(
            self.ax.clone(),
            self.ori.clone(),
            self.u.clone(),
            self.v.clone())
@dataclass
class CubemapBilerpQuery:
    """The four cubemap texels plus fractional offsets for one bilinear lookup.

    Built by cubemap_build_query and consumed by cubemap_sample; constructed
    positionally, which requires the @dataclass-generated __init__ — the
    decorator was missing here.
    """
    i00: CubemapCoord   # texel at (floor(u), floor(v))
    i01: CubemapCoord   # texel at (floor(u), floor(v) + 1)
    i10: CubemapCoord   # texel at (floor(u) + 1, floor(v))
    i11: CubemapCoord   # texel at (floor(u) + 1, floor(v) + 1)
    du: torch.Tensor    # fractional offset along u
    dv: torch.Tensor    # fractional offset along v
The provided code snippet includes necessary dependencies for implementing the `cubemap_build_query` function. Write a Python function `def cubemap_build_query(idx : CubemapCoord, face_reso : int, mode : str = 'linear') -> CubemapBilerpQuery` to solve the following problem:
Compute the points on the cubemap for bilinear sampling given a cubemap coordinate from dir_to_cubemap_coord; to be used with cubemap_sample. :param idx: CubemapCoord, cube map coordinate from dir_to_cubemap_coord :param face_reso: int, resolution of cubemap face :param mode: str, interpolation mode; one of nearest, linear_simple, linear; linear_simple interpolates per-face, while linear also interpolates across edges (this is the only one supported in CUDA) :return: CubemapBilerpQuery
Here is the function:
def cubemap_build_query(idx : CubemapCoord, face_reso : int,
                        mode : str = 'linear') -> CubemapBilerpQuery:
    """
    Compute the points on the cubemap for bilinear sampling
    given a cubemap coordinate from dir_to_cubemap_coord;
    to be used with cubemap_sample.
    :param idx: CubemapCoord, cube map coordinate from dir_to_cubemap_coord
    :param face_reso: int, resolution of cubemap face
    :param mode: str, interpolation mode; one of nearest, linear_simple, linear;
                 linear_simple interpolates per-face, while linear also
                 interpolates across edges (this is the only one supported
                 in CUDA)
    :raises NotImplementedError: for any other mode string
    :return: CubemapBilerpQuery
    """
    if mode == 'nearest':
        # Round to the nearest texel; all four corners collapse to the same
        # coordinate with zero fractional weights
        uf = torch.floor(idx.u + 0.5).long().clamp_(0, face_reso - 1)
        vf = torch.floor(idx.v + 0.5).long().clamp_(0, face_reso - 1)
        idx_ul = CubemapCoord(idx.ax, idx.ori, uf, vf)
        # Corner: triple average
        return CubemapBilerpQuery(idx_ul, idx_ul, idx_ul, idx_ul,
                                  torch.zeros_like(idx.u), torch.zeros_like(idx.v))
    elif mode == 'linear_simple':
        # Per-face bilinear: clamp so both texels stay on the same face
        u = idx.u.clamp(0, face_reso - 2)
        v = idx.v.clamp(0, face_reso - 2)
        uf = torch.floor(u).long()
        vf = torch.floor(v).long()
        uc = uf + 1
        vc = vf + 1
        du = u - uf
        dv = v - vf
        return CubemapBilerpQuery(
            CubemapCoord(idx.ax, idx.ori, uf, vf),
            CubemapCoord(idx.ax, idx.ori, uf, vc),
            CubemapCoord(idx.ax, idx.ori, uc, vf),
            CubemapCoord(idx.ax, idx.ori, uc, vc),
            du, dv)
    elif mode == 'linear':
        uf = torch.floor(idx.u).long()
        vf = torch.floor(idx.v).long()
        uc = uf + 1
        vc = vf + 1
        # Masks of texels that fall off the face on each side
        m0u = uf < 0
        m0v = vf < 0
        m1u = uc > (face_reso - 1)
        m1v = vc > (face_reso - 1)
        # Neighboring-axis indices for crossing the u / v edges
        ud = (idx.ax ^ 1) & 1
        vd = (idx.ax ^ 2) & 2
        def _index_across_sides(nidx : CubemapCoord, uori, vori, mu, mv):
            # Redirect out-of-face texels onto the adjacent face (in place)
            mdiagonal = mu & mv
            # FIXME not quite correct (matches CUDA impl)
            mu = mu & (~mdiagonal)
            mv = mv & (~mdiagonal)
            # Diagonal (corner) overflow: just clamp onto this face
            nidx.u[mdiagonal] = nidx.u[mdiagonal].clamp(0, face_reso - 1)
            nidx.v[mdiagonal] = nidx.v[mdiagonal].clamp(0, face_reso - 1)
            def _index_across_one_side(mask, d, ori, other_coord):
                # Re-express the coordinate in the neighboring face's (u, v) frame
                nax = d[mask]
                nud = (nax ^ 1) & 1
                # nvd = (nax ^ 2) & 2
                ax_is_u = nud == nidx.ax[mask]
                ax_is_v = ~ax_is_u
                ax_is_u_m = torch.zeros_like(mask)
                ax_is_u_m[mask] = ax_is_u
                ax_is_v_m = torch.zeros_like(mask)
                ax_is_v_m[mask] = ax_is_v
                nidx.u[ax_is_v_m] = other_coord[ax_is_v_m]
                nidx.v[ax_is_u_m] = other_coord[ax_is_u_m]
                nidx.u[ax_is_u_m] = nidx.ori[ax_is_u_m] * (face_reso - 1)
                nidx.v[ax_is_v_m] = nidx.ori[ax_is_v_m] * (face_reso - 1)
                nidx.ax[mask] = nax
                nidx.ori[mask] = ori
            _index_across_one_side(mu, ud, uori, nidx.v)
            _index_across_one_side(mv, vd, vori, nidx.u)
            return nidx
        i00 = _index_across_sides(CubemapCoord(idx.ax, idx.ori, uf, vf).clone(),
                                  0, 0, m0u, m0v)
        i01 = _index_across_sides(CubemapCoord(idx.ax, idx.ori, uf, vc).clone(),
                                  0, 1, m0u, m1v)
        i10 = _index_across_sides(CubemapCoord(idx.ax, idx.ori, uc, vf).clone(),
                                  1, 0, m1u, m0v)
        i11 = _index_across_sides(CubemapCoord(idx.ax, idx.ori, uc, vc).clone(),
                                  1, 1, m1u, m1v)
        du = idx.u - uf
        dv = idx.v - vf
        return CubemapBilerpQuery(
            i00,
            i01,
            i10,
            i11,
            du,
            dv)
    else:
        raise NotImplementedError() | Compute the points on the cubemap for bilinear sampling given a cubemap coordinate from dir_to_cubemap_coord; to be used with cubemap_sample. :param idx: CubemapCoord, cube map coordinate from dir_to_cubemap_coord :param face_reso: int, resolution of cubemap face :param mode: str, interpolation mode; one of nearest, linear_simple, linear; linear_simple interpolates per-face, while linear also interpolates across edges (this is the only one supported in CUDA) :return: CubemapBilerpQuery
21,056 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
@dataclass
class CubemapBilerpQuery:
    """The four cubemap texels plus fractional offsets for one bilinear lookup.

    Constructed positionally by cubemap_build_query, which requires the
    @dataclass-generated __init__ — the decorator was missing here.
    """
    i00: CubemapCoord   # texel at (floor(u), floor(v))
    i01: CubemapCoord   # texel at (floor(u), floor(v) + 1)
    i10: CubemapCoord   # texel at (floor(u) + 1, floor(v))
    i11: CubemapCoord   # texel at (floor(u) + 1, floor(v) + 1)
    du: torch.Tensor    # fractional offset along u
    dv: torch.Tensor    # fractional offset along v
The provided code snippet includes necessary dependencies for implementing the `cubemap_sample` function. Write a Python function `def cubemap_sample(cubemap: torch.Tensor, idx4 : CubemapBilerpQuery)` to solve the following problem:
Perform bilinear sampling on a cubemap given a query from cubemap_build_query :param cubemap: torch.Tensor float (6, face_reso, face_reso, C) or (B, 6, face_reso, face_reso, C) :param idx4: CubemapBilerpQuery from cubemap_build_query where each tensor has batch size B :return: (B, C)
Here is the function:
def cubemap_sample(cubemap: torch.Tensor, idx4 : CubemapBilerpQuery):
    """
    Perform bilinear sampling on a cubemap given a query from cubemap_build_query
    :param cubemap: torch.Tensor float
                    (6, face_reso, face_reso, C)
                    or
                    (B, 6, face_reso, face_reso, C)
    :param idx4: CubemapBilerpQuery from cubemap_build_query where
                 each tensor has batch size B
    :return: (B, C)
    """
    # NOTE(review): face_reso is computed but unused here
    face_reso = cubemap.size(2)
    # Fetch the four corner texels
    v00 = idx4.i00.query_in(cubemap)
    v01 = idx4.i01.query_in(cubemap)
    v10 = idx4.i10.query_in(cubemap)
    v11 = idx4.i11.query_in(cubemap)
    # Reshape weights so they broadcast across the channel dims
    du = idx4.du.view([-1] + (v00.dim() - 1) * [1])
    dv = idx4.dv.view([-1] + (v00.dim() - 1) * [1])
    # Standard bilinear blend: first along v, then along u
    r0 = v00 * (1 - dv) + v01 * dv
    r1 = v10 * (1 - dv) + v11 * dv
    return r0 * (1 - du) + r1 * du | Perform bilinear sampling on a cubemap given a query from cubemap_build_query :param cubemap: torch.Tensor float (6, face_reso, face_reso, C) or (B, 6, face_reso, face_reso, C) :param idx4: CubemapBilerpQuery from cubemap_build_query where each tensor has batch size B :return: (B, C)
21,057 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
def memlog(device='cuda'):
    """Debugging aid: print the CUDA memory summary and every live non-CPU tensor."""
    # Memory debugging
    print(torch.cuda.memory_summary(device))
    import gc
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) or (
                    hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                if str(obj.device) != 'cpu':
                    print(obj.device, '{: 10}'.format(obj.numel()),
                        obj.dtype,
                        obj.size(), type(obj))
        # Deliberate best-effort: some tracked objects raise on attribute access
        except:
            pass | null
21,058 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
The provided code snippet includes necessary dependencies for implementing the `spher2cart` function. Write a Python function `def spher2cart(theta : torch.Tensor, phi : torch.Tensor)` to solve the following problem:
Convert spherical coordinates into Cartesian coordinates on unit sphere.
Here is the function:
def spher2cart(theta : torch.Tensor, phi : torch.Tensor):
    """Convert spherical coordinates into Cartesian coordinates on unit sphere.

    :param theta: polar angle measured from the +z axis
    :param phi: azimuthal angle in the x-y plane
    :return: torch.Tensor (..., 3) of unit xyz directions
    """
    x = torch.sin(theta) * torch.cos(phi)
    y = torch.sin(theta) * torch.sin(phi)
    z = torch.cos(theta)
    return torch.stack([x, y, z], dim=-1) | Convert spherical coordinates into Cartesian coordinates on unit sphere.
21,059 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
The provided code snippet includes necessary dependencies for implementing the `eval_sg_at_dirs` function. Write a Python function `def eval_sg_at_dirs(sg_lambda : torch.Tensor, sg_mu : torch.Tensor, dirs : torch.Tensor)` to solve the following problem:
Evaluate spherical Gaussian functions at unit directions using learnable SG basis, without taking linear combination Works with torch. ... Can be 0 or more batch dimensions. N is the number of SG basis we use. :math:`Output = \sigma_{i}{exp ^ {\lambda_i * (\dot(\mu_i, \dirs) - 1)}` :param sg_lambda: The sharpness of the SG lobes. (N), positive :param sg_mu: The directions of the SG lobes. (N, 3), unit vector :param dirs: jnp.ndarray unit directions (..., 3) :return: (..., N)
Here is the function:
def eval_sg_at_dirs(sg_lambda : torch.Tensor, sg_mu : torch.Tensor, dirs : torch.Tensor):
    """
    Evaluate spherical Gaussian functions at unit directions
    using learnable SG basis,
    without taking linear combination
    Works with torch.
    ... Can be 0 or more batch dimensions.
    N is the number of SG basis we use.
    :math:`Output_i = exp(lambda_i * (dot(mu_i, dirs) - 1))`
    :param sg_lambda: The sharpness of the SG lobes. (N), positive
    :param sg_mu: The directions of the SG lobes. (N, 3), unit vector
    :param dirs: torch.Tensor unit directions (..., 3)
    :return: (..., N)
    """
    product = torch.einsum(
        "ij,...j->...i", sg_mu, dirs)  # [..., N]
    basis = torch.exp(torch.einsum(
        "i,...i->...i", sg_lambda, product - 1))  # [..., N]
    return basis | Evaluate spherical Gaussian functions at unit directions using learnable SG basis, without taking linear combination Works with torch. ... Can be 0 or more batch dimensions. N is the number of SG basis we use. :math:`Output = \sigma_{i}{exp ^ {\lambda_i * (\dot(\mu_i, \dirs) - 1)}` :param sg_lambda: The sharpness of the SG lobes. (N), positive :param sg_mu: The directions of the SG lobes. (N, 3), unit vector :param dirs: jnp.ndarray unit directions (..., 3) :return: (..., N)
21,060 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
def init_weights(m):
    """Initialize a module: Xavier-uniform weight and zero bias for nn.Linear;
    all other module types are left untouched."""
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.0) | null
21,061 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
The provided code snippet includes necessary dependencies for implementing the `cross_broadcast` function. Write a Python function `def cross_broadcast(x : torch.Tensor, y : torch.Tensor)` to solve the following problem:
Cross broadcasting for 2 tensors :param x: torch.Tensor :param y: torch.Tensor, should have the same ndim as x :return: tuple of cross-broadcasted tensors x, y. Any dimension where the size of x or y is 1 is expanded to the maximum size in that dimension among the 2. Formally, say the shape of x is (a1, ... an) and of y is (b1, ... bn); then the result has shape (a'1, ... a'n), (b'1, ... b'n) where :code:`a'i = ai if (ai > 1 and bi > 1) else max(ai, bi)` :code:`b'i = bi if (ai > 1 and bi > 1) else max(ai, bi)`
Here is the function:
def cross_broadcast(x : torch.Tensor, y : torch.Tensor):
    """
    Cross broadcasting for 2 tensors
    :param x: torch.Tensor
    :param y: torch.Tensor, should have the same ndim as x
    :return: tuple of cross-broadcasted tensors x, y. Any dimension where the size of x or y is 1
            is expanded to the maximum size in that dimension among the 2.
            Formally, say the shape of x is (a1, ... an)
            and of y is (b1, ... bn);
            then the result has shape (a'1, ... a'n), (b'1, ... b'n)
            where
            :code:`a'i = ai if (ai > 1 and bi > 1) else max(ai, bi)`
            :code:`b'i = bi if (ai > 1 and bi > 1) else max(ai, bi)`
    """
    assert x.ndim == y.ndim, "Only available if ndim is same for all tensors"
    # -1 marks dims where both sizes are > 1 (keep each tensor's own size there,
    # since max(size, -1) == size below)
    max_shape = [(-1 if (a > 1 and b > 1) else max(a,b)) for i, (a, b)
                 in enumerate(zip(x.shape, y.shape))]
    shape_x = [max(a, m) for m, a in zip(max_shape, x.shape)]
    shape_y = [max(b, m) for m, b in zip(max_shape, y.shape)]
    x = x.broadcast_to(shape_x)
    y = y.broadcast_to(shape_y)
    return x, y | Cross broadcasting for 2 tensors :param x: torch.Tensor :param y: torch.Tensor, should have the same ndim as x :return: tuple of cross-broadcasted tensors x, y. Any dimension where the size of x or y is 1 is expanded to the maximum size in that dimension among the 2. Formally, say the shape of x is (a1, ... an) and of y is (b1, ... bn); then the result has shape (a'1, ... a'n), (b'1, ... b'n) where :code:`a'i = ai if (ai > 1 and bi > 1) else max(ai, bi)` :code:`b'i = bi if (ai > 1 and bi > 1) else max(ai, bi)`
21,062 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
The provided code snippet includes necessary dependencies for implementing the `posenc` function. Write a Python function `def posenc( x: torch.Tensor, cov_diag: Optional[torch.Tensor], min_deg: int, max_deg: int, include_identity: bool = True, enable_ipe: bool = True, cutoff: float = 1.0, )` to solve the following problem:
Positional encoding function. Adapted from jaxNeRF (https://github.com/google-research/google-research/tree/master/jaxnerf). With support for mip-NeFF IPE (by passing cov_diag != 0, keeping enable_ipe=True). And BARF-nerfies frequency attenuation (setting cutoff) Cat x with a positional encoding of x with scales 2^[min_deg, max_deg-1], Instead of computing [sin(x), cos(x)], we use the trig identity cos(x) = sin(x + pi/2) and do one vectorized call to sin([x, x+pi/2]). :param x: torch.Tensor (..., D), variables to be encoded. Note that x should be in [-pi, pi]. :param cov_diag: torch.Tensor (..., D), diagonal cov for each variable to be encoded (IPE) :param min_deg: int, the minimum (inclusive) degree of the encoding. :param max_deg: int, the maximum (exclusive) degree of the encoding. if min_deg >= max_deg, positional encoding is disabled. :param include_identity: bool, if true then concatenates the identity :param enable_ipe: bool, if true then uses cov_diag to compute IPE, if available. Note cov_diag = 0 will give the same effect. :param cutoff: float, in [0, 1], a relative frequency cutoff as in BARF/nerfies. 1 = all frequencies, 0 = no frequencies :return: encoded torch.Tensor (..., D * (max_deg - min_deg) * 2 [+ D if include_identity]), encoded variables.
Here is the function:
def posenc(
    x: torch.Tensor,
    cov_diag: Optional[torch.Tensor],
    min_deg: int,
    max_deg: int,
    include_identity: bool = True,
    enable_ipe: bool = True,
    cutoff: float = 1.0,
):
    """
    Positional encoding function. Adapted from jaxNeRF
    (https://github.com/google-research/google-research/tree/master/jaxnerf).
    With support for mip-NeFF IPE (by passing cov_diag != 0, keeping enable_ipe=True).
    And BARF-nerfies frequency attenuation (setting cutoff)
    Cat x with a positional encoding of x with scales 2^[min_deg, max_deg-1],
    Instead of computing [sin(x), cos(x)], we use the trig identity
    cos(x) = sin(x + pi/2) and do one vectorized call to sin([x, x+pi/2]).
    :param x: torch.Tensor (..., D), variables to be encoded. Note that x should be in [-pi, pi].
    :param cov_diag: torch.Tensor (..., D), diagonal cov for each variable to be encoded (IPE)
    :param min_deg: int, the minimum (inclusive) degree of the encoding.
    :param max_deg: int, the maximum (exclusive) degree of the encoding. if min_deg >= max_deg,
    positional encoding is disabled.
    :param include_identity: bool, if true then concatenates the identity
    :param enable_ipe: bool, if true then uses cov_diag to compute IPE, if available.
    Note cov_diag = 0 will give the same effect.
    :param cutoff: float, in [0, 1], a relative frequency cutoff as in BARF/nerfies. 1 = all frequencies,
    0 = no frequencies
    :return: encoded torch.Tensor (..., D * (max_deg - min_deg) * 2 [+ D if include_identity]),
    encoded variables.
    """
    # Degenerate case: no frequency bands requested -> encoding disabled.
    # NOTE(review): this returns x even when include_identity=False — confirm callers expect that.
    if min_deg >= max_deg:
        return x
    # Frequency scales 2^min_deg .. 2^(max_deg-1), one per band.
    scales = torch.tensor([2 ** i for i in range(min_deg, max_deg)], device=x.device)
    half_enc_dim = x.shape[-1] * scales.shape[0]
    shapeb = list(x.shape[:-1]) + [half_enc_dim]  # (..., D * (max_deg - min_deg))
    # Outer product of inputs with scales, flattened into the last dimension.
    xb = torch.reshape((x[..., None, :] * scales[:, None]), shapeb)
    # sin of [xb, xb + pi/2] computes [sin(xb), cos(xb)] in one call.
    four_feat = torch.sin(
        torch.cat([xb, xb + 0.5 * np.pi], dim=-1)
    )  # (..., D * (max_deg - min_deg) * 2)
    if enable_ipe and cov_diag is not None:
        # Apply integrated positional encoding (IPE): attenuate each band by
        # exp(-0.5 * variance) with variance scaled by the squared frequency.
        xb_var = torch.reshape((cov_diag[..., None, :] * scales[:, None] ** 2), shapeb)
        xb_var = torch.tile(xb_var, (2,))  # (..., D * (max_deg - min_deg) * 2)
        four_feat = four_feat * torch.exp(-0.5 * xb_var)
    if cutoff < 1.0:
        # BARF/nerfies, could be made cleaner.
        # _cutoff_mask is a helper defined elsewhere in the original module.
        cutoff_mask = _cutoff_mask(
            scales, cutoff * (max_deg - min_deg)
        )  # (max_deg - min_deg,)
        # Reshape so the per-band mask broadcasts over the sin/cos and D axes.
        four_feat = four_feat.view(shapeb[:-1] + [2, scales.shape[0], x.shape[-1]])
        four_feat = four_feat * cutoff_mask[..., None]
        four_feat = four_feat.view(shapeb[:-1] + [2 * scales.shape[0] * x.shape[-1]])
    if include_identity:
        four_feat = torch.cat([x] + [four_feat], dim=-1)
return four_feat | Positional encoding function. Adapted from jaxNeRF (https://github.com/google-research/google-research/tree/master/jaxnerf). With support for mip-NeFF IPE (by passing cov_diag != 0, keeping enable_ipe=True). And BARF-nerfies frequency attenuation (setting cutoff) Cat x with a positional encoding of x with scales 2^[min_deg, max_deg-1], Instead of computing [sin(x), cos(x)], we use the trig identity cos(x) = sin(x + pi/2) and do one vectorized call to sin([x, x+pi/2]). :param x: torch.Tensor (..., D), variables to be encoded. Note that x should be in [-pi, pi]. :param cov_diag: torch.Tensor (..., D), diagonal cov for each variable to be encoded (IPE) :param min_deg: int, the minimum (inclusive) degree of the encoding. :param max_deg: int, the maximum (exclusive) degree of the encoding. if min_deg >= max_deg, positional encoding is disabled. :param include_identity: bool, if true then concatenates the identity :param enable_ipe: bool, if true then uses cov_diag to compute IPE, if available. Note cov_diag = 0 will give the same effect. :param cutoff: float, in [0, 1], a relative frequency cutoff as in BARF/nerfies. 1 = all frequencies, 0 = no frequencies :return: encoded torch.Tensor (..., D * (max_deg - min_deg) * 2 [+ D if include_identity]), encoded variables. |
21,063 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
def net_to_dict(out_dict : dict,
                prefix : str,
                model : nn.Module):
    """Flatten *model*'s parameters into *out_dict* as numpy arrays.

    Keys have the form ``pt__<prefix>__<layer>__<param>`` so the tensors can
    be restored later by ``net_from_dict``.
    """
    # Iterate direct children; named_parameters() walks each child's full
    # subtree, so every parameter is still visited exactly once.
    # (Removed an unused ``layer_params = {}`` local from the original.)
    for layer_name, layer in model.named_children():
        for param_name, param in layer.named_parameters():
            # Detach to CPU numpy so the stored dict is framework-agnostic.
            param_value = param.data.cpu().numpy()
out_dict['pt__' + prefix + '__' + layer_name + '__' + param_name] = param_value | null |
21,064 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
def net_from_dict(in_dict,
                  prefix : str,
                  model : nn.Module):
    """Restore *model*'s parameters in place from a dict written by net_to_dict.

    Looks up each parameter under the ``pt__<prefix>__<layer>__<param>`` key;
    raises KeyError if a parameter is missing from *in_dict*.
    """
    for child in model.named_children():
        layer_name = child[0]
        # NOTE(review): layer_params is never used — candidate for removal.
        layer_params = {}
        for param in child[1].named_parameters():
            param_name = param[0]
            # numpy array stored by net_to_dict; copied into the live tensor
            # by the in-place slice assignment on the following statement.
            value = in_dict['pt__' + prefix + '__' + layer_name + '__' + param_name]
param_value = param[1].data[:] = torch.from_numpy(value).to(
device=param[1].data.device) | null |
21,065 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
The provided code snippet includes necessary dependencies for implementing the `convert_to_ndc` function. Write a Python function `def convert_to_ndc(origins, directions, ndc_coeffs, near: float = 1.0)` to solve the following problem:
Convert a set of rays to NDC coordinates.
Here is the function:
def convert_to_ndc(origins, directions, ndc_coeffs, near: float = 1.0):
    """Convert a set of rays to NDC coordinates."""
    # Advance each origin along its ray so it sits on the near plane.
    # (Original note: not sure if this shift is strictly needed.)
    shift = (near - origins[..., 2]) / directions[..., 2]
    origins = origins + shift[..., None] * directions

    dx, dy, dz = directions.unbind(-1)
    ox, oy, oz = origins.unbind(-1)

    # Perspective projection of origin and direction components.
    proj_ox = ndc_coeffs[0] * (ox / oz)
    proj_oy = ndc_coeffs[1] * (oy / oz)
    proj_oz = 1 - 2 * near / oz
    proj_dx = ndc_coeffs[0] * (dx / dz - ox / oz)
    proj_dy = ndc_coeffs[1] * (dy / dz - oy / oz)
    proj_dz = 2 * near / oz

    origins = torch.stack([proj_ox, proj_oy, proj_oz], -1)
    directions = torch.stack([proj_dx, proj_dy, proj_dz], -1)
return origins, directions | Convert a set of rays to NDC coordinates. |
21,066 | from functools import partial
import torch
from torch import nn
from typing import Optional, Tuple
import numpy as np
from dataclasses import dataclass
import math
The provided code snippet includes necessary dependencies for implementing the `xyz2equirect` function. Write a Python function `def xyz2equirect(bearings, reso)` to solve the following problem:
Convert ray direction vectors into equirectangular pixel coordinates. Inverse of equirect2xyz. Taken from Vickie Ye
Here is the function:
def xyz2equirect(bearings, reso):
    """
    Convert ray direction vectors into equirectangular pixel coordinates.
    Inverse of equirect2xyz.
    Taken from Vickie Ye
    """
    # Spherical angles: latitude from the y component, longitude from x over z.
    latitude = torch.asin(bearings[..., 1])
    longitude = torch.atan2(bearings[..., 0], bearings[..., 2])
    # Map angles onto an image of height `reso` and width `2 * reso`.
    x = reso * 2 * (0.5 + longitude / 2 / np.pi)
    y = reso * (0.5 - latitude / np.pi)
return torch.stack([x, y], dim=-1) | Convert ray direction vectors into equirectangular pixel coordinates. Inverse of equirect2xyz. Taken from Vickie Ye |
21,067 |
def setup(app):
    """Sphinx extension entry point.

    Imports the local ``zh`` module so its SearchChinese class can be
    registered into sphinx.search.languages (done by the statement that
    follows in the original file).
    """
    import sphinx.search as search
    import zh
search.languages["zh_CN"] = zh.SearchChinese | null |
21,068 | import os
import subprocess
import platform
base_link = "http://python.iswbm.com/en/latest/"
def get_file_info(filename):
    """Read the first line of *filename*, strip Markdown '#' markers, and
    split it at the first space into ``[index, title]``."""
    with open(filename, 'r', encoding="utf-8") as handle:
        heading = handle.readline()
    cleaned = heading.replace("#", "").strip()
    return cleaned.split(' ', 1)
def make_line(chapter, file):
    """Build one Markdown bullet '- <index> [<title>](<url>)\\n' for *file*
    under the given chapter slug, using the site-wide base_link prefix."""
    page_name, _ = os.path.splitext(file)
    # First line of the page provides "<index> <title>".
    (index, title) = get_file_info(file)
    url = base_link + chapter + "/" + page_name + ".html"
    item_list = ["-", index, "[{}]({})\n".format(title, url)]
return " ".join(item_list) | null |
21,069 | import os
import subprocess
import platform
index_path = os.path.join(pwd, "README.md")
readme_header = '''

<p align="center">
<img src='https://img.shields.io/badge/language-Python-blue.svg' alt="Build Status">
<img src='https://img.shields.io/badge/framwork-Sphinx-green.svg'>
<a href='https://www.zhihu.com/people/wongbingming'><img src='https://img.shields.io/badge/dynamic/json?color=0084ff&logo=zhihu&label=%E7%8E%8B%E7%82%B3%E6%98%8E&query=%24.data.totalSubs&url=https%3A%2F%2Fapi.spencerwoo.com%2Fsubstats%2F%3Fsource%3Dzhihu%26queryKey%3Dwongbingming'></a>
<a href='https://juejin.im/user/5b08d982f265da0db3502c55'><img src='https://img.shields.io/badge/掘金-2481-blue'></a>
<a href='http://image.iswbm.com/20200607114246.png'><img src='http://img.shields.io/badge/%E5%85%AC%E4%BC%97%E5%8F%B7-30k+-brightgreen'></a>
</p>
## [项目主页](http://python.iswbm.com/)
在线阅读:[Python 编程时光](http://python.iswbm.com/)

## 文章结构

'''
readme_tooter = '''
---

'''
The provided code snippet includes necessary dependencies for implementing the `render_index_page` function. Write a Python function `def render_index_page(index_info)` to solve the following problem:
生成 readme.md 索引文件,包含所有文件目录
Here is the function:
def render_index_page(index_info):
    '''
    Render the README.md index file, listing every chapter and its pages.
    '''
    # Sort chapters by key (string comparison on the chapter number).
    index_info = sorted(index_info.items(), key=lambda item:item[0], reverse=False)
    # Write the header, then one "## <chapter>" section with its content lines.
    with open(index_path, 'w+', encoding="utf-8") as file:
        file.write(readme_header)
        for chp, info in index_info:
            chp_name = info["name"]
            file.write("## " + chp_name + "\n")
            for line in info["contents"]:
                file.write(line)
            file.write("\n")
file.write(readme_tooter) | 生成 readme.md 索引文件,包含所有文件目录 |
21,070 | import os
import subprocess
import platform
The provided code snippet includes necessary dependencies for implementing the `convert_md5_to_rst` function. Write a Python function `def convert_md5_to_rst(file)` to solve the following problem:
转换格式:Markdown(md)转换成 rst
Here is the function:
def convert_md5_to_rst(file):
    '''
    Convert a Markdown (.md) page to reStructuredText via pandoc.
    NOTE(review): the name says "md5" but the input is a Markdown file.
    '''
    (filename, extension) = os.path.splitext(file)
    convert_cmd = 'pandoc -V mainfont="SimSun" -f markdown -t rst {md_file} -o {rst_file}'.format(
        md_file=filename+'.md', rst_file=filename+'.rst'
    )
    # status, output = commands.getstatusoutput(convert_cmd)
    # NOTE(review): naive split breaks on paths containing spaces; a real
    # argv list would be safer.
    status = subprocess.call(convert_cmd.split(" "))
    if status != 0:
        print("命令执行失败: " + convert_cmd)
        os._exit(1)
    # NOTE(review): the else branch below is unreachable — os._exit(1) above
    # already terminated the process for every non-zero status.
    if status == 0:
        print(file + ' 处理完成')
    else:
print(file + '处理失败') | 转换格式:md5转换成rst |
21,071 | import os
import subprocess
import platform
blog_path = os.path.join(pwd, "source")
The provided code snippet includes necessary dependencies for implementing the `get_all_dir` function. Write a Python function `def get_all_dir()` to solve the following problem:
获取所有的目录
Here is the function:
def get_all_dir():
    '''
    Return the absolute paths of all directories directly under blog_path.
    '''
    dir_list = []
    file_list = os.listdir(blog_path)
    for item in file_list:
        abs_path = os.path.join(blog_path, item)
        # Keep only directories; plain files are skipped.
        if os.path.isdir(abs_path):
            dir_list.append(abs_path)
return dir_list | 获取所有的目录 |
21,072 | import os
import subprocess
import platform
blog_path = os.path.join(pwd, "source")
The provided code snippet includes necessary dependencies for implementing the `init_index_info` function. Write a Python function `def init_index_info()` to solve the following problem:
初始化索引
Here is the function:
def init_index_info():
    '''
    Initialize the index: one entry per chapter file under source/chapters,
    keyed by the filename with every 'p' removed, holding the chapter name
    (second line of the file) and an empty contents list.
    '''
    index_info = {}
    chapter_dir = os.path.join(blog_path, "chapters")
    os.chdir(chapter_dir)
    for file in os.listdir(chapter_dir):
        name, _ = os.path.splitext(file)
        with open(file, 'r', encoding="utf-8") as f:
            # The chapter title sits on the second line of each chapter file.
            chapter_name = f.readlines()[1].strip()
        # NOTE(review): replace("p", "") strips every 'p', not just a prefix.
        index_info[name.replace("p", "")] = {"name": chapter_name, "contents": []}
return index_info | 初始化索引 |
21,073 | import os
import re
import linecache
from glob import glob
source_dir = os.path.join(pwd, "source")
def get_all_chapter():
    """Return chapter directory names matching c* under source/, excluding
    the 'chapters' directory and 'conf.py'."""
    all_chapters_path = []
    os.chdir(source_dir)
    for dir_name in glob("c*"):
        if dir_name == "chapters" or dir_name == "conf.py":
            continue
        # NOTE(review): os.path.join with one argument is a no-op.
        all_chapters_path.append(os.path.join(dir_name))
return all_chapters_path | null |
21,074 | import os
import re
import linecache
from glob import glob
pwd = os.getcwd()
def get_chapter_name(file):
    """Return the second line of *file*, stripped of surrounding whitespace."""
    second_line = linecache.getline(file, 2)
    return second_line.strip()
def generate_mapping(all_chapters_path):
    """Map each chapter directory basename to its human-readable chapter name,
    read from the matching file under source/chapters/."""
    mapping = dict.fromkeys([os.path.basename(chapter_path) for chapter_path in all_chapters_path])
    for key in mapping.keys():
        # Chapter files mirror the dir names with 'c' swapped for 'p'.
        # NOTE(review): str.replace swaps every 'c', not only the prefix.
        chapter_file = os.path.join(pwd, "source", "chapters", key.replace("c", "p") + ".rst")
        mapping[key] = get_chapter_name(chapter_file)
return mapping | null |
21,075 | import os
import re
import linecache
from glob import glob
source_dir = os.path.join(pwd, "source")
def get_title(file):
    """Return the stripped first line of *file* if it is a Markdown heading
    (starts with '#'); otherwise return None."""
    line = linecache.getline(file, 1)
    if not line.startswith("#"):
        return None
    return line.strip()
def get_toc_info(all_chapters_path):
    """Scan each chapter directory for cNN_MM.md pages and collect
    {chapter_dir: {section_number: (title, html_url)}}."""
    toc = {}
    for dir_name in all_chapters_path:
        chapter_toc = {}
        os.chdir(os.path.join(source_dir, dir_name))
        for file_name in sorted(glob(dir_name + "*.md")):
            # Section number is the MM part of the cNN_MM.md filename.
            section = int(re.findall(r"c\d{2}_(\d{2}).md", file_name)[0])
            # NOTE(review): replace("md", "html") swaps every "md" substring,
            # not just the extension.
            md_path = os.path.join("https://magic.iswbm.com/", dir_name, file_name.replace("md", "html"))
            title = get_title(file_name)
            # Pages without a Markdown heading are skipped.
            if not title:
                continue
            chapter_toc[section] = (title, md_path)
        toc[dir_name] = chapter_toc
return toc | null |
21,076 | import os
import re
import linecache
from glob import glob
def print_md_toc(toc_info, mapping):
    """Print a Markdown table of contents: one bullet per chapter (name taken
    from *mapping*), with an indented link per page, both sorted by key."""
    for chapter in sorted(toc_info.items(), key=lambda item: item[0]):
        posts = chapter[1]
        chapter_name = mapping[chapter[0]]
        print(f"- **{chapter_name}**")
        for post in sorted(posts.items(), key=lambda item:item[0]):
            # print title only
            # print(f"{post[1][0]}")
print(" ", f"* [{post[1][0]}]({post[1][1]})") | null |
21,077 | import json
import os
import argparse
import deepspeed
import deepspeed.comm as dist
import numpy as np
import sentencepiece as spm
import torch
from models.configuration_baichuan import BaiChuanConfig
from models.modeling_baichuan import BaiChuanForCausalLM
def get_argument_parser():
    """Build the CLI parser for pre-training: corpus location, tokenizer,
    sequence length, checkpoint interval/path, and deepspeed's local_rank."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="data_dir",
                        help="Text files to do pre-train on")
    parser.add_argument("--tokenizer_path", type=str,
                        default="tokenizer.model",
                        help="Tokenizer model file path")
    parser.add_argument("--max_length", type=int, default=4096,
                        help="Max tokens per sentence in corpus")
    parser.add_argument("--steps_per_epoch", type=int, default=4096,
                        help="Step intervals to save checkpoint")
    parser.add_argument("--checkpoint_saving_path", type=str,
                        default="checkpoints",
                        help="Path to store checkpoint files")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="Reserved for deepspeed framework")
return parser | null |
21,078 | import json
import os
import argparse
import deepspeed
import deepspeed.comm as dist
import numpy as np
import sentencepiece as spm
import torch
from models.configuration_baichuan import BaiChuanConfig
from models.modeling_baichuan import BaiChuanForCausalLM
args = arg_parser.parse_args()
class DataEngine():
    """Tokenizes a text corpus and serves packed token batches.

    Input files are sharded across distributed ranks; each rank only loads
    the files whose index matches its rank modulo world size. Tokens are
    packed into flat chunks of micro_batch_size * (max_length + 1) ids.
    """
    def __init__(self, data_dir, tokenizer_path, micro_batch_size, max_length):
        # Lines that tokenize to fewer ids than this are discarded.
        self.MIN_TEXT_LEN = 20
        # Sentencepiece end-of-sentence id appended after every kept line.
        self.EOS_TOKEN_ID = 2
        self.data_dir = data_dir
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(tokenizer_path)
        self.micro_batch_size = micro_batch_size
        self.max_length = max_length
        # Filled by load_data(): list of flat token chunks.
        self.data = []
        self.global_input_paths = [self.data_dir + "/" + x
                                   for x in os.listdir(self.data_dir)]
        # Round-robin shard of the file list owned by this rank.
        self.local_input_paths = [x for i, x in
                                  enumerate(self.global_input_paths)
                                  if i % dist.get_world_size() == dist.get_rank()]

    def load_data(self):
        """Tokenize every local file and pack the ids into fixed-size chunks.

        Leftover tokens at the end of a file that do not fill a whole chunk
        are dropped.
        """
        for file_path in self.local_input_paths:
            data = []
            with open(file_path, encoding="utf-8", errors="ignore") as f:
                for line_id, line in enumerate(f):
                    cc = self.sp.EncodeAsIds(line.strip()) + [self.EOS_TOKEN_ID]
                    # Skip lines that are too short to be useful text.
                    if len(cc) < self.MIN_TEXT_LEN:
                        cc = []
                    data.extend(cc)
                    if len(data) >= self.micro_batch_size * (self.max_length + 1):
                        index = self.micro_batch_size * (self.max_length + 1)
                        self.data.append(data[:index])
                        data = []
        return

    def get_data(self):
        """Pop the next chunk and return it as a (micro_batch_size,
        max_length + 1) LongTensor on the current GPU."""
        data = self.data.pop(0)
        seq = np.asarray(data).reshape(self.micro_batch_size, self.max_length + 1)
        data = torch.LongTensor(seq)
        data = data.cuda(non_blocking=True)
        return data
def prepare_data():
    """Construct a DataEngine from CLI args and eagerly tokenize the corpus."""
    data_dir = args.data_dir
    tokenizer_path = args.tokenizer_path
    # Micro-batch size comes from the deepspeed JSON config, not argparse.
    ds_config = json.load(open(args.deepspeed_config))
    micro_batch_size = ds_config["train_micro_batch_size_per_gpu"]
    max_length = args.max_length
    data_engine = DataEngine(data_dir, tokenizer_path, micro_batch_size, max_length)
    data_engine.load_data()
return data_engine | null |
21,079 | import json
import os
import argparse
import deepspeed
import deepspeed.comm as dist
import numpy as np
import sentencepiece as spm
import torch
from models.configuration_baichuan import BaiChuanConfig
from models.modeling_baichuan import BaiChuanForCausalLM
args = arg_parser.parse_args()
deepspeed.init_distributed()
class BaiChuanConfig(PretrainedConfig):
    """Configuration for the BaiChuan causal language model.

    Holds the architecture hyper-parameters (vocabulary/hidden sizes, layer
    and head counts, activation, RMSNorm epsilon) and forwards special-token
    ids and the embedding-tying flag to the PretrainedConfig base class.
    """
    model_type = "baichuan"
    # Keys generation utilities should not treat as regular model outputs.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=64000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
class BaiChuanForCausalLM(PreTrainedModel):
    """BaiChuan decoder-only transformer with a linear LM head for causal LM."""

    def __init__(self, config):
        super().__init__(config)
        self.model = Model(config)
        # Project final hidden states to vocabulary logits.
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    # Standard HF accessor plumbing for embeddings and the decoder.
    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Returns:
        Example:
        ```python
        >>> from transformers import AutoTokenizer, ModelForCausalLM
        >>> model = ModelForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
        if not return_dict:
            # Tuple output: loss (if computed) followed by logits and the
            # remaining decoder outputs.
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        """Trim inputs for incremental decoding and derive position_ids."""
        # With a cache, only the newest token needs to be fed to the model.
        if past_key_values:
            input_ids = input_ids[:, -1:]
        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}
        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    # NOTE(review): defined without `self` and without @staticmethod — an
    # instance call would bind past_key_values to self; confirm intended
    # usage / consider marking it @staticmethod.
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
def prepare_model():
    """Build the BaiChuan model under ZeRO init and wrap it in a deepspeed engine."""
    # zero.Init partitions parameters across ranks at construction time.
    with deepspeed.zero.Init(config_dict_or_path=args.deepspeed_config,
                             enabled=True,
                             mem_efficient_linear=False,
                             mpu=None):
        model = BaiChuanForCausalLM(BaiChuanConfig())
    # Only trainable parameters are handed to the optimizer setup.
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    model_engine, _, _, _ = deepspeed.initialize(args=args,
                                                 model=model,
                                                 optimizer=None,
                                                 model_parameters=model_parameters)
return model_engine | null |
21,080 | import json
import os
import argparse
import deepspeed
import deepspeed.comm as dist
import numpy as np
import sentencepiece as spm
import torch
from models.configuration_baichuan import BaiChuanConfig
from models.modeling_baichuan import BaiChuanForCausalLM
args = arg_parser.parse_args()
def train(data_engine, model_engine):
    """Run args.steps_per_epoch optimization steps.

    Each step pulls a packed token batch and uses it as both input and
    labels (causal LM); the deepspeed engine handles backward and step.
    """
    model_engine.train()
    step = 0
    while step < args.steps_per_epoch:
        data = data_engine.get_data()
        loss = model_engine(data, labels=data).loss
        model_engine.backward(loss)
        model_engine.step()
        step += 1
return | null |
21,081 | import argparse
import json
import os
from tqdm import tqdm
import numpy as np
import torch
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizerBase,
)
def parse_argument():
    """Parse evaluation CLI args: model path (required), few-shot count,
    dataset split, and output directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name_or_path", type=str, required=True, help="model name or path"
    )
    parser.add_argument(
        "--shot", type=int, default=5, help="number of shot for few-shot learning"
    )
    parser.add_argument(
        "--split", type=str, default="val", help="split of dataset to evaluate"
    )
    parser.add_argument(
        "--output_dir", type=str, default="ceval_output", help="output directory"
    )
return parser.parse_args() | null |
21,082 | import argparse
import os
import torch
import numpy as np
import pandas as pd
from categories import subcategories, categories
from transformers import AutoTokenizer,AutoModelForCausalLM
import time
choices = ["A", "B", "C", "D"]
def format_example(df, idx, include_answer=True):
    """Format row *idx* of *df* as a lettered multiple-choice question.

    Column 0 holds the question, the middle columns the options, and the
    last column the answer letter (appended when include_answer=True).
    """
    n_options = df.shape[1] - 2
    parts = [df.iloc[idx, 0]]
    for opt in range(n_options):
        parts.append("\n{}. {}".format(choices[opt], df.iloc[idx, opt + 1]))
    parts.append("\nAnswer:")
    prompt = "".join(parts)
    if include_answer:
        prompt += " {}\n\n".format(df.iloc[idx, n_options + 1])
    return prompt
def gen_prompt(train_df, subject, k=-1):
    """Build a k-shot prompt from the first k rows of *train_df*
    (k == -1 means use every row)."""
    header = "The following are multiple choice questions (with answers) about {}.\n\n".format(
        format_subject(subject)
    )
    n_shots = train_df.shape[0] if k == -1 else k
    examples = [format_example(train_df, i) for i in range(n_shots)]
    return header + "".join(examples)
def eval(args, subject, model, tokenizer, dev_df, test_df):
    """Few-shot evaluate *model* on one MMLU-style subject.

    For each test row, builds an args.ntrain-shot prompt from dev_df,
    compares the model's next-token probabilities over A/B/C/D against the
    gold label, and accumulates per-question correctness.
    NOTE(review): this shadows the builtin eval() within the module.
    """
    cors = []
    all_probs = []
    # NOTE(review): `answers` is computed but never used below.
    answers = choices[: test_df.shape[1] - 2]
    for i in range(test_df.shape[0]):
        # get prompt and make sure it fits
        k = args.ntrain
        prompt_end = format_example(test_df, i, include_answer=False)
        train_prompt = gen_prompt(dev_df, subject, k)
        prompt = train_prompt + prompt_end
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
        # Drop few-shot examples one at a time until the prompt fits 2048 tokens.
        while input_ids.shape[-1] > 2048:
            k -= 1
            train_prompt = gen_prompt(dev_df, subject, k)
            prompt = train_prompt + prompt_end
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
        label = test_df.iloc[i, test_df.shape[1] - 1]
        # Logits for the position following the prompt (next-token prediction).
        logits = model(
            input_ids=input_ids,
        ).logits[:,-1].flatten()
        # Softmax restricted to the four option letters' token ids.
        probs = (
            torch.nn.functional.softmax(
                torch.tensor(
                    [
                        logits[tokenizer("A").input_ids[-1]],
                        logits[tokenizer("B").input_ids[-1]],
                        logits[tokenizer("C").input_ids[-1]],
                        logits[tokenizer("D").input_ids[-1]],
                    ]
                ),
                dim=0,
            )
            .detach()
            .cpu()
            .to(torch.float32)
            .numpy()
        )
        pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs)]
        cor = pred == label
        cors.append(cor)
        all_probs.append(probs)
    acc = np.mean(cors)
    cors = np.array(cors)
    all_probs = np.array(all_probs)
    print("Average accuracy {:.3f} - {}".format(acc, subject))
return cors, acc, all_probs | null |
21,083 | import math
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch.utils.checkpoint
from transformers import PreTrainedModel, add_start_docstrings
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from transformers.utils import logging, add_start_docstrings_to_model_forward, replace_return_docstrings
from xformers import ops as xops
from .configuration_baichuan import BaiChuanConfig
The provided code snippet includes necessary dependencies for implementing the `_make_causal_mask` function. Write a Python function `def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 )` to solve the following problem:
Make causal mask used for bi-directional self-attention.
Here is the function:
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Build the additive causal mask: 0 where attention is allowed (key pos <=
    query pos), dtype's most negative value on the strict upper triangle.
    """
    bsz, tgt_len = input_ids_shape
    neg_inf = torch.finfo(dtype).min
    # Strict upper triangle blocks attention to future positions;
    # everything else (diagonal included) is zeroed out.
    full_block = torch.full((tgt_len, tgt_len), neg_inf, device=device)
    mask = torch.triu(full_block, diagonal=1).to(dtype)
    if past_key_values_length > 0:
        # Cached (past) positions are visible from every query position.
        past_block = torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device)
        mask = torch.cat([past_block, mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) | Make causal mask used for bi-directional self-attention. |
21,084 | import math
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch.utils.checkpoint
from transformers import PreTrainedModel, add_start_docstrings
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from transformers.utils import logging, add_start_docstrings_to_model_forward, replace_return_docstrings
from xformers import ops as xops
from .configuration_baichuan import BaiChuanConfig
The provided code snippet includes necessary dependencies for implementing the `_expand_mask` function. Write a Python function `def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)` to solve the following problem:
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
Here is the function:
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Broadcast a padding mask of shape `[bsz, src_len]` up to
    `[bsz, 1, tgt_len, src_len]` and invert it (1 = keep, 0 = masked).
    """
    bsz, src_len = mask.size()
    if tgt_len is None:
        tgt_len = src_len
    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
    # After inversion, positions to block carry 1.0; the caller-visible
    # additive mask is produced by the masked_fill on the return statement.
    inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) | Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. |
21,085 | import math
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch.utils.checkpoint
from transformers import PreTrainedModel, add_start_docstrings
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from transformers.utils import logging, add_start_docstrings_to_model_forward, replace_return_docstrings
from xformers import ops as xops
from .configuration_baichuan import BaiChuanConfig
def rotate_half(x):
    """Rotates half the hidden dims of the input: (a, b) -> (-b, a)."""
    half = x.shape[-1] // 2
    front, back = x[..., :half], x[..., half:]
    return torch.cat((-back, front), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    """Apply rotary position embeddings to query and key tensors."""
    # cos/sin arrive as [1, 1, seq_len, dim]; reduce to per-position tables,
    # gather the rows selected by position_ids, then restore a head axis.
    cos_table = cos.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin_table = sin.squeeze(1).squeeze(0)[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    q_embed = (q * cos_table) + (rotate_half(q) * sin_table)
    k_embed = (k * cos_table) + (rotate_half(k) * sin_table)
return q_embed, k_embed | null |
21,086 | import os
from typing import Dict, List, Tuple
from setuptools import find_packages, setup
def _setup_packages() -> List:
    """Discover the installable ``sparseml`` packages under the src/ layout."""
    return find_packages(
        "src", include=["sparseml", "sparseml.*"], exclude=["*.__pycache__.*"]
    ) | null |
21,087 | import os
from typing import Dict, List, Tuple
from setuptools import find_packages, setup
def _setup_package_dir() -> Dict:
    """Map the root package namespace to the ``src`` layout directory."""
    return {"": "src"} | null |
21,088 | import os
from typing import Dict, List, Tuple
from setuptools import find_packages, setup
_deps = [
"setuptools<=59.5.0",
"pyyaml>=5.0.0",
"numpy>=1.0.0",
"matplotlib>=3.0.0",
"merge-args>=0.1.0",
"onnx>=1.5.0,<1.15.0",
"pandas>=0.25.0",
"packaging>=20.0",
"psutil>=5.0.0",
"pydantic>=1.8.2,<2.0.0",
"requests>=2.0.0",
"scikit-learn>=0.24.2",
"scipy<1.9.2,>=1.8; python_version <= '3.9'",
"scipy>=1.0.0; python_version > '3.9'",
"tqdm>=4.0.0",
"toposort>=1.0",
"GPUtil>=1.4.0",
"protobuf>=3.12.2,<=3.20.3",
"click>=7.1.2,!=8.0.0", # latest version < 8.0 + blocked version with reported bug
]
_nm_deps = [f"{'sparsezoo' if is_release else 'sparsezoo-nightly'}~={version_nm_deps}"]
def _setup_install_requires() -> List:
    """Combine the Neural Magic pinned deps with the general requirements."""
    return _nm_deps + _deps | null |
21,089 | import os
from typing import Dict, List, Tuple
from setuptools import find_packages, setup
# ---- optional-dependency groups consumed by _setup_extras() ----

# DeepSparse inference engine; nightly channel for non-release builds.
_deepsparse_deps = [
    f"{'deepsparse' if is_release else 'deepsparse-nightly'}~={version_nm_deps}"
]
_deepsparse_ent_deps = [f"deepsparse-ent~={version_nm_deps}"]
_onnxruntime_deps = ["onnxruntime>=1.0.0"]
_clip_deps = ["open_clip_torch==2.20.0"]
# NOTE(review): `supported_torch_version` is defined elsewhere in this
# setup module — confirm it is in scope here.
_pytorch_deps = [
    supported_torch_version,
    "gputils",
]
_pytorch_all_deps = _pytorch_deps + [
    "torchvision>=0.3.0,<0.17",
    "torchaudio<=2.0.1",
]
_pytorch_vision_deps = _pytorch_deps + [
    "torchvision>=0.3.0,<0.17",
    "opencv-python<=4.6.0.66",
]
# Neural Magic's transformers fork plus the HF-ecosystem tooling around it.
_transformers_deps = _pytorch_deps + [
    f"{'nm-transformers' if is_release else 'nm-transformers-nightly'}"
    f"~={version_nm_deps}",
    "datasets<=2.14.6",
    "dvc",
    "scikit-learn",
    "seqeval",
    "einops",
    "evaluate>=0.4.1",
    "accelerate>=0.20.3",
    "safetensors>=0.4.1",
]
_llm_deps = _transformers_deps + ["sentencepiece"]
_yolov5_deps = _pytorch_vision_deps + [
    f"{'nm-yolov5' if is_release else 'nm-yolov5-nightly'}~={version_nm_deps}"
]
_notebook_deps = [
    "jupyter>=1.0.0",
    "ipywidgets>=7.0.0",
]
# Legacy TensorFlow 1.x support (CPU and GPU variants).
_tensorflow_v1_deps = ["tensorflow<2.0.0", "tensorboard<2.0.0", "tf2onnx>=1.0.0,<1.6"]
_tensorflow_v1_gpu_deps = [
    "tensorflow-gpu<2.0.0",
    "tensorboard<2.0.0",
    "tf2onnx>=1.0.0,<1.6",
]
_keras_deps = ["tensorflow~=2.2.0", "keras2onnx>=1.0.0"]
_open_pif_paf_deps = ["openpifpaf==0.13.6"]
# Tooling for contributors: formatters, linters, test harness.
_dev_deps = [
    "beautifulsoup4==4.9.3",
    "black==22.12.0",
    "flake8==3.9.2",
    "isort==5.8.0",
    "wheel>=0.36.2",
    "pytest>=6.0.0",
    "pytest-mock>=3.6.0",
    "pytest-rerunfailures>=13.0",
    "tensorboard>=1.0,<2.9",
    "tensorboardX>=1.0",
    "evaluate>=0.4.1",
]
# Sphinx documentation build chain.
_docs_deps = [
    "m2r2>=0.2.7",
    "mistune<3,>=2.0.3",
    "myst-parser>=0.14.0",
    "rinohtype~=0.4.2",
    "sphinx~=3.5.0",
    "sphinx-copybutton~=0.3.0",
    "sphinx-markdown-tables~=0.0.15",
    "sphinx-multiversion~=0.2.4",
    "sphinx-pydantic~=0.1.0",
    "sphinx-rtd-theme~=0.5.0",
    "docutils<0.17",
]
_ultralytics_deps = [
    "ultralytics==8.0.124",
    supported_torch_version,
]
def _setup_extras() -> Dict:
    """Map each pip "extra" name to its dependency list."""
    extras: Dict = {}
    extras["clip"] = _clip_deps
    extras["dev"] = _dev_deps
    extras["docs"] = _docs_deps
    extras["deepsparse"] = _deepsparse_deps
    extras["deepsparse-ent"] = _deepsparse_ent_deps
    extras["openpifpaf"] = _open_pif_paf_deps
    extras["onnxruntime"] = _onnxruntime_deps
    extras["torch"] = _pytorch_deps
    extras["torch_all"] = _pytorch_all_deps
    extras["torchvision"] = _pytorch_vision_deps
    extras["transformers"] = _transformers_deps
    extras["llm"] = _llm_deps
    extras["notebook"] = _notebook_deps
    extras["tf_v1"] = _tensorflow_v1_deps
    extras["tf_v1_gpu"] = _tensorflow_v1_gpu_deps
    extras["tf_keras"] = _keras_deps
    extras["ultralytics"] = _ultralytics_deps
    extras["yolov5"] = _yolov5_deps
    return extras
21,090 | import os
from typing import Dict, List, Tuple
from setuptools import find_packages, setup
def _setup_entry_points() -> Dict:
entry_points = {
"console_scripts": [
# export
"sparseml.export=sparseml.export.export:main",
# sparsification
"sparseml.framework=sparseml.framework.info:_main",
"sparseml.sparsification=sparseml.sparsification.info:_main",
]
}
# transformers integration
for task in [
"masked_language_modeling",
"question_answering",
"text_classification",
"token_classification",
]:
entry_points["console_scripts"].extend(
[
f"sparseml.transformers.{task}=sparseml.transformers.{task}:main",
f"sparseml.transformers.train.{task}=sparseml.transformers.{task}:main",
]
)
entry_points["console_scripts"].extend(
[
"sparseml.transformers.export_onnx=sparseml.transformers.export:main",
"sparseml.transformers.export_onnx_refactor=sparseml.transformers.sparsification.obcq.export:main", # noqa 501
]
)
entry_points["console_scripts"].extend(
[
"sparseml.transformers.text_generation.apply=sparseml.transformers.finetune.text_generation:apply", # noqa 501
"sparseml.transformers.text_generation.compress=sparseml.transformers.finetune.text_generation:apply", # noqa 501
"sparseml.transformers.text_generation.train=sparseml.transformers.finetune.text_generation:train", # noqa 501
"sparseml.transformers.text_generation.finetune=sparseml.transformers.finetune.text_generation:train", # noqa 501
"sparseml.transformers.text_generation.eval=sparseml.transformers.finetune.text_generation:eval", # noqa 501
"sparseml.transformers.text_generation.oneshot=sparseml.transformers.finetune.text_generation:oneshot", # noqa 501
]
)
# image classification integration
entry_points["console_scripts"].extend(
[
"sparseml.image_classification.export_onnx="
"sparseml.pytorch.torchvision.export_onnx:main",
"sparseml.image_classification.train="
"sparseml.pytorch.torchvision.train:cli",
]
)
entry_points["console_scripts"].extend(
[
"sparseml.pytorch.image_classification.export_onnx="
"sparseml.pytorch.image_classification.export:main",
"sparseml.pytorch.image_classification.train="
"sparseml.pytorch.image_classification.train:main",
"sparseml.pytorch.image_classification.lr_analysis="
"sparseml.pytorch.image_classification.lr_analysis:main",
"sparseml.pytorch.image_classification.pr_sensitivity="
"sparseml.pytorch.image_classification.pr_sensitivity:main",
]
)
# object detection integration
entry_points["console_scripts"].extend(
[
"sparseml.yolov5.export_onnx=sparseml.yolov5.scripts:export",
"sparseml.yolov5.train=sparseml.yolov5.scripts:train",
"sparseml.yolov5.validation=sparseml.yolov5.scripts:val",
]
)
# instance segmentation integration
yolact_top_level_callable = "sparseml.yolact"
yolact_scripts_path = "sparseml.yolact.scripts"
entry_points["console_scripts"].extend(
[
f"{yolact_top_level_callable}.export_onnx={yolact_scripts_path}:export",
f"{yolact_top_level_callable}.train={yolact_scripts_path}:train",
f"{yolact_top_level_callable}.validation={yolact_scripts_path}:val",
f"{yolact_top_level_callable}.download={yolact_scripts_path}:download",
]
)
# recipe_template entrypoint
entry_points["console_scripts"].append(
"sparseml.recipe_template=sparseml.pytorch.recipe_template.cli:main"
)
# pose detection entrypoint
entry_points["console_scripts"].extend(
[
"sparseml.openpifpaf.train=sparseml.openpifpaf.train:main",
"sparseml.openpifpaf.export_onnx=sparseml.openpifpaf.export:main",
]
)
entry_points["console_scripts"].extend(
[
"sparseml.ultralytics.train=sparseml.yolov8.train:main",
"sparseml.ultralytics.val=sparseml.yolov8.val:main",
"sparseml.ultralytics.export_onnx=sparseml.yolov8.export:main",
]
)
# eval entrypoint
entry_points["console_scripts"].append(
"sparseml.evaluate=sparseml.evaluation.cli:main"
)
return entry_points | null |
21,091 | import os
from typing import Dict, List, Tuple
from setuptools import find_packages, setup
def _setup_long_description() -> Tuple[str, str]:
    """Return ``(long_description, content_type)`` for setuptools.

    Reads ``README.md`` from the current working directory. The file is
    opened in a ``with`` block so the handle is closed deterministically
    (the previous version leaked the open file object).
    """
    with open("README.md", "r", encoding="utf-8") as readme:
        return readme.read(), "text/markdown"
21,092 | from datetime import date
version_base = "1.7.0"
is_release = False
is_dev = False
dev_number = None
def _generate_version():
if is_release:
return version_base
elif is_dev:
return f"{version_base}.dev{dev_number}"
else:
return f"{version_base}.{date.today().strftime('%Y%m%d')}" | null |
import logging
import os
import shutil
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union

import numpy

import click

import sparseml.core.session as session_manager
from sparseml.export.helpers import (
    AVAILABLE_DEPLOYMENT_TARGETS,
    ONNX_MODEL_NAME,
    create_deployment_folder,
    create_export_kwargs,
    process_source_path,
    save_model_with_external_data,
)
from sparseml.utils.helpers import parse_kwarg_tuples
from sparsezoo.utils.numpy import load_numpy
# Module-level logger for this export module.
_LOGGER = logging.getLogger(__name__)

# NOTE(review): these module-level constants shadow the identically named
# values imported from sparseml.export.helpers above — confirm the
# redefinition is intentional. `ExportTargets` is expected to be an enum of
# supported deployment targets defined elsewhere; verify it is in scope.
AVAILABLE_DEPLOYMENT_TARGETS = [target.value for target in ExportTargets]
ONNX_MODEL_NAME = "model.onnx"
def process_source_path(source_path: Union[Path, str]) -> str:
    """
    Format the source path to be an absolute posix path.
    If the source path is a zoo stub, return the path to
    the training directory.

    Note: plain string paths are returned unchanged; only ``Path`` inputs
    are validated and absolutized.
    """
    if isinstance(source_path, str):
        # zoo stubs resolve to the stub's training directory; any other
        # string is passed through untouched
        if source_path.startswith("zoo:"):
            return Model(source_path).training.path
        return source_path

    resolved = Path(source_path).absolute()
    if not resolved.is_dir():
        raise ValueError(
            f"Argument: source_path must be a directory. " f"Got {resolved} instead."
        )
    return resolved.as_posix()
def create_export_kwargs(
    loaded_model_kwargs: Dict[str, Any], export_target: str = "deepsparse"
) -> Dict[str, Any]:
    """
    Route the relevant entries of ``loaded_model_kwargs`` to the kwargs
    that will be passed to the export function for the given target.

    :param loaded_model_kwargs: The loaded model kwargs.
    :param export_target: The export target.
    :raises ValueError: if ``export_target`` is not an available target
    :return: The export kwargs.
    """
    if export_target not in AVAILABLE_DEPLOYMENT_TARGETS:
        raise ValueError(
            f"Export target {export_target} not in "
            f"available targets {AVAILABLE_DEPLOYMENT_TARGETS}"
        )
    # currently only `input_names` is forwarded, regardless of target
    names = loaded_model_kwargs.get("input_names")
    return {} if names is None else {"input_names": names}
def create_deployment_folder(
    target_path: Union[Path, str],
    deployment_directory_files_mandatory: List[str],
    source_path: Union[Path, str, None] = None,
    deployment_directory_files_optional: Optional[List[str]] = None,
    deployment_directory_name: str = "deployment",
    onnx_model_name: Optional[str] = None,
) -> str:
    """
    Copy the relevant files to the deployment folder.

    The deployment folder will be created at target_path/deployment_directory_name.
    The relevant files are copied from:
    - if file is an ONNX model (or ONNX data file), the file will be copied
      from target_path
    - else, the file will be copied from source_path

    :param target_path: The path to the target folder.
    :param deployment_directory_files_mandatory: The mandatory list of files
        to copy to the deployment directory. If the file is an ONNX model
        (or ONNX data file), the file will be copied from target_path.
        Else, the file will be copied from source_path.
    :param source_path: The path to the source folder (where the original model
        files are stored)
    :param deployment_directory_files_optional: The optional list of files
        to copy to the deployment directory.
    :param deployment_directory_name: The name of the deployment directory.
        The files will be copied to target_path/deployment_directory_name.
    :param onnx_model_name: The name of the ONNX model file. If not specified,
        defaults to ONNX_MODEL_NAME.
    :return: The path to the deployment folder.
    """
    # create the deployment folder, wiping any stale copy first
    deployment_folder_dir = os.path.join(target_path, deployment_directory_name)
    if os.path.isdir(deployment_folder_dir):
        shutil.rmtree(deployment_folder_dir)
    os.makedirs(deployment_folder_dir, exist_ok=True)

    # prepare for moving the data
    deployment_directory_files_optional = deployment_directory_files_optional or []
    # BUGFIX: operate on a copy so the caller's list is not mutated — the
    # previous in-place `remove` clobbered the argument (a shared default in
    # IntegrationHelperFunctions), breaking repeated calls
    mandatory_files = list(deployment_directory_files_mandatory)
    mandatory_files.remove(ONNX_MODEL_NAME)

    # move the model and (if required) the data files
    move_onnx_files(
        target_path=target_path,
        deployment_folder_dir=deployment_folder_dir,
        onnx_model_name=onnx_model_name,
    )

    if source_path is None:
        return deployment_folder_dir

    # copy the relevant files from source_path
    for file_name in mandatory_files:
        copy_mandatory_deployment_files(
            file_name, source_path, target_path, onnx_model_name, deployment_folder_dir
        )

    for file_name in deployment_directory_files_optional:
        copy_optional_deployment_files(file_name, source_path, deployment_folder_dir)

    return deployment_folder_dir
def save_model_with_external_data(
    onnx_file_path: Union[str, Path], external_data_chunk_size_mb: Optional[int] = None
):
    """
    Re-serialize an ONNX model so that large constant tensors live in
    external data files next to the graph definition.

    :param onnx_file_path: path to the ONNX model file to process
    :param external_data_chunk_size_mb: if given, force the external-data
        export and cap each data file at this many MB; if None, external data
        is only split out when the loaded model already contains it
    """
    onnx_model = load_model(onnx_file_path)
    if external_data_chunk_size_mb is not None:
        _LOGGER.debug(
            "Splitting the model into "
            f"{os.path.basename(onnx_file_path)} (graph definition) and one or more "
            f"{ONNX_DATA_NAME} files (constant tensor data). The size of each "
            f"{ONNX_DATA_NAME} file will not exceed {external_data_chunk_size_mb} MB.",
        )
        save_onnx(
            onnx_model,
            onnx_file_path,
            external_data_file=ONNX_DATA_NAME,
            max_external_data_chunk_size=external_data_chunk_size_mb * 1024 * 1024,
        )
    elif onnx_includes_external_data(onnx_model):
        # BUGFIX: added the missing trailing space so the message no longer
        # renders as "...the model intomodel.onnx..."
        _LOGGER.debug(
            "Splitting the model into "
            f"{os.path.basename(onnx_file_path)} (graph definition) and one or more "
            f"{ONNX_DATA_NAME} files (constant tensor data)"
        )
        save_onnx(onnx_model, onnx_file_path, external_data_file=ONNX_DATA_NAME)
    else:
        _LOGGER.debug(
            "save_with_external_data = True ignored, the model already "
            "has been saved with external data"
        )
def export_data_samples(
    target_path: Union[Path, str],
    input_samples: Optional[List[Any]] = None,
    output_samples: Optional[List[Any]] = None,
    label_samples: Optional[List[Any]] = None,
    as_tar: bool = False,
):
    """
    Save the input, labels and output samples to the target path.

    All the input files are optional. If a sample is None,
    it will not be saved.

    Input samples will be saved to:
    .../sample-inputs/inp_0001.npz
    .../sample-inputs/inp_0002.npz
    ...

    Output samples will be saved to:
    .../sample-outputs/out_0001.npz
    .../sample-outputs/out_0002.npz
    ...

    Label samples will be saved to:
    .../sample-labels/lab_0001.npz
    .../sample-labels/lab_0002.npz
    ...

    If as_tar is True, the samples will be saved as tar files:
    .../sample-inputs.tar.gz
    .../sample-outputs.tar.gz
    .../sample-labels.tar.gz

    :param input_samples: The input samples to save.
    :param output_samples: The output samples to save.
    :param label_samples: The label samples to save.
    :param target_path: The path to save the samples to.
    :param as_tar: Whether to save the samples as tar files.
    """
    for samples, names in zip(
        [input_samples, output_samples, label_samples],
        [InputsNames, OutputsNames, LabelNames],
    ):
        # BUGFIX: the previous `len(samples) > 0` raised TypeError for the
        # documented default of None; a truthiness check skips both None
        # and empty lists
        if samples:
            _check_if_samples_already_exist(
                os.path.join(target_path, names.basename.value)
            )
            _LOGGER.info(f"Exporting {names.basename.value} to {target_path}...")
            # dict-typed samples indicate batched data that must be unpacked
            break_batch = isinstance(samples[0], dict)
            export_data_sample(samples, names, target_path, as_tar, break_batch)
            _LOGGER.info(
                f"Successfully exported {names.basename.value} to {target_path}!"
            )
def resolve_integration(
    source_path: Union[Path, str, None] = None,
    integration: Optional[str] = None,
) -> str:
    """
    Resolve the integration to use.

    If integration is not provided, attempt to infer it from the source_path.
    Once the integration is resolved, perform the hot import to register
    the integration helper functions.

    :param source_path: The path to the PyTorch model to export.
    :param integration: Optional name of the integration to use. If not provided,
        will attempt to infer it from the source_path.
    :return: The name of the integration to use for exporting the model.
    """
    integration = integration or _infer_integration_from_source_path(source_path)

    if integration == Integrations.image_classification.value:
        # hot import registers the image-classification helper functions
        import sparseml.pytorch.image_classification.integration_helper_functions  # noqa F401

        return Integrations.image_classification.value

    if integration == Integrations.transformers.value:
        # hot import registers the transformers helper functions
        import sparseml.transformers.integration_helper_functions  # noqa F401

        return Integrations.transformers.value

    raise ValueError(
        f"Could not infer integration from source_path:\n{source_path}\n"
        "Please specify an argument `integration` from one of "
        "the available integrations: "
        f"{[integration.value for integration in Integrations]}."
    )
class IntegrationHelperFunctions(RegistryMixin, BaseModel):
    """
    Registry that maps names to helper functions
    for creation/export/manipulation of models for a specific
    integration.
    """

    # NOTE(review): `RegistryMixin`, `BaseModel`, and `Field` come from
    # imports outside this excerpt (presumably sparsezoo's registry and
    # pydantic v1) — confirm. Likewise `export_model` and
    # `create_data_samples_` (used as field defaults below) are defined
    # elsewhere in this module.

    # Builds the (sparse) model from a source path; integrations must supply
    # this — there is no default.
    create_model: Callable[
        [Union[str, Path]],
        Tuple[
            "torch.nn.Module",  # noqa F821
            Optional[Dict[str, Any]],
        ],
    ] = Field(
        description="A function that takes: "
        "- a source path to a PyTorch model "
        "- (optionally) additional arguments"
        "and returns: "
        "- a (sparse) PyTorch model "
        "- (optionally) loaded_model_kwargs "
        "(any relevant objects created along with the model)"
    )
    # Builds the dataloader used for dummy inputs / sample export.
    create_data_loader: Callable[
        [],
        Tuple[
            Union["torch.utils.data.DataLoader", Generator],  # noqa F821
            Optional[Dict[str, Any]],
        ],
    ] = Field(
        description="A function that takes: "
        "arbitrary arguments and returns: "
        "- a dataloader "
        "- (optionally) loaded data_loader kwargs "
        "(any relevant objects created along with the data_loader)"
    )
    # Produces a single dummy input tensor used to trace/export the model.
    create_dummy_input: Callable[[Any], "torch.Tensor"] = Field(  # noqa F821
        description="A function that takes: "
        "- appropriate arguments "
        "and returns: "
        "- a dummy input for the model (a torch.Tensor) "
    )
    # ONNX export routine; defaults to the generic torch export helper.
    export: Callable[[Any], str] = Field(
        description="A function that takes: "
        " - a (sparse) PyTorch model "
        " - sample input data "
        " - the path to save the exported model to "
        " - the name to save the exported ONNX model as "
        " - the deployment target to export to "
        " - the opset to use for the export "
        " - (optionally) a dictionary of additional arguments"
        "and returns the path to the exported model",
        default=export_model,
    )
    # Optional post-export graph optimization hook (may be None).
    apply_optimizations: Optional[Callable[[Any], None]] = Field(
        description="A function that takes:"
        " - path to the exported model"
        " - names of the optimizations to apply"
        " and applies the optimizations to the model",
    )
    # Generates sample inputs/labels/outputs for correctness validation.
    create_data_samples: Callable[
        [
            Tuple[
                Optional["torch.nn.Module"], int, Optional[Dict[str, Any]]  # noqa: F821
            ]
        ],
        Tuple[
            List["torch.Tensor"],  # noqa F821
            Optional[List["torch.Tensor"]],  # noqa F821
            Optional[List["torch.Tensor"]],  # noqa F821
        ],
    ] = Field(
        default=create_data_samples_,
        description="A function that takes: "
        " - (optionally) a (sparse) PyTorch model "
        " - the number of samples to generate "
        " - (optionally) loaded_model_kwargs "
        "(any relevant objects created along with the model) "
        "and returns: "
        " - the inputs, (optionally) labels and (optionally) outputs as torch tensors ",
    )
    # NOTE(review): this mutable list default is shared across instances
    # unless pydantic copies field defaults per instance — confirm it is
    # never mutated in place by consumers.
    deployment_directory_files_mandatory: List[str] = Field(
        description="A list that describes the "
        "mandatory expected files of the deployment directory",
        default=["model.onnx"],
    )
    deployment_directory_files_optional: Optional[List[str]] = Field(
        description="A list that describes the "
        "optional expected files of the deployment directory",
    )
# Default ONNX opset used for torch exports, resolved once at import time.
# NOTE(review): `_default_opset` is defined elsewhere in this module.
TORCH_DEFAULT_ONNX_OPSET = _default_opset()
def default_device() -> str:
    """
    :return: the device that should be defaulted to for the current setup.
        if multiple gpus are available then will return a string with all of them,
        else if single gpu available then will return cuda,
        else returns cpu
    """
    if not torch.cuda.is_available():
        return "cpu"

    gpu_count = torch.cuda.device_count()
    if gpu_count < 2:
        return "cuda"

    # multiple GPUs: enumerate every visible device id
    return "cuda:" + ",".join(str(idx) for idx in range(gpu_count))
The provided code snippet includes necessary dependencies for implementing the `export` function. Write a Python function `def export( source_path: Union[Path, str] = None, target_path: Union[Path, str, None] = None, model: Optional["torch.nn.Module"] = None, # noqa F401 onnx_model_name: str = ONNX_MODEL_NAME, deployment_target: str = "deepsparse", opset: Optional[int] = None, save_with_external_data: bool = False, external_data_chunk_size_mb: Optional[int] = None, num_export_samples: int = 0, recipe: Optional[Union[Path, str]] = None, deployment_directory_name: str = "deployment", device: str = "cpu", graph_optimizations: Union[str, List[str], None] = "all", validate_correctness: bool = False, validate_structure: bool = True, integration: Optional[str] = None, sample_data: Optional[Any] = None, task: Optional[str] = None, **kwargs, )` to solve the following problem:
Export a PyTorch model that is either: - located in source_path (and will be loaded) - passed directly to the function to target_path. The deployment files will be located at target_path/deployment_directory_name The exporting logic consists of the following steps: 1. Create the model (if required) and the data loader using the integration-specific `create_model` and `create_data_loader` functions. 2. Export the model to ONNX using the integration-specific `export` function. 3. Apply the graph optimizations to the exported model. 4. Create the deployment folder at target_path/deployment_directory_name using the integration-specific `create_deployment_folder` function. 5. Optionally, export samples using the integration-specific `create_data_samples` function. 6. Optionally, validate the correctness of the exported model using the integration-specific `validate_correctness` function. 7. Optionally, validate the structure of the exported model using the integration-specific `validate_structure` function. :param source_path: The path to the PyTorch model to export. Will be omitted if model is provided :param target_path: The path to save the exported model to. If not provided will default to source_path :param model: The PyTorch model to export. If provided, the source_path should be set to None to avoid potential confusion and entaglement of sources. This means that, the full export logic will not be enforced (e.g. the final deployment directory will not be complete, it will not be possible to run validate_structure method or apply some optimizations that require complete deployment directory structure) :param onnx_model_name: The name of the exported model. Defaults to ONNX_MODEL_NAME. :param deployment_target: The deployment target to export the model to. Defaults to 'deepsparse'. :param opset: The ONNX opset to use for exporting the model. Defaults to the latest supported opset. :param recipe: The path to the recipe to use for exporting the model. 
Defaults to None. If a recipe is found in the source_path, it will be automatically used for export. :param save_with_external_data: if True, large constant tensors, such as initializers, will be serialised in a separate file. Defaults to False. Note: if the model is sufficiently large, it will be saved with external data regardless of this flag. :param external_data_chunk_size_mb: The size of the external data chunks to use for exporting the model. Defaults to None, which will use the default chunk size. If set, will force the export with external data. :param num_export_samples: The number of samples to create for the exported model. Defaults to 0. :param deployment_directory_name: The name of the deployment directory to create for the exported model. Thus, the exported model will be saved to `target_path/deployment_directory_name`. Defaults to 'deployment'. :param device: The device to use for exporting the model. Defaults to 'auto'. :param graph_optimizations: The graph optimizations to apply to the exported model. Defaults to 'all'. :param validate_correctness: Whether to validate the correctness of the exported model. Defaults to False. :param validate_structure: Whether to validate the structure of the exporter model (contents of the target_path). :param integration: The name of the integration to use for exporting the model. Defaults to None, which will infer the integration from the source_path. :param sample_data: Optional sample data to use for exporting the model. If not provided, a dummy input will be created for the model. Defaults to None. :param task: Optional task to use for exporting the model. Defaults to None.
Here is the function:
def export(
    source_path: Union[Path, str] = None,
    target_path: Union[Path, str, None] = None,
    model: Optional["torch.nn.Module"] = None,  # noqa F401
    onnx_model_name: str = ONNX_MODEL_NAME,
    deployment_target: str = "deepsparse",
    opset: Optional[int] = None,
    save_with_external_data: bool = False,
    external_data_chunk_size_mb: Optional[int] = None,
    num_export_samples: int = 0,
    recipe: Optional[Union[Path, str]] = None,
    deployment_directory_name: str = "deployment",
    device: str = "cpu",
    graph_optimizations: Union[str, List[str], None] = "all",
    validate_correctness: bool = False,
    validate_structure: bool = True,
    integration: Optional[str] = None,
    sample_data: Optional[Any] = None,
    task: Optional[str] = None,
    **kwargs,
):
    """
    Export a PyTorch model that is either:
    - located in source_path (and will be loaded)
    - passed directly to the function
    to target_path.
    The deployment files will be located at target_path/deployment_directory_name

    The exporting logic consists of the following steps:
    1. Create the model (if required) and the data loader using the
       integration-specific `create_model` and `create_data_loader` functions.
    2. Export the model to ONNX using the integration-specific `export` function.
    3. Apply the graph optimizations to the exported model.
    4. Create the deployment folder at target_path/deployment_directory_name
       using the integration-specific `create_deployment_folder` function.
    5. Optionally, export samples using the integration-specific
       `create_data_samples` function.
    6. Optionally, validate the correctness of the exported model using
       the integration-specific `validate_correctness` function.
    7. Optionally, validate the structure of the exported model using
       the integration-specific `validate_structure` function.

    :param source_path: The path to the PyTorch model to export. Will be
        omitted if model is provided
    :param target_path: The path to save the exported model to. If not provided
        will default to source_path
    :param model: The PyTorch model to export. If provided, the source_path
        should be set to None to avoid potential confusion and entanglement
        of sources. This means that, the full
        export logic will not be enforced (e.g. the final deployment directory
        will not be complete, it will not be possible to run validate_structure
        method or apply some optimizations that require complete deployment
        directory structure)
    :param onnx_model_name: The name of the exported model.
        Defaults to ONNX_MODEL_NAME.
    :param deployment_target: The deployment target to export
        the model to. Defaults to 'deepsparse'.
    :param opset: The ONNX opset to use for exporting the model.
        Defaults to the latest supported opset.
    :param save_with_external_data: if True, large constant tensors,
        such as initializers, will be serialised in a separate file.
        Defaults to False. Note: if the model is sufficiently large,
        it will be saved with external data regardless of this flag.
    :param external_data_chunk_size_mb: The size of the external data
        chunks to use for exporting the model. Defaults to None, which
        will use the default chunk size. If set, will force the
        export with external data.
    :param num_export_samples: The number of samples to create for
        the exported model. Defaults to 0.
    :param recipe: The path to the recipe to use for exporting the model.
        Defaults to None. If a recipe is found in the source_path, it will
        be automatically used for export.
    :param deployment_directory_name: The name of the deployment
        directory to create for the exported model. Thus, the exported
        model will be saved to `target_path/deployment_directory_name`.
        Defaults to 'deployment'.
    :param device: The device to use for exporting the model.
        Defaults to 'cpu'; pass 'auto' to select the best available device.
    :param graph_optimizations: The graph optimizations to apply
        to the exported model. Defaults to 'all'.
    :param validate_correctness: Whether to validate the correctness
        of the exported model. Defaults to False.
    :param validate_structure: Whether to validate the structure
        of the exporter model (contents of the target_path).
    :param integration: The name of the integration to use for
        exporting the model. Defaults to None, which will infer
        the integration from the source_path.
    :param sample_data: Optional sample data to use for exporting
        the model. If not provided, a dummy input will be created
        for the model. Defaults to None.
    :param task: Optional task to use for exporting the model.
        Defaults to None.
    """
    # local imports keep heavy integration modules off the import path of
    # callers that never export
    from sparseml.export.export_data import export_data_samples
    from sparseml.export.validators import validate_correctness as validate_correctness_
    from sparseml.export.validators import validate_structure as validate_structure_
    from sparseml.integration_helper_functions import (
        IntegrationHelperFunctions,
        resolve_integration,
    )
    from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
    from sparseml.pytorch.utils.helpers import default_device

    opset = opset or TORCH_DEFAULT_ONNX_OPSET

    # start a new SparseSession for potential recipe application
    session_manager.create_session()
    session_manager.active_session().reset()

    if source_path is not None and model is not None:
        raise ValueError(
            "Not allowed to specify multiple model "
            "sources for export: source_path and model. "
            "Specify either source_path or model, not both"
        )

    if source_path is not None:
        source_path = process_source_path(source_path)
        if target_path is None:
            target_path = source_path

    integration = resolve_integration(source_path, integration)
    _LOGGER.info(f"Starting export for {integration} model...")

    if target_path is None:
        # BUGFIX: corrected the "targe_path" typo in the error message
        raise ValueError("target_path is None. Provide the target_path argument.")

    # create the target path if it doesn't exist
    if not Path(target_path).exists():
        Path(target_path).mkdir(parents=True, exist_ok=True)

    # choose the appropriate device
    device = default_device() if device == "auto" else device

    # assert the valid deployment target
    if deployment_target not in AVAILABLE_DEPLOYMENT_TARGETS:
        raise ValueError(
            "Argument: deployment_target must be "
            f"one of {AVAILABLE_DEPLOYMENT_TARGETS}. "
            f"Got {deployment_target} instead."
        )

    deployment_folder_dir = os.path.join(target_path, deployment_directory_name)
    if os.path.isdir(deployment_folder_dir):
        _LOGGER.warning(
            f"Deployment directory at: {deployment_folder_dir} already exists."
            "Overwriting the existing deployment directory... "
        )
        shutil.rmtree(deployment_folder_dir)

    helper_functions: IntegrationHelperFunctions = (
        IntegrationHelperFunctions.load_from_registry(integration, task=task)
    )

    loaded_model_kwargs = {}
    if model is None:
        _LOGGER.info("Creating model for the export...")
        model, loaded_model_kwargs = helper_functions.create_model(
            source_path,
            device=device,
            task=task,
            recipe=recipe,
            **kwargs,
        )
    model.eval()

    # merge arg dictionaries; explicit kwargs win only when the model loader
    # did not already supply a value for the same name
    for arg_name, arg_val in kwargs.items():
        if arg_name not in loaded_model_kwargs:
            loaded_model_kwargs[arg_name] = arg_val

    # once model is loaded we can clear the SparseSession, it was only needed for
    # adding structural changes (ie quantization) to the model
    session_manager.active_session().reset()

    _LOGGER.info("Creating data loader for the export...")
    data_loader, loaded_data_loader_kwargs = helper_functions.create_data_loader(
        model=model,
        task=task,
        device=device,
        **loaded_model_kwargs,
    )

    # join kwargs that are created during the initialization of the model
    # and data_loader
    export_kwargs = {**loaded_model_kwargs, **loaded_data_loader_kwargs}
    if export_kwargs:
        _LOGGER.info(
            "Created additional items that will "
            f"be used for the export: {list(export_kwargs.keys())}"
        )

    sample_data = (
        helper_functions.create_dummy_input(data_loader=data_loader, **kwargs)
        if sample_data is None
        else sample_data
    )

    _LOGGER.info(f"Exporting {onnx_model_name} to {target_path}...")
    export_kwargs = create_export_kwargs(export_kwargs)

    onnx_file_path = helper_functions.export(
        model=model,
        sample_data=sample_data,
        target_path=target_path,
        onnx_model_name=onnx_model_name,
        deployment_target=deployment_target,
        opset=opset,
        **export_kwargs,
    )
    _LOGGER.info(f"Successfully exported {onnx_model_name} to {onnx_file_path}...")

    if num_export_samples:
        _LOGGER.info(f"Exporting {num_export_samples} samples...")
        (
            input_samples,
            output_samples,
            label_samples,
        ) = helper_functions.create_data_samples(
            num_samples=num_export_samples, model=model, data_loader=data_loader
        )
        export_data_samples(
            input_samples=input_samples,
            output_samples=output_samples,
            label_samples=label_samples,
            target_path=target_path,
            as_tar=False,
        )

    _LOGGER.info(
        f"Creating deployment folder {deployment_directory_name} "
        f"at directory: {target_path}..."
    )

    deployment_folder_dir = create_deployment_folder(
        source_path=source_path,
        target_path=target_path,
        deployment_directory_name=deployment_directory_name,
        deployment_directory_files_mandatory=helper_functions.deployment_directory_files_mandatory,  # noqa: E501
        deployment_directory_files_optional=helper_functions.deployment_directory_files_optional,  # noqa: E501
        onnx_model_name=onnx_model_name,
    )

    if validate_correctness:
        _LOGGER.info("Validating model correctness...")
        if not num_export_samples:
            raise ValueError(
                "To validate correctness sample inputs/outputs are needed."
                "To enable the validation, set `num_export_samples` "
                "to positive integer"
            )
        validate_correctness_(target_path, deployment_folder_dir, onnx_model_name)

    _LOGGER.info(
        f"Applying optimizations: {graph_optimizations} to the exported model..."
    )
    if helper_functions.apply_optimizations is not None:
        helper_functions.apply_optimizations(
            exported_file_path=os.path.join(deployment_folder_dir, onnx_model_name),
            optimizations=graph_optimizations,
        )

    if save_with_external_data is True or external_data_chunk_size_mb:
        save_model_with_external_data(
            os.path.join(deployment_folder_dir, onnx_model_name),
            external_data_chunk_size_mb,
        )

    if validate_structure and source_path:
        _LOGGER.info("Validating model structure...")
        validate_structure_(
            target_path=target_path,
            deployment_directory_name=deployment_directory_name,
            onnx_model_name=onnx_model_name,
            deployment_directory_files_mandatory=helper_functions.deployment_directory_files_mandatory,  # noqa: E501
            deployment_directory_files_optional=helper_functions.deployment_directory_files_optional,  # noqa: E501
        )

    _LOGGER.info(
        f"Successfully exported model from:\n{target_path}"
        f"\nto\n{deployment_folder_dir}\nfor integration: {integration}"
    )
Defaults to None. If a recipe is found in the source_path, it will be automatically used for export. :param save_with_external_data: if True, large constant tensors, such as initializers, will be serialised in a separate file. Defaults to False. Note: if the model is sufficiently large, it will be saved with external data regardless of this flag. :param external_data_chunk_size_mb: The size of the external data chunks to use for exporting the model. Defaults to None, which will use the default chunk size. If set, will force the export with external data. :param num_export_samples: The number of samples to create for the exported model. Defaults to 0. :param deployment_directory_name: The name of the deployment directory to create for the exported model. Thus, the exported model will be saved to `target_path/deployment_directory_name`. Defaults to 'deployment'. :param device: The device to use for exporting the model. Defaults to 'auto'. :param graph_optimizations: The graph optimizations to apply to the exported model. Defaults to 'all'. :param validate_correctness: Whether to validate the correctness of the exported model. Defaults to False. :param validate_structure: Whether to validate the structure of the exporter model (contents of the target_path). :param integration: The name of the integration to use for exporting the model. Defaults to None, which will infer the integration from the source_path. :param sample_data: Optional sample data to use for exporting the model. If not provided, a dummy input will be created for the model. Defaults to None. :param task: Optional task to use for exporting the model. Defaults to None. |
21,094 | import logging
import os
import shutil
from pathlib import Path
from typing import Any, List, Optional, Union
import numpy
import click
import sparseml.core.session as session_manager
from sparseml.export.helpers import (
AVAILABLE_DEPLOYMENT_TARGETS,
ONNX_MODEL_NAME,
create_deployment_folder,
create_export_kwargs,
process_source_path,
save_model_with_external_data,
)
from sparseml.utils.helpers import parse_kwarg_tuples
from sparsezoo.utils.numpy import load_numpy
def _parse_graph_optimizations(graph_optimizations):
    """Normalize a CLI graph-optimizations string.

    A comma-separated value becomes a list of optimization names, a
    "disabled" sentinel (none/null/false/0 or empty, case-insensitive)
    becomes None, and any other value is passed through unchanged.
    """
    # sentinel spellings (lower-cased) meaning "no optimizations requested"
    disabled_sentinels = ("none", "null", "", "false", "0")
    if "," in graph_optimizations:
        return graph_optimizations.split(",")
    if graph_optimizations.lower() in disabled_sentinels:
        return None
    return graph_optimizations
21,095 | import logging
import os
import shutil
from pathlib import Path
from typing import Any, List, Optional, Union
import numpy
import click
import sparseml.core.session as session_manager
from sparseml.export.helpers import (
AVAILABLE_DEPLOYMENT_TARGETS,
ONNX_MODEL_NAME,
create_deployment_folder,
create_export_kwargs,
process_source_path,
save_model_with_external_data,
)
from sparseml.utils.helpers import parse_kwarg_tuples
from sparsezoo.utils.numpy import load_numpy
def _parse_sample_data(
    sample_data: Union[None, Path, str]
) -> Union[None, numpy.ndarray]:
    """Load optional sample data from a numpy file.

    :param sample_data: None, or a path (str or pathlib.Path) to a
        .npy/.npz numpy file
    :return: None if no sample data was given, otherwise the loaded numpy data
    :raises NotImplementedError: if the path does not point to a supported
        numpy file format
    """
    if sample_data is None:
        return None
    # accept pathlib.Path as advertised by the annotation; calling
    # .endswith directly on a Path raised AttributeError before
    sample_data = str(sample_data)
    if sample_data.endswith((".npz", ".npy")):
        return load_numpy(sample_data)
    # message now matches the actually-supported extensions (.npy and .npz)
    raise NotImplementedError(
        "Only numpy files (.npy, .npz) are supported for sample_data"
    )
21,096 | import os
from pathlib import Path
from typing import Union
import onnx
import torch
from sparseml.exporters import ExportTargets
from sparseml.exporters.onnx_to_deepsparse import ONNXToDeepsparse
from sparseml.pytorch.opset import TORCH_DEFAULT_ONNX_OPSET
from sparseml.pytorch.torch_to_onnx_exporter import TorchToONNX
class ExportTargets(Enum):
"""
Holds the names of the supported export targets
"""
# "deepsparse": plain ONNX export followed by deepsparse-specific graph transforms
deepsparse = "deepsparse"
# "onnx": plain ONNX export with no engine-specific transforms
onnx = "onnx"
class ONNXToDeepsparse(BaseExporter):
"""
Optimizes an `onnx.ModelProto` for the deepsparse engine by applying a
series of transformations to an onnx graph with quantize operations.
Usage:
```python
# could be a model retrieved previously from TorchToOnnx() or somewhere else
onnx_model: onnx.ModelProto = ...
exporter = ONNXToDeepsparse()
exporter.export(onnx_model, "model.onnx")
```
You can also just optimize the model directly without saving to disk:
```python
onnx_model: onnx.ModelProto = ...
exporter = ONNXToDeepsparse()
optimized_model = exporter.apply(onnx_model)
```
:param use_qlinear_conv: Set True to use legacy QLinearConv format instead
of ConvInteger. QLinearConv requires output activations be quantized
in the quantization recipe. (This was the default behavior prior to
sparseml 0.12). Default is False
:param use_qlinear_matmul: Set True to convert eligible MatMul nodes to
QLinearMatMul instead of the integer MatMul variants. Default is False
:param skip_input_quantize: if True, the export flow will attempt to delete
the first Quantize Linear Nodes(s) immediately after model input and set
the model input type to UINT8. Default is False
:param inplace: If true, does conversion of model in place. Default is true
:param export_input_model: If true, saves the input onnx model alongside the
optimized model.
"""
def __init__(
self,
use_qlinear_conv: bool = False,
use_qlinear_matmul: bool = False,
skip_input_quantize: bool = False,
inplace: bool = True,
export_input_model: bool = False,
):
self.inplace = inplace
self.export_input_model = export_input_model
# base set of graph cleanup / quantization-lowering transforms, applied in order
transforms = [
sparseml_transforms.ConstantsToInitializers(),
sparseml_transforms.FoldIdentityInitializers(),
sparseml_transforms.InitializersToUint8(),
sparseml_transforms.FlattenQParams(),
sparseml_transforms.FoldConvDivBn(),
sparseml_transforms.DeleteRepeatedQdq(),
sparseml_transforms.QuantizeQATEmbedding(),
sparseml_transforms.PropagateEmbeddingQuantization(),
sparseml_transforms.PropagateDequantThroughSplit(),
]
if use_qlinear_matmul:
transforms.append(
sparseml_transforms.MatMulToQLinearMatMul(),
)
transforms.extend(
[
sparseml_transforms.MatMulAddToMatMulIntegerAddCastMul(),
sparseml_transforms.MatMulToMatMulIntegerCastMul(),
sparseml_transforms.FoldReLUQuants(),
# legacy QLinearConv vs. ConvInteger lowering, per constructor flag
sparseml_transforms.ConvToQLinearConv()
if use_qlinear_conv
else sparseml_transforms.ConvToConvIntegerAddCastMul(),
sparseml_transforms.GemmToQLinearMatMul(),
sparseml_transforms.GemmToMatMulIntegerAddCastMul(),
sparseml_transforms.QuantizeResiduals(),
sparseml_transforms.RemoveDuplicateQConvWeights(),
sparseml_transforms.RemoveDuplicateQuantizeOps(),
]
)
if skip_input_quantize:
transforms.append(sparseml_transforms.SkipInputQuantize())
super().__init__(transforms)
def pre_validate(self, model: Union[onnx.ModelProto, str, Path]) -> onnx.ModelProto:
"""Load the model if given as a path; copy it unless inplace was requested."""
if isinstance(model, (str, Path)):
model = onnx.load(str(model))
if not isinstance(model, onnx.ModelProto):
raise TypeError(f"Expected onnx.ModelProto, found {type(model)}")
return model if self.inplace else deepcopy(model)
def post_validate(self, model: onnx.ModelProto) -> onnx.ModelProto:
"""Sanity-check that the transform pipeline produced an onnx.ModelProto."""
# sanity check
if not isinstance(model, onnx.ModelProto):
raise TypeError(f"Expected onnx.ModelProto, found {type(model)}")
return model
def export(
self,
pre_transforms_model: Union[ModelProto, str],
file_path: str,
do_split_external_data: bool = True,
):
"""Apply all transforms to the model and save the result to file_path."""
if not isinstance(pre_transforms_model, ModelProto):
pre_transforms_model = onnx.load(pre_transforms_model)
# optionally persist the untransformed model for debugging;
# the SAVE_PREQAT_ONNX env var also forces this
if self.export_input_model or os.getenv("SAVE_PREQAT_ONNX", False):
save_onnx(
pre_transforms_model,
file_path.replace(".onnx", ".preqat.onnx"),
do_split_external_data=do_split_external_data,
)
post_transforms_model: onnx.ModelProto = self.apply(pre_transforms_model)
save_onnx(
post_transforms_model,
file_path,
do_split_external_data=do_split_external_data,
)
TORCH_DEFAULT_ONNX_OPSET = _default_opset()
class TorchToONNX(BaseExporter):
"""
Transforms a `torch.nn.Module` into an `onnx.ModelProto` using `torch.onnx.export`.
Example usage:
```python
model = torchvision.models.resnet18().eval()
exporter = TorchToONNX(sample_batch=torch.randn(1, 3, 224, 224))
exporter.export(model, "resnet18.onnx")
```
:param sample_batch: the batch to export an onnx for, handles creating the
static graph for onnx as well as setting dimensions
:param opset: onnx opset to use for exported model.
Default is based on torch version.
:param disable_bn_fusing: torch >= 1.7.0 only. Set True to disable batch norm
fusing during torch export. Default and suggested setting is True. Batch
norm fusing will change the exported parameter names as well as affect
sensitivity analyses of the exported graph. Additionally, the DeepSparse
inference engine, and other engines, perform batch norm fusing at model
compilation.
:param export_kwargs: kwargs to be passed as is to the torch.onnx.export api
call. Useful to pass in dynamic_axes, input_names, output_names, etc.
See more on the torch.onnx.export api spec in the PyTorch docs:
https://pytorch.org/docs/stable/onnx.html
"""
def __init__(
self,
sample_batch: Any,
opset: int = TORCH_DEFAULT_ONNX_OPSET,
disable_bn_fusing: bool = True,
**export_kwargs,
):
super().__init__(
[
# NOTE: this first transform is what transforms
# the torch.nn.Module into an onnx.ModelProto
_TorchOnnxExport(
sample_batch, opset, disable_bn_fusing, **export_kwargs
),
# NOTE: the remainder of these act on onnx.ModelProto
sparseml_transforms.FoldIdentityInitializers(),
sparseml_transforms.FlattenQParams(),
sparseml_transforms.UnwrapBatchNorms(),
sparseml_transforms.DeleteTrivialOnnxAdds(),
],
)
def pre_validate(self, module: torch.nn.Module) -> torch.nn.Module:
"""Return a CPU eval-mode copy of the module, unwrapping parallel wrappers."""
if not isinstance(module, torch.nn.Module):
raise TypeError(f"Expected torch.nn.Module, found {type(module)}")
if is_parallel_model(module):
module = module.module
# deepcopy so the caller's module is never mutated by the export
return deepcopy(module).to("cpu").eval()
def post_validate(self, model: onnx.ModelProto) -> onnx.ModelProto:
"""Sanity-check that the transform pipeline produced an onnx.ModelProto."""
if not isinstance(model, onnx.ModelProto):
raise TypeError(f"Expected onnx.ModelProto, found {type(model)}")
return model
def export(
self,
pre_transforms_model: torch.nn.Module,
file_path: str,
do_split_external_data: bool = True,
):
"""Apply all transforms, save the ONNX model, and clean up temp files."""
post_transforms_model: onnx.ModelProto = self.apply(pre_transforms_model)
save_onnx(
model=post_transforms_model,
model_path=file_path,
do_split_external_data=do_split_external_data,
)
self.remove_leftover_files()
def remove_leftover_files(self):
"""
Remove any leftover files created by the exporter
during intermediate steps of the export process.
The files are being stored by the _TorchOnnxExport transform.
"""
torch_onnx_export_transform = self.transforms[0]
assert isinstance(
torch_onnx_export_transform, _TorchOnnxExport
), "Expected the first transform from self.transform to be _TorchOnnxExport"
for file in torch_onnx_export_transform.leftover_files:
os.remove(file)
The provided code snippet includes necessary dependencies for implementing the `export_model` function. Write a Python function `def export_model( model: torch.nn.Module, sample_data: torch.Tensor, target_path: Union[Path, str], onnx_model_name: str, deployment_target: str = "deepsparse", opset: int = TORCH_DEFAULT_ONNX_OPSET, **kwargs, ) -> str` to solve the following problem:
Exports the torch model to the deployment target :param model: The torch model to export :param sample_data: The sample data to use for the export :param target_path: The path to export the model to :param onnx_model_name: The name to save the exported ONNX model as :param deployment_target: The deployment target to export to. Defaults to deepsparse :param opset: The opset to use for the export. Defaults to TORCH_DEFAULT_ONNX_OPSET :param kwargs: Additional kwargs to pass to the TorchToONNX exporter :return: The path to the exported model
Here is the function:
def export_model(
model: torch.nn.Module,
sample_data: torch.Tensor,
target_path: Union[Path, str],
onnx_model_name: str,
deployment_target: str = "deepsparse",
opset: int = TORCH_DEFAULT_ONNX_OPSET,
**kwargs,
) -> str:
"""
Exports the torch model to the deployment target
:param model: The torch model to export
:param sample_data: The sample data to use for the export
:param target_path: The path to export the model to
:param onnx_model_name: The name to save the exported ONNX model as
:param deployment_target: The deployment target to export to. Defaults to deepsparse
:param opset: The opset to use for the export. Defaults to TORCH_DEFAULT_ONNX_OPSET
:param kwargs: Additional kwargs to pass to the TorchToONNX exporter
:return: The path to the exported model
:raises ValueError: if deployment_target is not one of ExportTargets
"""
model.eval()
path_to_exported_model = os.path.join(target_path, onnx_model_name)
exporter = TorchToONNX(sample_batch=sample_data, opset=opset, **kwargs)
# If performing deepsparse transforms, don't split the initial onnx export
do_deploy_deepsparse = deployment_target == ExportTargets.deepsparse.value
exporter.export(
model, path_to_exported_model, do_split_external_data=(not do_deploy_deepsparse)
)
if do_deploy_deepsparse:
# second pass: reload the plain ONNX export and apply the
# deepsparse-specific graph transforms on top of it
exporter = ONNXToDeepsparse()
model = onnx.load(path_to_exported_model)
exporter.export(model, path_to_exported_model, do_split_external_data=True)
return path_to_exported_model
if deployment_target == ExportTargets.onnx.value:
return path_to_exported_model
else:
raise ValueError(f"Unsupported deployment target: {deployment_target}")
21,097 | import glob
import logging
import os.path
from collections import OrderedDict
from pathlib import Path
from typing import Callable, List, Optional, Union
import numpy
from sparseml.export.export_data import InputsNames, LabelNames, OutputsNames
from sparseml.export.helpers import ONNX_MODEL_NAME, onnx_data_files
from sparsezoo.utils.numpy import load_numpy
_LOGGER = logging.getLogger(__name__)
def validate_structure_external_data(
    deployment_directory_path: Union[str, Path], onnx_model_name: Union[str, Path]
):
    """Log how many external-data files accompany the exported ONNX model.

    Looks for `<model>.data` shards next to the model inside the deployment
    directory and reports their count when any are found.
    """
    data_file_name = onnx_model_name.replace(".onnx", ".data")
    external_data_files = onnx_data_files(data_file_name, deployment_directory_path)
    if external_data_files:
        num_files = len(external_data_files)
        _LOGGER.info(f"Exported model contains {num_files} external data files")
def check_file_presence(file_paths: List[str]) -> List[str]:
    """
    Check if the files exist in the given paths.

    :param file_paths: The list of paths to check.
        Paths can be either directories or files.
    :return: The list of missing file paths, in the order given.
    """
    # os.path.exists covers both files and directories
    return [file_path for file_path in file_paths if not os.path.exists(file_path)]
class LabelNames(Enum):
# directory basename and per-file prefix used when exporting sample labels
basename = "sample-labels"
filename = "lab"
class OutputsNames(Enum):
# directory basename and per-file prefix used when exporting sample outputs
basename = "sample-outputs"
filename = "out"
class InputsNames(Enum):
# directory basename and per-file prefix used when exporting sample inputs
basename = "sample-inputs"
filename = "inp"
ONNX_MODEL_NAME = "model.onnx"
The provided code snippet includes necessary dependencies for implementing the `validate_structure` function. Write a Python function `def validate_structure( target_path: Union[str, Path], deployment_directory_name: str, onnx_model_name: str, deployment_directory_files_mandatory: List[str], deployment_directory_files_optional: Optional[List[str]] = None, )` to solve the following problem:
Validates the structure of the target_path by checking if the expected files that should exist as a result of the export are present. :param target_path: The directory where the exported files are stored. :param deployment_directory_name: The name of the deployment directory. :param onnx_model_name: The name of the ONNX model. :param deployment_directory_files_mandatory: The list of files that should be present in the deployment directory. :param deployment_directory_files_optional: The list of files that can be optionally present in the deployment directory.
Here is the function:
def validate_structure(
    target_path: Union[str, Path],
    deployment_directory_name: str,
    onnx_model_name: str,
    deployment_directory_files_mandatory: List[str],
    deployment_directory_files_optional: Optional[List[str]] = None,
):
    """
    Validate that the files expected to exist as a result of the export
    are present under target_path.

    Missing optional files are logged as warnings; a missing mandatory
    file raises a FileNotFoundError.

    :param target_path: The directory where the exported files are stored.
    :param deployment_directory_name: The name of the deployment directory.
    :param onnx_model_name: The name of the ONNX model.
    :param deployment_directory_files_mandatory: The list of files that
        should be present in the deployment directory.
    :param deployment_directory_files_optional: The list of files that
        can be optionally present in the deployment directory.
    """
    deployment_dir = os.path.join(target_path, deployment_directory_name)
    validate_structure_external_data(deployment_dir, onnx_model_name=onnx_model_name)

    # the default ONNX model name may have been overridden by the caller,
    # so substitute it before resolving full paths
    mandatory_paths = {
        os.path.join(
            deployment_dir,
            onnx_model_name if file_name == ONNX_MODEL_NAME else file_name,
        )
        for file_name in deployment_directory_files_mandatory
    }

    # sample inputs/outputs/labels live directly under target_path
    optional_paths = {
        os.path.join(target_path, names.basename.value)
        for names in (InputsNames, OutputsNames, LabelNames)
    }
    for file_name in deployment_directory_files_optional or []:
        optional_paths.add(os.path.join(deployment_dir, file_name))

    for file_path in check_file_presence(optional_paths):
        _LOGGER.warning(f"File {file_path} is missing.")
    for file_path in check_file_presence(mandatory_paths):
        raise FileNotFoundError(f"File {file_path} is missing.")
21,098 | import glob
import logging
import os.path
from collections import OrderedDict
from pathlib import Path
from typing import Callable, List, Optional, Union
import numpy
from sparseml.export.export_data import InputsNames, LabelNames, OutputsNames
from sparseml.export.helpers import ONNX_MODEL_NAME, onnx_data_files
from sparsezoo.utils.numpy import load_numpy
_LOGGER = logging.getLogger(__name__)
def top_k_match(
    ground_truth: numpy.ndarray, prediction: numpy.ndarray, k: int = 2
) -> bool:
    """
    Checks if the indices of the top k predictions match the indices of
    the top k ground truth values (compared in ascending-score order).

    :param ground_truth: The ground truth array.
    :param prediction: The prediction array.
    :param k: The number of top predictions to consider.
    :return: True if the top k indices agree, False otherwise.
    """
    top_k_prediction = numpy.argsort(prediction.flatten())[-k:]
    top_k_ground_truth = numpy.argsort(ground_truth.flatten())[-k:]
    # cast: numpy.all returns numpy.bool_, while the signature promises plain bool
    return bool(numpy.all(top_k_prediction == top_k_ground_truth))
class OutputsNames(Enum):
# directory basename and per-file prefix used when exporting sample outputs
basename = "sample-outputs"
filename = "out"
class InputsNames(Enum):
# directory basename and per-file prefix used when exporting sample inputs
basename = "sample-inputs"
filename = "inp"
The provided code snippet includes necessary dependencies for implementing the `validate_correctness` function. Write a Python function `def validate_correctness( target_path: Union[str, Path], directory: Union[str, Path], onnx_model_name: str, validation_function: Callable[..., bool] = top_k_match, ) -> bool` to solve the following problem:
Validates the correctness of the exported ONNX model by running it on a set of sample inputs and comparing the resulting outputs using a validation function. :param target_path: The directory where the sample inputs and outputs are stored. :param directory: The directory where the ONNX model is stored. :param onnx_model_name: The name of the ONNX model. :param validation_function: The function that will be used to validate the outputs. :return: True if the validation passes, False otherwise.
Here is the function:
def validate_correctness(
target_path: Union[str, Path],
directory: Union[str, Path],
onnx_model_name: str,
validation_function: Callable[..., bool] = top_k_match,
) -> bool:
"""
Validates the correctness of the exported ONNX model by
running it on a set of sample inputs and comparing the
resulting outputs using a validation function.
:param target_path: The directory where the sample inputs and outputs are stored.
:param directory: The directory where the ONNX model is stored.
:param onnx_model_name: The name of the ONNX model.
:param validation_function: The function that will be used to validate the outputs.
:return: True if the validation passes, False otherwise.
:raises ImportError: if onnxruntime is not installed.
"""
try:
import onnxruntime as ort
except ImportError as err:
raise ImportError(
"The onnxruntime package is required for the correctness validation. "
"Please install it using 'pip install sparseml[onnxruntime]'."
) from err
sample_inputs_path = os.path.join(target_path, InputsNames.basename.value)
sample_outputs_path = os.path.join(target_path, OutputsNames.basename.value)
# sorted() keeps the input and output files aligned pairwise by file name
sample_inputs_files = sorted(glob.glob(os.path.join(sample_inputs_path, "*")))
sample_outputs_files = sorted(glob.glob(os.path.join(sample_outputs_path, "*")))
session = ort.InferenceSession(os.path.join(directory, onnx_model_name))
validations = (
[]
) # stores boolean per sample pair (True if validation passes, False otherwise)
for sample_input_file, sample_output_file in zip(
sample_inputs_files, sample_outputs_files
):
sample_input = load_numpy(sample_input_file)
sample_output = load_numpy(sample_output_file)
# stored samples carry no batch dimension; add one before inference
sample_input_with_batch_dim = OrderedDict(
(key, numpy.expand_dims(value, 0)) for key, value in sample_input.items()
)
outputs = session.run(None, sample_input_with_batch_dim)
if isinstance(outputs, list):
# a sample passes only if every output tensor validates
validations_sample = []
for o1, o2 in zip(outputs, sample_output.values()):
validations_sample.append(validation_function(o1, o2))
validations.append(all(validations_sample))
else:
validations.append(validation_function(outputs, sample_output))
if not all(validations):
_LOGGER.error(
f"Correctness validation failed for exported model: {onnx_model_name}. "
"The model outputs match the expected outputs "
f"only for {sum(validations)}/{len(validations)} samples "
f"(according to the validation function: {validation_function.__name__}. "
f"Some failures are expected in the case of quantized models, but not in "
f"the case of non-quantized models. If in doubt, validate the performance "
f"of the exported ONNX model using the NeuralMagic evaluation module."
)
return False
_LOGGER.info(
f"Successfully validated the exported model on all {len(validations)} samples."
)
return True
21,099 | from typing import Optional
from sparseml.pytorch import recipe_template
The provided code snippet includes necessary dependencies for implementing the `create_recipe` function. Write a Python function `def create_recipe( model: Optional["Module"] = None, # noqa: F821 pruning: str = "true", quant: bool = True, lr_func: str = "linear", **recipe_args, ) -> str` to solve the following problem:
Convenience function to create a recipe based on supplied args and kwargs :param model: an instantiated PyTorch Module, or the local path to a torch.jit loadable *.pt file, if supplied then the recipe is built according to this architecture :param pruning: optional pruning algorithm to use in the recipe, can be any of the following, `true` (represents Magnitude/Global-Magnitude pruning according to global_sparsity), `false` (No pruning), `acdc`, `mfac`, `movement`, `obs` or `constant`. Defaults to `true` :param quant: `True` if quantization needs to be applied else `False`. Defaults to `True` :param lr_func: the learning rate schedule function. Defaults to `linear` :param recipe_args: additional arguments to pass to recipe_template :return: a valid recipe
Here is the function:
def create_recipe(
    model: Optional["Module"] = None,  # noqa: F821
    pruning: str = "true",
    quant: bool = True,
    lr_func: str = "linear",
    **recipe_args,
) -> str:
    """
    Build a recipe string via :func:`recipe_template`.

    :param model: optional instantiated PyTorch Module, or the local path to
        a torch.jit loadable *.pt file; when given, the recipe is built
        according to that architecture
    :param pruning: pruning algorithm for the recipe, one of: "true"
        (magnitude/global-magnitude pruning according to global_sparsity),
        "false" (no pruning), "acdc", "mfac", "movement", "obs" or
        "constant". Defaults to "true"
    :param quant: whether quantization should be applied. Defaults to True
    :param lr_func: the learning rate schedule function. Defaults to "linear"
    :param recipe_args: additional keyword arguments forwarded to
        recipe_template
    :return: a valid recipe string
    """
    # thin convenience wrapper: map this API's parameter names onto
    # recipe_template's (quant -> quantization, lr_func -> lr)
    return recipe_template(
        pruning=pruning,
        quantization=quant,
        lr=lr_func,
        model=model,
        **recipe_args,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.