repo
stringlengths
1
99
file
stringlengths
13
215
code
stringlengths
12
59.2M
file_length
int64
12
59.2M
avg_line_length
float64
3.82
1.48M
max_line_length
int64
12
2.51M
extension_type
stringclasses
1 value
CLUE
CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_xlnet.py
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
    """Build a map from TF checkpoint variable names to PyTorch parameters.

    A map is used to keep the PyTorch model as identical to the original
    PyTorch model as possible.

    Args:
        model: the PyTorch XLNet model (or a head model wrapping it in
            a ``transformer`` attribute).
        config: the ``XLNetConfig`` of the model (``untie_r`` and
            ``finetuning_task`` are read here).
        tf_weights: dict of TF variable name -> numpy array, used to
            check which optional variables exist in the checkpoint.

    Returns:
        dict mapping TF variable names to PyTorch parameters (or, for the
        relative-position biases, to lists of per-layer parameters).
    """
    tf_to_pt_map = {}

    if hasattr(model, 'transformer'):
        if hasattr(model, 'lm_loss'):
            # We will load also the output bias
            tf_to_pt_map['model/lm_loss/bias'] = model.lm_loss.bias
        # NOTE: 'sequnece_summary' is intentionally misspelled — it matches the
        # variable name used in the original TF checkpoint. Do not "fix" it.
        if hasattr(model, 'sequence_summary') and 'model/sequnece_summary/summary/kernel' in tf_weights:
            # We will load also the sequence summary
            tf_to_pt_map['model/sequnece_summary/summary/kernel'] = model.sequence_summary.summary.weight
            tf_to_pt_map['model/sequnece_summary/summary/bias'] = model.sequence_summary.summary.bias
        if hasattr(model, 'logits_proj') and config.finetuning_task is not None \
                and 'model/regression_{}/logit/kernel'.format(config.finetuning_task) in tf_weights:
            tf_to_pt_map['model/regression_{}/logit/kernel'.format(config.finetuning_task)] = model.logits_proj.weight
            tf_to_pt_map['model/regression_{}/logit/bias'.format(config.finetuning_task)] = model.logits_proj.bias

        # Now load the rest of the transformer
        model = model.transformer

    # Embeddings and output
    tf_to_pt_map.update({
        'model/transformer/word_embedding/lookup_table': model.word_embedding.weight,
        'model/transformer/mask_emb/mask_emb': model.mask_emb})

    # Transformer blocks
    for i, b in enumerate(model.layer):
        layer_str = "model/transformer/layer_%d/" % i
        tf_to_pt_map.update({
            layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
            layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
            layer_str + "rel_attn/o/kernel": b.rel_attn.o,
            layer_str + "rel_attn/q/kernel": b.rel_attn.q,
            layer_str + "rel_attn/k/kernel": b.rel_attn.k,
            layer_str + "rel_attn/r/kernel": b.rel_attn.r,
            layer_str + "rel_attn/v/kernel": b.rel_attn.v,
            layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
            layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
            layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
            layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
            layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
            layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
        })

    # Relative positioning biases: one per layer when untied, shared otherwise.
    if config.untie_r:
        r_r_list = []
        r_w_list = []
        r_s_list = []
        seg_embed_list = []
        for b in model.layer:
            r_r_list.append(b.rel_attn.r_r_bias)
            r_w_list.append(b.rel_attn.r_w_bias)
            r_s_list.append(b.rel_attn.r_s_bias)
            seg_embed_list.append(b.rel_attn.seg_embed)
    else:
        r_r_list = [model.r_r_bias]
        r_w_list = [model.r_w_bias]
        r_s_list = [model.r_s_bias]
        seg_embed_list = [model.seg_embed]
    tf_to_pt_map.update({
        'model/transformer/r_r_bias': r_r_list,
        'model/transformer/r_w_bias': r_w_list,
        'model/transformer/r_s_bias': r_s_list,
        'model/transformer/seg_embed': seg_embed_list})
    return tf_to_pt_map
r_w_list = [] r_s_list = [] seg_embed_list = [] for b in model.layer: r_r_list.append(b.rel_attn.r_r_bias) r_w_list.append(b.rel_attn.r_w_bias) r_s_list.append(b.rel_attn.r_s_bias) seg_embed_list.append(b.rel_attn.seg_embed) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] r_s_list = [model.r_s_bias] seg_embed_list = [model.seg_embed] tf_to_pt_map.update({ 'model/transformer/r_r_bias': r_r_list, 'model/transformer/r_w_bias': r_w_list, 'model/transformer/r_s_bias': r_s_list, 'model/transformer/seg_embed': seg_embed_list}) return tf_to_pt_map def load_tf_weights_in_xlnet(model, config, tf_path): """ Load tf checkpoints in a pytorch model """ try: import numpy as np import tensorflow as tf except ImportError: logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions.") raise # Load weights from TF model init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info("Importing {}".format(name)) if name not in tf_weights: logger.info("{} not in tf pre-trained weights, skipping".format(name)) continue array = tf_weights[name] # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if 'kernel' in name and ('ff' in name or 'summary' in name or 'logit' in name): logger.info("Transposing") array = np.transpose(array) if isinstance(pointer, list): # Here we will split the TF weigths assert len(pointer) == array.shape[0] for i, p_i in enumerate(pointer): arr_i = array[i, ...] 
try: assert p_i.shape == arr_i.shape except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise logger.info("Initialize PyTorch weight {} for layer {}".format(name, i)) p_i.data = torch.from_numpy(arr_i) else: try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + '/Adam', None) tf_weights.pop(name + '/Adam_1', None) logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys()))) return model def gelu(x): """ Implementation of the gelu activation function. XLNet is using OpenAI GPT's gelu (not exactly the same as BERT) Also see https://arxiv.org/abs/1606.08415 """ cdf = 0.5 * (1.0 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) return x * cdf def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} try: from apex.normalization.fused_layer_norm import FusedLayerNorm as XLNetLayerNorm except (ImportError, AttributeError) as e: logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") from torch.nn import LayerNorm as XLNetLayerNorm class XLNetRelativeAttention(nn.Module): def __init__(self, config): super(XLNetRelativeAttention, self).__init__() self.output_attentions = config.output_attentions if config.d_model % config.n_head != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.d_model, config.n_head)) self.n_head = config.n_head self.d_head = config.d_head self.d_model = config.d_model self.scale = 1 / (config.d_head ** 0.5) self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.v = 
class XLNetRelativeAttention(nn.Module):
    """Two-stream relative-position multi-head attention used by XLNet layers."""

    def __init__(self, config):
        super(XLNetRelativeAttention, self).__init__()
        self.output_attentions = config.output_attentions

        if config.d_model % config.n_head != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.d_model, config.n_head))

        self.n_head = config.n_head
        self.d_head = config.d_head
        self.d_model = config.d_model
        self.scale = 1 / (config.d_head ** 0.5)

        # Projection weights stored as (d_model, n_head, d_head) tensors and
        # applied via einsum (matching the original TF parameterization).
        self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
        self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))

        # Relative attention biases and the 2-way segment embedding.
        self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
        self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))

        self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout)

    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def rel_shift(x, klen=-1):
        """perform relative shift to form the relative attention score."""
        x_size = x.shape

        x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
        x = x[1:, ...]
        x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
        # x = x[:, 0:klen, :, :]
        x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))

        return x

    @staticmethod
    def rel_shift_bnij(x, klen=-1):
        """Relative shift for scores laid out as [bsz, n_head, qlen, klen]."""
        x_size = x.shape

        x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
        x = x[:, :, 1:, :]
        x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
        # Note: the tensor-slice form was faster in my testing than torch.index_select
        #   However, tracing doesn't like the nature of the slice, and if klen changes
        #   during the run then it'll fail, whereas index_select will be fine.
        x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
        # x = x[:, :, :, :klen]

        return x

    def rel_attn_core(self, q_head, k_head_h, v_head_h, k_head_r,
                      seg_mat=None, attn_mask=None, head_mask=None):
        """Core relative positional attention operations."""
        # content based attention score
        ac = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->bnij', q_head + self.r_r_bias, k_head_r)
        bd = self.rel_shift_bnij(bd, klen=ac.shape[3])

        # segment based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->ibns', q_head + self.r_s_bias, self.seg_embed)
            ef = torch.einsum('ijbs,ibns->bnij', seg_mat, ef)

        # merge attention scores and perform masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
            # Use a smaller additive constant in fp16 to avoid overflow.
            if attn_mask.dtype == torch.float16:
                attn_score = attn_score - 65500 * torch.einsum('ijbn->bnij', attn_mask)
            else:
                attn_score = attn_score - 1e30 * torch.einsum('ijbn->bnij', attn_mask)

        # attention probability
        attn_prob = F.softmax(attn_score, dim=3)
        attn_prob = self.dropout(attn_prob)

        # Mask heads if we want to
        if head_mask is not None:
            attn_prob = attn_prob * torch.einsum('ijbn->bnij', head_mask)

        # attention output
        attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, v_head_h)

        if self.output_attentions:
            return attn_vec, torch.einsum('bnij->ijbn', attn_prob)

        return attn_vec

    def post_attention(self, h, attn_vec, residual=True):
        """Post-attention processing."""
        # post-attention projection (back to `d_model`)
        attn_out = torch.einsum('ibnd,hnd->ibh', attn_vec, self.o)

        attn_out = self.dropout(attn_out)
        if residual:
            attn_out = attn_out + h
        output = self.layer_norm(attn_out)

        return output

    def forward(self, h, g, attn_mask_h, attn_mask_g,
                r, seg_mat, mems=None, target_mapping=None, head_mask=None):
        if g is not None:
            ###### Two-stream attention with relative positional encoding.
            # content based attention score
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content-based key head
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)

            # content-based value head
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # position-based key head
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            ##### h-stream
            # content-stream query head
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)

            # core attention ops
            attn_vec_h = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r,
                seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec_h, attn_prob_h = attn_vec_h

            # post processing
            output_h = self.post_attention(h, attn_vec_h)

            ##### g-stream
            # query-stream query head
            q_head_g = torch.einsum('ibh,hnd->ibnd', g, self.q)

            # core attention ops
            if target_mapping is not None:
                q_head_g = torch.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r,
                    seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

                attn_vec_g = torch.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
            else:
                attn_vec_g = self.rel_attn_core(
                    q_head_g, k_head_h, v_head_h, k_head_r,
                    seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask)

                if self.output_attentions:
                    attn_vec_g, attn_prob_g = attn_vec_g

            # post processing
            output_g = self.post_attention(g, attn_vec_g)

            if self.output_attentions:
                attn_prob = attn_prob_h, attn_prob_g

        else:
            ###### Multi-head attention with relative positional encoding
            if mems is not None and mems.dim() > 1:
                cat = torch.cat([mems, h], dim=0)
            else:
                cat = h

            # content heads
            q_head_h = torch.einsum('ibh,hnd->ibnd', h, self.q)
            k_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.k)
            v_head_h = torch.einsum('ibh,hnd->ibnd', cat, self.v)

            # positional heads
            k_head_r = torch.einsum('ibh,hnd->ibnd', r, self.r)

            # core attention ops
            attn_vec = self.rel_attn_core(
                q_head_h, k_head_h, v_head_h, k_head_r,
                seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask)

            if self.output_attentions:
                attn_vec, attn_prob = attn_vec

            # post processing
            output_h = self.post_attention(h, attn_vec)
            output_g = None

        outputs = (output_h, output_g)
        if self.output_attentions:
            outputs = outputs + (attn_prob,)
        return outputs
self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask) if self.output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec) output_g = None outputs = (output_h, output_g) if self.output_attentions: outputs = outputs + (attn_prob,) return outputs class XLNetFeedForward(nn.Module): def __init__(self, config): super(XLNetFeedForward, self).__init__() self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps) self.layer_1 = nn.Linear(config.d_model, config.d_inner) self.layer_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.dropout) if isinstance(config.ff_activation, str) or \ (sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode)): self.activation_function = ACT2FN[config.ff_activation] else: self.activation_function = config.ff_activation def forward(self, inp): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output) output = self.layer_2(output) output = self.dropout(output) output = self.layer_norm(output + inp) return output class XLNetLayer(nn.Module): def __init__(self, config): super(XLNetLayer, self).__init__() self.rel_attn = XLNetRelativeAttention(config) self.ff = XLNetFeedForward(config) self.dropout = nn.Dropout(config.dropout) def forward(self, output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None): outputs = self.rel_attn(output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=mems, target_mapping=target_mapping, head_mask=head_mask) output_h, output_g = outputs[:2] if output_g is not None: output_g = self.ff(output_g) output_h = self.ff(output_h) outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there return outputs class XLNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple 
interface for dowloading and loading pretrained models. """ config_class = XLNetConfig pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_xlnet base_model_prefix = "transformer" def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, XLNetLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, XLNetRelativeAttention): for param in [module.q, module.k, module.v, module.o, module.r, module.r_r_bias, module.r_s_bias, module.r_w_bias, module.seg_embed]: param.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XLNetModel): module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range) XLNET_START_DOCSTRING = r""" The XLNet model was proposed in `XLNet: Generalized Autoregressive Pretraining for Language Understanding`_ by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. XLnet is an extension of the Transformer-XL model pre-trained using an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order. The specific attention pattern can be controlled at training and test time using the `perm_mask` input. Do to the difficulty of training a fully auto-regressive model over various factorization order, XLNet is pretrained using only a sub-set of the output tokens as target which are selected with the `target_mapping` input. To use XLNet for sequential decoding (i.e. 
not in fully bi-directional setting), use the `perm_mask` and `target_mapping` inputs to control the attention span and outputs (see examples in `examples/run_generation.py`) This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`XLNet: Generalized Autoregressive Pretraining for Language Understanding`: http://arxiv.org/abs/1906.08237 .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XLNET_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. XLNet is a model with relative position embeddings so you can either pad the inputs on the right or on the left. Indices can be obtained using :class:`transformers.XLNetTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). The type indices in XLNet are NOT selected in the vocabulary, they can be arbitrary numbers and the important thing is that they should be different for tokens which belong to different segments. The model will compute relative segment differences from the given type indices: 0 if the segment id of two tokens are the same, 1 if not. 
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **mems**: (`optional`) list of ``torch.FloatTensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as output by the model (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context. To activate mems you need to set up config.mem_len to a positive value which will be the max number of tokens in the memory output by the model. E.g. `model = XLNetModel.from_pretrained('xlnet-base-case, mem_len=1024)` will instantiate a model which can use up to 1024 tokens of memory (in addition to the input it self). **perm_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, sequence_length)``: Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``: If ``perm_mask[k, i, j] = 0``, i attend to j in batch k; if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k. If None, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). **target_mapping**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_predict, sequence_length)``: Mask to indicate the output tokens to use. If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). 
The type indices in XLNet are NOT selected in the vocabulary, they can be arbitrary numbers and the important thing is that they should be different for tokens which belong to different segments. The model will compute relative segment differences from the given type indices: 0 if the segment id of two tokens are the same, 1 if not. **input_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding. Kept for compatibility with the original code base. You can only uses one of `input_mask` and `attention_mask` Mask values selected in ``[0, 1]``: ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. """ @add_start_docstrings("The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.", XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) class XLNetModel(XLNetPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **mems**: (`optional`, returned when ``config.mem_len > 0``) list of ``torch.FloatTensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context. See details in the docstring of the `mems` input above. 
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') model = XLNetModel.from_pretrained('xlnet-large-cased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(XLNetModel, self).__init__(config) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.output_past = config.output_past self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.word_embedding = nn.Embedding(config.n_token, config.d_model) self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model)) self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) self.init_weights() def _resize_token_embeddings(self, new_num_tokens): self.word_embedding = self._get_resized_embeddings(self.word_embedding, new_num_tokens) return self.word_embedding def _prune_heads(self, heads_to_prune): raise NotImplementedError def 
create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. Args: qlen: TODO Lysandre didn't fill mlen: TODO Lysandre didn't fill :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ attn_mask = torch.ones([qlen, qlen]) mask_up = torch.triu(attn_mask, diagonal=1) attn_mask_pad = torch.zeros([qlen, mlen]) ret = torch.cat([attn_mask_pad, mask_up], dim=1) if self.same_length: mask_lo = torch.tril(attn_mask, diagonal=-1) ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1) ret = ret.to(next(self.parameters())) return ret def cache_mem(self, curr_out, prev_mem): """cache hidden states into memory.""" if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[:self.reuse_len] if prev_mem is None: new_mem = curr_out[-self.mem_len:] else: new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len:] return new_mem.detach() @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = torch.einsum('i,d->id', pos_seq, inv_freq) pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = pos_emb.expand(-1, bsz, -1) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): """create relative positional encoding.""" freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float) inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model)) if self.attn_type == 'bi': # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == 'uni': # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type)) if self.bi_data: fwd_pos_seq = torch.arange(beg, end, -1.0, 
dtype=torch.float) bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float) if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) if bsz is not None: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz//2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz//2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1) else: fwd_pos_seq = torch.arange(beg, end, -1.0) if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) pos_emb = pos_emb.to(next(self.parameters())) return pos_emb def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None): # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end input_ids = input_ids.transpose(0, 1).contiguous() token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None qlen, bsz = input_ids.shape[0], input_ids.shape[1] mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = next(self.parameters()).dtype device = next(self.parameters()).device 
##### Attention mask # causal attention mask if self.attn_type == 'uni': attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == 'bi': attn_mask = None else: raise ValueError('Unsupported attention type: {}'.format(self.attn_type)) # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatbility with BERT). Please choose one." if input_mask is None and attention_mask is not None: input_mask = 1.0 - attention_mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask) data_mask = torch.cat([mems_mask, data_mask], dim=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = (attn_mask > 0).to(dtype_float) if attn_mask is not None: non_tgt_mask = -torch.eye(qlen).to(attn_mask) if mlen > 0: non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1) non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask) else: non_tgt_mask = None ##### Word embeddings and prepare h & g hidden states word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q) else: output_g = None ##### 
Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device) cat_ids = torch.cat([mem_pad, token_type_ids], dim=0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long() seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float) else: seg_mat = None ##### Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = self.dropout(pos_emb) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] hidden_states = [] for i, layer_module in enumerate(self.layer): if self.mem_len is not None and self.mem_len > 0 and self.output_past: # cache new mems new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if self.output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module(output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i]) output_h, output_g = outputs[:2] if self.output_attentions: 
                attentions.append(outputs[2])

        # Add last hidden state
        if self.output_hidden_states:
            hidden_states.append((output_h, output_g) if output_g is not None else output_h)

        output = self.dropout(output_g if output_g is not None else output_h)

        # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
        outputs = (output.permute(1, 0, 2).contiguous(),)

        if self.mem_len is not None and self.mem_len > 0 and self.output_past:
            outputs = outputs + (new_mems,)

        if self.output_hidden_states:
            if output_g is not None:
                # NOTE(review): this double generator flattens the (output_h, output_g)
                # pairs into one tuple, so the h/g pairing is lost in the returned
                # hidden states — confirm this matches the documented output shape.
                hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
            else:
                hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
            outputs = outputs + (hidden_states,)
        if self.output_attentions:
            # Attentions were collected as [qlen x klen x bsz x n_head]; transpose
            # back to [bsz x n_head x qlen x klen] for the caller.
            attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
            outputs = outputs + (attentions,)

        return outputs  # outputs, (new_mems), (hidden_states), (attentions)


@add_start_docstrings("""XLNet Model with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetLMHeadModel(XLNetPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')

        # We show how to setup inputs to predict a next token using a bi-directional context.
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>")).unsqueeze(0)  # We will predict the masked token
        perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
        perm_mask[:, :, -1] = 1.0  # Previous tokens don't see last token
        target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)  # Shape [1, 1, seq_length] => let's predict one token
        target_mapping[0, 0, -1] = 1.0  # Our first (and only) prediction will be the last token of the sequence (the masked token)

        outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
        next_token_logits = outputs[0]  # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]

    """
    def __init__(self, config):
        """Build the base XLNet transformer plus a vocabulary-projection head
        whose weights are tied to the input embeddings (see ``tie_weights``)."""
        super(XLNetLMHeadModel, self).__init__(config)
        self.attn_type = config.attn_type
        self.same_length = config.same_length

        self.transformer = XLNetModel(config)
        # d_model -> vocab-size projection; bias is kept even though the weight
        # matrix itself is shared with the embedding table.
        self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the embeddings
            (the LM head reuses the input embedding matrix as its weight).
        """
        self._tie_or_clone_weights(self.lm_loss, self.transformer.word_embedding)

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None, labels=None):
        # Run the base model, then project hidden states onto the vocabulary.
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               mems=mems,
                                               perm_mask=perm_mask,
                                               target_mapping=target_mapping,
                                               token_type_ids=token_type_ids,
                                               input_mask=input_mask,
                                               head_mask=head_mask)

        logits = self.lm_loss(transformer_outputs[0])

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        if labels is not None:
            # Flatten the tokens; -1 labels are masked out (matches the docstring above).
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(logits.view(-1, logits.size(-1)),
                            labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)


@add_start_docstrings("""XLNet
Model with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
            If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification (or regression if config.num_labels==1) loss.
        **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]

    """
    def __init__(self, config):
        """Base XLNet transformer + sequence-summary pooling + a linear
        classification/regression head of size ``config.num_labels``."""
        super(XLNetForSequenceClassification, self).__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        # Pools the per-token hidden states into one vector per sequence.
        self.sequence_summary = SequenceSummary(config)
        self.logits_proj = nn.Linear(config.d_model, config.num_labels)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None, labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               mems=mems,
                                               perm_mask=perm_mask,
                                               target_mapping=target_mapping,
                                               token_type_ids=token_type_ids,
                                               input_mask=input_mask,
                                               head_mask=head_mask)
        output = transformer_outputs[0]

        # Pool token states to a single vector, then project to label logits.
        output = self.sequence_summary(output)
        logits = self.logits_proj(output)

        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)


@add_start_docstrings("""XLNet Model with a multiple choice
classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
    r"""
    Inputs:
        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Indices of input sequence tokens in the vocabulary.
            The second dimension of the input (`num_choices`) indicates the number of choices to scores.
        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Segment token indices to indicate first and second portions of the inputs.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
            Mask to avoid performing attention on padding token indices.
            The second dimension of the input (`num_choices`) indicates the number of choices to score.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        model = XLNetForMultipleChoice.from_pretrained('xlnet-base-cased')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]

    """
    def __init__(self, config):
        """Base XLNet transformer + sequence-summary pooling + a linear head
        producing one score per choice (softmaxed over choices by the loss)."""
        super(XLNetForMultipleChoice, self).__init__(config)

        self.transformer = XLNetModel(config)
        self.sequence_summary = SequenceSummary(config)
        # One scalar score per (choice) sequence; choices are compared via the
        # reshape to (batch_size, num_choices) below.
        self.logits_proj = nn.Linear(config.d_model, 1)

        self.init_weights()

    def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None,
                mems=None, perm_mask=None, target_mapping=None, labels=None, head_mask=None):
        num_choices = input_ids.shape[1]

        # Fold the choice dimension into the batch so the base model sees
        # (batch_size * num_choices, sequence_length).
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None

        transformer_outputs = self.transformer(flat_input_ids, token_type_ids=flat_token_type_ids,
                                               input_mask=flat_input_mask, attention_mask=flat_attention_mask,
                                               mems=mems, perm_mask=perm_mask, target_mapping=target_mapping,
                                               head_mask=head_mask)

        output = transformer_outputs[0]

        output = self.sequence_summary(output)
        logits = self.logits_proj(output)
        # Un-fold back to one score per choice.
        reshaped_logits = logits.view(-1, num_choices)

        outputs = (reshaped_logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD
    (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForQuestionAnsweringSimple.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss, start_scores, end_scores = outputs[:3]

    """
    def __init__(self, config):
        """Base XLNet transformer + a single linear layer producing two logits
        per token (span start / span end)."""
        super(XLNetForQuestionAnsweringSimple, self).__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XLNetModel(config)
        # hidden_size -> 2 logits per token (start, end); split in forward().
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None,
                start_positions=None, end_positions=None):

        outputs = self.transformer(input_ids,
                                   attention_mask=attention_mask,
                                   mems=mems,
                                   perm_mask=perm_mask,
                                   target_mapping=target_mapping,
                                   token_type_ids=token_type_ids,
                                   input_mask=input_mask,
                                   head_mask=head_mask)

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Last dim holds (start, end) per token; split into two (bsz, slen) tensors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        # NOTE(review): `outputs[2:]` skips outputs[1] (the mems, per the base
        # model's return order) — other heads in this file keep `[1:]`; confirm
        # this is intentional.
        outputs = (start_logits, end_logits,) + outputs[2:]
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (clamped to ignored_index == seq length, which CrossEntropyLoss skips below)
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            outputs = (total_loss,) + outputs

        return outputs  # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)


@add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD
    (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """,
    XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
    r"""
        **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
            Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
        **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means token should be masked. 0.0 mean token is not masked.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
            Indices for the top config.start_n_top start token possibilities (beam-search).
        **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size,)``
            Log probabilities for the ``is_impossible`` label of the answers.
        **mems**: (`optional`, returned when ``config.mem_len > 0``)
            list of ``torch.FloatTensor`` (one for each layer):
            that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context.
            See details in the docstring of the `mems` input above.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
        model = XLNetForQuestionAnswering.from_pretrained('xlnet-large-cased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        start_positions = torch.tensor([1])
        end_positions = torch.tensor([3])
        outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        loss = outputs[0]

    """
    def __init__(self, config):
        """Base XLNet transformer + SQuAD-style pooler heads: start logits,
        start-conditioned end logits, and an answerability classifier."""
        super(XLNetForQuestionAnswering, self).__init__(config)
        # Beam widths used at inference time for the start/end search.
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.transformer = XLNetModel(config)
        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

        self.init_weights()

    def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None,
                token_type_ids=None, input_mask=None, head_mask=None,
                start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None,):

        transformer_outputs = self.transformer(input_ids,
                                               attention_mask=attention_mask,
                                               mems=mems,
                                               perm_mask=perm_mask,
                                               target_mapping=target_mapping,
                                               token_type_ids=token_type_ids,
                                               input_mask=input_mask,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if there are in it

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            outputs = (total_loss,) + outputs

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1)  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states)  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1)  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)  # get the representation of START as weighted sum of hidden states
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)  # Shape (batch size,): one single `cls_logits` for each sample

            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs

        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
72,560
52.002922
169
py
CLUE
CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_xlm.py
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XLM model. """ from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import itertools import numpy as np import torch from torch import nn from torch.nn import functional as F from torch.nn import CrossEntropyLoss, MSELoss from .modeling_utils import PreTrainedModel, prune_linear_layer, SequenceSummary, SQuADHead from .configuration_xlm import XLMConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) XLM_PRETRAINED_MODEL_ARCHIVE_MAP = { 'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-pytorch_model.bin", 'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-pytorch_model.bin", 'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-pytorch_model.bin", 'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-pytorch_model.bin", 'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-pytorch_model.bin", 'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-pytorch_model.bin", 'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-pytorch_model.bin", 'xlm-clm-ende-1024': 
"https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-pytorch_model.bin", 'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-pytorch_model.bin", 'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-pytorch_model.bin", } def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([ [pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos) ]) out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() out.requires_grad = False def gelu(x): """ GELU activation https://arxiv.org/abs/1606.08415 https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/model_pytorch.py#L14 https://github.com/huggingface/transformers/blob/master/modeling.py """ # return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0))) def get_masks(slen, lengths, causal, padding_mask=None): """ Generate hidden states mask, and optionally an attention mask. 
""" bs = lengths.size(0) if padding_mask is not None: mask = padding_mask else: assert lengths.max().item() <= slen alen = torch.arange(slen, dtype=torch.long, device=lengths.device) mask = alen < lengths[:, None] # attention mask is the same as mask, or triangular inferior attention (causal) if causal: attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None] else: attn_mask = mask # sanity check assert mask.size() == (bs, slen) assert causal is False or attn_mask.size() == (bs, slen, slen) return mask, attn_mask class MultiHeadAttention(nn.Module): NEW_ID = itertools.count() def __init__(self, n_heads, dim, config): super(MultiHeadAttention, self).__init__() self.layer_id = next(MultiHeadAttention.NEW_ID) self.output_attentions = config.output_attentions self.dim = dim self.n_heads = n_heads self.dropout = config.attention_dropout assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(dim, dim) self.k_lin = nn.Linear(dim, dim) self.v_lin = nn.Linear(dim, dim) self.out_lin = nn.Linear(dim, dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return mask = torch.ones(self.n_heads, attention_head_size) heads = set(heads) - self.pruned_heads for head in heads: head -= sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.q_lin = prune_linear_layer(self.q_lin, index) self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.dim = attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, input, mask, kv=None, cache=None, head_mask=None): """ Self-attention (if kv is None) or attention over source sentence (provided by kv). 
""" # Input is (bs, qlen, dim) # Mask is (bs, klen) (non-causal) or (bs, klen, klen) bs, qlen, dim = input.size() if kv is None: klen = qlen if cache is None else cache['slen'] + qlen else: klen = kv.size(1) # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim) n_heads = self.n_heads dim_per_head = self.dim // n_heads mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen) def shape(x): """ projection """ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x): """ compute context """ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head) if kv is None: k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head) elif cache is None or self.layer_id not in cache: k = v = kv k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head) if cache is not None: if self.layer_id in cache: if kv is None: k_, v_ = cache[self.layer_id] k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head) v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head) else: k, v = cache[self.layer_id] cache[self.layer_id] = (k, v) q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head) scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen) mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen) scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen) weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen) weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head) context = unshape(context) 
# (bs, qlen, dim) outputs = (self.out_lin(context),) if self.output_attentions: outputs = outputs + (weights,) return outputs class TransformerFFN(nn.Module): def __init__(self, in_dim, dim_hidden, out_dim, config): super(TransformerFFN, self).__init__() self.dropout = config.dropout self.lin1 = nn.Linear(in_dim, dim_hidden) self.lin2 = nn.Linear(dim_hidden, out_dim) self.act = gelu if config.gelu_activation else F.relu def forward(self, input): x = self.lin1(input) x = self.act(x) x = self.lin2(x) x = F.dropout(x, p=self.dropout, training=self.training) return x class XLMPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = XLMConfig pretrained_model_archive_map = XLM_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = None base_model_prefix = "transformer" def __init__(self, *inputs, **kwargs): super(XLMPreTrainedModel, self).__init__(*inputs, **kwargs) def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, nn.Embedding): if self.config is not None and self.config.embed_init_std is not None: nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std) if isinstance(module, nn.Linear): if self.config is not None and self.config.init_std is not None: nn.init.normal_(module.weight, mean=0, std=self.config.init_std) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, 0.) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) XLM_START_DOCSTRING = r""" The XLM model was proposed in `Cross-lingual Language Model Pretraining`_ by Guillaume Lample*, Alexis Conneau*. 
It's a transformer pre-trained using one of the following objectives: - a causal language modeling (CLM) objective (next token prediction), - a masked language modeling (MLM) objective (Bert-like), or - a Translation Language Modeling (TLM) object (extension of Bert's MLM to multiple language inputs) Original code can be found `here`_. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`Cross-lingual Language Model Pretraining`: https://arxiv.org/abs/1901.07291 .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module .. _`here`: https://github.com/facebookresearch/XLM Parameters: config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XLM_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. XLM is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.XLMTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **langs**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens to be used to indicate the language of each token in the input. 
Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str). **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). The embeddings from these tokens will be summed with the respective token embeddings. Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **lengths**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use `attention_mask` for the same result (see above), kept here for compatbility. Indices selected in ``[0, ..., input_ids.size(-1)]``: **cache**: dictionary with ``torch.FloatTensor`` that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential decoding. The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
""" @add_start_docstrings("The bare XLM Model transformer outputting raw hidden-states without any specific head on top.", XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) class XLMModel(XLMPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMModel.from_pretrained('xlm-mlm-en-2048') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): #, dico, is_encoder, with_output): super(XLMModel, self).__init__(config) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states # encoder / decoder, output layer self.is_encoder = config.is_encoder self.is_decoder = not config.is_encoder if self.is_decoder: raise NotImplementedError("Currently XLM can only be used as an encoder") # self.with_output = with_output self.causal = config.causal # dictionary / languages self.n_langs = config.n_langs self.use_lang_emb = config.use_lang_emb self.n_words = config.n_words self.eos_index = config.eos_index self.pad_index = config.pad_index # self.dico = dico # self.id2lang = config.id2lang # self.lang2id = config.lang2id # assert len(self.dico) == self.n_words # assert len(self.id2lang) == len(self.lang2id) == self.n_langs # model parameters self.dim = config.emb_dim # 512 by default self.hidden_dim = self.dim * 4 # 2048 by default self.n_heads = config.n_heads # 8 by default self.n_layers = config.n_layers self.dropout = config.dropout self.attention_dropout = config.attention_dropout assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads' # embeddings self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim) if config.sinusoidal_embeddings: create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight) if config.n_langs > 1 and config.use_lang_emb: self.lang_embeddings = nn.Embedding(self.n_langs, self.dim) self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index) self.layer_norm_emb 
= nn.LayerNorm(self.dim, eps=config.layer_norm_eps) # transformer layers self.attentions = nn.ModuleList() self.layer_norm1 = nn.ModuleList() self.ffns = nn.ModuleList() self.layer_norm2 = nn.ModuleList() # if self.is_decoder: # self.layer_norm15 = nn.ModuleList() # self.encoder_attn = nn.ModuleList() for _ in range(self.n_layers): self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config)) self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) # if self.is_decoder: # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout)) self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config)) self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) if hasattr(config, "pruned_heads"): pruned_heads = config.pruned_heads.copy().items() config.pruned_heads = {} for layer, heads in pruned_heads: if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) self.init_weights() def _resize_token_embeddings(self, new_num_tokens): self.embeddings = self._get_resized_embeddings(self.embeddings, new_num_tokens) return self.embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.attentions[layer].prune_heads(heads) def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None): # removed: src_enc=None, src_len=None if lengths is None: lengths = (input_ids != self.pad_index).sum(dim=1).long() # mask = input_ids != self.pad_index # check inputs bs, slen = input_ids.size() assert lengths.size(0) == bs assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) # if src_enc is not None: # assert self.is_decoder # assert src_enc.size(0) == bs # generate masks mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask) # if self.is_decoder and src_enc is not None: # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None] # position_ids if position_ids is None: position_ids = input_ids.new((slen,)).long() position_ids = torch.arange(slen, out=position_ids).unsqueeze(0) else: assert position_ids.size() == (bs, slen) # (slen, bs) # position_ids = position_ids.transpose(0, 1) # langs if langs is not None: assert langs.size() == (bs, slen) # (slen, bs) # langs = langs.transpose(0, 1) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.n_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify 
head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.n_layers # do not recompute cached elements if cache is not None: _slen = slen - cache['slen'] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] if langs is not None: langs = langs[:, -_slen:] mask = mask[:, -_slen:] attn_mask = attn_mask[:, -_slen:] # embeddings tensor = self.embeddings(input_ids) tensor = tensor + self.position_embeddings(position_ids).expand_as(tensor) if langs is not None and self.use_lang_emb: tensor = tensor + self.lang_embeddings(langs) if token_type_ids is not None: tensor = tensor + self.embeddings(token_type_ids) tensor = self.layer_norm_emb(tensor) tensor = F.dropout(tensor, p=self.dropout, training=self.training) tensor *= mask.unsqueeze(-1).to(tensor.dtype) # transformer layers hidden_states = () attentions = () for i in range(self.n_layers): if self.output_hidden_states: hidden_states = hidden_states + (tensor,) # self attention attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i]) attn = attn_outputs[0] if self.output_attentions: attentions = attentions + (attn_outputs[1],) attn = F.dropout(attn, p=self.dropout, training=self.training) tensor = tensor + attn tensor = self.layer_norm1[i](tensor) # encoder attention (for decoder only) # if self.is_decoder and src_enc is not None: # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache) # attn = F.dropout(attn, p=self.dropout, training=self.training) # tensor = tensor + attn # tensor = self.layer_norm15[i](tensor) # FFN tensor = tensor + self.ffns[i](tensor) tensor = self.layer_norm2[i](tensor) tensor *= mask.unsqueeze(-1).to(tensor.dtype) # Add last hidden state if self.output_hidden_states: hidden_states = hidden_states + (tensor,) # update cache length if cache is not None: cache['slen'] += tensor.size(1) # move back sequence length to dimension 
0 # tensor = tensor.transpose(0, 1) outputs = (tensor,) if self.output_hidden_states: outputs = outputs + (hidden_states,) if self.output_attentions: outputs = outputs + (attentions,) return outputs # outputs, (hidden_states), (attentions) class XLMPredLayer(nn.Module): """ Prediction layer (cross_entropy or adaptive_softmax). """ def __init__(self, config): super(XLMPredLayer, self).__init__() self.asm = config.asm self.n_words = config.n_words self.pad_index = config.pad_index dim = config.emb_dim if config.asm is False: self.proj = nn.Linear(dim, config.n_words, bias=True) else: self.proj = nn.AdaptiveLogSoftmaxWithLoss( in_features=dim, n_classes=config.n_words, cutoffs=config.asm_cutoffs, div_value=config.asm_div_value, head_bias=True, # default is False ) def forward(self, x, y=None): """ Compute the loss, and optionally the scores. """ outputs = () if self.asm is False: scores = self.proj(x) outputs = (scores,) + outputs if y is not None: loss = F.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction='elementwise_mean') outputs = (loss,) + outputs else: scores = self.proj.log_prob(x) outputs = (scores,) + outputs if y is not None: _, loss = self.proj(x, y) outputs = (loss,) + outputs return outputs @add_start_docstrings("""The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) class XLMWithLMHeadModel(XLMPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set ``lm_labels = input_ids`` Indices are selected in ``[-1, 0, ..., config.vocab_size]`` All labels set to ``-1`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(XLMWithLMHeadModel, self).__init__(config) self.transformer = XLMModel(config) self.pred_layer = XLMPredLayer(config) self.init_weights() self.tie_weights() def tie_weights(self): """ Make sure we are sharing the embeddings """ self._tie_or_clone_weights(self.pred_layer.proj, self.transformer.embeddings) def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask) output = transformer_outputs[0] outputs = self.pred_layer(output, labels) outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here return outputs @add_start_docstrings("""XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) class XLMForSequenceClassification(XLMPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). 
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ def __init__(self, config): super(XLMForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.transformer = XLMModel(config) self.sequence_summary = SequenceSummary(config) self.init_weights() def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask) output = transformer_outputs[0] logits = self.sequence_summary(output) outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs @add_start_docstrings("""XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) class XLMForQuestionAnsweringSimple(XLMPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels whether a question has an answer or no answer (SQuAD 2.0) **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the classification token to use as input for computing plausibility of the answer. **p_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...) Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) loss, start_scores, end_scores = outputs[:2] """ def __init__(self, config): super(XLMForQuestionAnsweringSimple, self).__init__(config) self.transformer = XLMModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, start_positions=None, end_positions=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = (start_logits, end_logits,) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = 
start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here return outputs @add_start_docstrings("""XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) class XLMForQuestionAnswering(XLMPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels whether a question has an answer or no answer (SQuAD 2.0) **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for position (index) of the classification token to use as input for computing plausibility of the answer. **p_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...) 
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-start scores (before SoftMax). **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` Span-end scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 start_positions = torch.tensor([1]) end_positions = torch.tensor([3]) outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) loss, start_scores, end_scores = outputs[:2] """ def __init__(self, config): super(XLMForQuestionAnswering, self).__init__(config) self.transformer = XLMModel(config) self.qa_outputs = SQuADHead(config) self.init_weights() def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask) output = transformer_outputs[0] outputs = self.qa_outputs(output, start_positions=start_positions, end_positions=end_positions, cls_index=cls_index, is_impossible=is_impossible, p_mask=p_mask) outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here return outputs
45,543
50.34611
163
py
CLUE
CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_ctrl.py
# coding=utf-8 # Copyright 2018 Salesforce and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch CTRL model.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import json import logging import math import os import sys from io import open import numpy as np import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from torch.nn.parameter import Parameter from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary from .configuration_ctrl import CTRLConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {"ctrl": "https://storage.googleapis.com/sf-ctrl/pytorch/seqlen256_v1.bin"} def angle_defn(pos, i, d_model_size): angle_rates = 1 / torch.pow(10000, (2 * (i//2)) / d_model_size) return pos * angle_rates def positional_encoding(position, d_model_size, dtype): # create the sinusoidal pattern for the positional encoding angle_rads = (angle_defn(torch.arange(position, dtype=dtype).unsqueeze(1), torch.arange(d_model_size, dtype=dtype).unsqueeze(0), d_model_size)) sines = torch.sin(angle_rads[:, 0::2]) cosines = torch.cos(angle_rads[:, 1::2]) pos_encoding = torch.cat([sines, cosines], dim=-1) return pos_encoding def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None): # calculate attention matmul_qk 
= torch.matmul(q, k.permute(0,1,3,2)) dk = k.shape[-1] scaled_attention_logits = matmul_qk / np.sqrt(dk) if mask is not None: scaled_attention_logits += (mask * -1e4) if attention_mask is not None: # Apply the attention mask scaled_attention_logits = scaled_attention_logits + attention_mask attention_weights = torch.softmax(scaled_attention_logits, dim=-1) # Mask heads if we want to if head_mask is not None: attention_weights = attention_weights * head_mask output = torch.matmul(attention_weights, v) return output, attention_weights class MultiHeadAttention(torch.nn.Module): def __init__(self, d_model_size, num_heads, output_attentions=False): super(MultiHeadAttention, self).__init__() self.output_attentions = output_attentions self.num_heads = num_heads self.d_model_size = d_model_size self.depth = int(d_model_size / self.num_heads) self.Wq = torch.nn.Linear(d_model_size, d_model_size) self.Wk = torch.nn.Linear(d_model_size, d_model_size) self.Wv = torch.nn.Linear(d_model_size, d_model_size) self.dense = torch.nn.Linear(d_model_size, d_model_size) def split_into_heads(self, x, batch_size): x = x.reshape(batch_size, -1, self.num_heads, self.depth) return x.permute([0, 2, 1, 3]) def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None): batch_size = q.shape[0] q = self.Wq(q) k = self.Wk(k) v = self.Wv(v) q = self.split_into_heads(q, batch_size) k = self.split_into_heads(k, batch_size) v = self.split_into_heads(v, batch_size) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] k = torch.cat((past_key, k), dim=-2) v = torch.cat((past_value, v), dim=-2) present = torch.stack((k, v)) output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask) scaled_attention = output[0].permute([0, 2, 1, 3]) attn = output[1] original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size) output = self.dense(original_size_attention) outputs = (output, present) if self.output_attentions: 
outputs = outputs + (attn,) return outputs def point_wise_feed_forward_network(d_model_size, dff): return torch.nn.Sequential(torch.nn.Linear(d_model_size, dff), torch.nn.ReLU(), torch.nn.Linear(dff, d_model_size)) class EncoderLayer(torch.nn.Module): def __init__(self, d_model_size, num_heads, dff, rate=0.1, output_attentions=False): super(EncoderLayer, self).__init__() self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, output_attentions) self.ffn = point_wise_feed_forward_network(d_model_size, dff) self.layernorm1 = torch.nn.LayerNorm(d_model_size, eps=1e-6) self.layernorm2 = torch.nn.LayerNorm(d_model_size, eps=1e-6) self.dropout1 = torch.nn.Dropout(rate) self.dropout2 = torch.nn.Dropout(rate) def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None): normed = self.layernorm1(x) attn_outputs = self.multi_head_attention(normed, normed, normed, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask) attn_output = attn_outputs[0] attn_output = self.dropout1(attn_output) out1 = x + attn_output out2 = self.layernorm2(out1) ffn_output = self.ffn(out2) ffn_output = self.dropout2(ffn_output) out2 = out1 + ffn_output outputs = (out2,) + attn_outputs[1:] return outputs class CTRLPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ config_class = CTRLConfig pretrained_model_archive_map = CTRL_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "transformer" def _init_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) CTRL_START_DOCSTRING = r""" CTRL model was proposed in `CTRL: A Conditional Transformer Language Model for Controllable Generation`_ by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher. It's a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.). This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`CTRL: A Conditional Transformer Language Model for Controllable Generation`: https://www.github.com/salesforce/ctrl .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ CTRL_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. CTRL is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. Indices can be obtained using :class:`transformers.CTRLTokenizer`. 
See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **past**: list of ``torch.FloatTensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `past` output below). Can be used to speed up sequential decoding. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: A parallel sequence of tokens (can be used to indicate various portions of the inputs). The embeddings from these tokens will be summed with the respective token embeddings. Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
""" @add_start_docstrings("The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.", CTRL_START_DOCSTRING, CTRL_INPUTS_DOCSTRING) class CTRLModel(CTRLPreTrainedModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **past**: list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = CTRLModel.from_pretrained('ctrl') input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(CTRLModel, self).__init__(config) self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.output_past = config.output_past self.d_model_size = config.n_embd self.num_layers = config.n_layer self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float) self.w = nn.Embedding(config.vocab_size, config.n_embd) self.dropout = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([EncoderLayer(config.n_embd, config.n_head, config.dff, config.resid_pdrop, config.output_attentions) for _ in range(config.n_layer)]) self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) self.init_weights() def _resize_token_embeddings(self, new_num_tokens): self.w = self._get_resized_embeddings(self.w, new_num_tokens) return self.w def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = past[0][0].size(-2) if position_ids is None: position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # Attention mask. 
if attention_mask is not None: attention_mask = attention_mask.view(-1, input_shape[-1]) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.n_layer if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) token_type_embeds = self.w(token_type_ids) token_type_embeds *= np.sqrt(self.d_model_size) else: token_type_embeds = 0 position_ids = position_ids.view(-1, input_shape[-1]) inputs_embeds = self.w(input_ids) # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else 
embedded seq_len = input_ids.shape[-1] mask = torch.triu(torch.ones(seq_len, seq_len), 1).to(inputs_embeds.device) inputs_embeds *= np.sqrt(self.d_model_size) pos_embeds = self.pos_encoding[position_ids, :].to(inputs_embeds.device) hidden_states = inputs_embeds + pos_embeds + token_type_embeds hidden_states = self.dropout(hidden_states) output_shape = input_shape + (inputs_embeds.size(-1),) presents = () all_hidden_states = () all_attentions = [] for i, (h, layer_past) in enumerate(zip(self.h, past)): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),) outputs = h(hidden_states, mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]) hidden_states, present = outputs[:2] if self.output_past: presents = presents + (present,) if self.output_attentions: all_attentions.append(outputs[2]) hidden_states = self.layernorm(hidden_states) hidden_states = hidden_states.view(*output_shape) if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_past: outputs = outputs + (presents,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:] all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions) outputs = outputs + (all_attentions,) return outputs @add_start_docstrings("""The CTRL Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, CTRL_START_DOCSTRING, CTRL_INPUTS_DOCSTRING) class CTRLLMHeadModel(CTRLPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set ``lm_labels = input_ids`` Indices are selected in ``[-1, 0, ..., config.vocab_size]`` All labels set to ``-1`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: import torch from transformers import CTRLTokenizer, CTRLLMHeadModel tokenizer = CTRLTokenizer.from_pretrained('ctrl') model = CTRLLMHeadModel.from_pretrained('ctrl') input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, logits = outputs[:2] """ def __init__(self, config): super(CTRLLMHeadModel, self).__init__(config) self.transformer = CTRLModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True) self.init_weights() self.tie_weights() def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.lm_head, self.transformer.w) def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) outputs = (lm_logits,) + transformer_outputs[1:] if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=-1) loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
23,436
47.22428
134
py
CLUE
CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_transfo_xl.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization classes for Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import glob import logging import os import sys from collections import Counter, OrderedDict from io import open import numpy as np from .file_utils import cached_path from .tokenization_utils import PreTrainedTokenizer try: import torch except ImportError: pass # if sys.version_info[0] == 2: # import cPickle as pickle # else: # import pickle logger = logging.getLogger(__name__) VOCAB_FILES_NAMES = {'pretrained_vocab_file': 'vocab.bin', 'vocab_file': 'vocab.txt'} PRETRAINED_VOCAB_FILES_MAP = { 'pretrained_vocab_file': { 'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'transfo-xl-wt103': None, } PRETRAINED_CORPUS_ARCHIVE_MAP = { 'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin", } CORPUS_NAME = 'corpus.bin' class TransfoXLTokenizer(PreTrainedTokenizer): """ Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl """ vocab_files_names = VOCAB_FILES_NAMES 
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__(self, special=None, min_freq=0, max_size=None, lower_case=False, delimiter=None, vocab_file=None, pretrained_vocab_file=None, never_split=None, unk_token="<unk>", eos_token="<eos>", additional_special_tokens=["<formula>"], **kwargs): super(TransfoXLTokenizer, self).__init__(unk_token=unk_token, eos_token=eos_token, additional_special_tokens=additional_special_tokens, **kwargs) self.max_len_single_sentence = self.max_len # no default special tokens - you can update this value if you add special tokens self.max_len_sentences_pair = self.max_len # no default special tokens - you can update this value if you add special tokens if never_split is None: never_split = self.all_special_tokens if special is None: special = [] self.counter = Counter() self.special = special self.min_freq = min_freq self.max_size = max_size self.lower_case = lower_case self.delimiter = delimiter self.vocab_file = vocab_file self.never_split = never_split if pretrained_vocab_file is not None: # Hack because, honestly this tokenizer was not made to be used # in a library like ours, at all. 
vocab_dict = torch.load(pretrained_vocab_file) for key, value in vocab_dict.items(): if key not in self.__dict__: self.__dict__[key] = value if vocab_file is not None: self.build_vocab() def count_file(self, path, verbose=False, add_eos=False): if verbose: logger.info('counting file {} ...'.format(path)) assert os.path.exists(path) sents = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: logger.info(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos) self.counter.update(symbols) sents.append(symbols) return sents def count_sents(self, sents, verbose=False): """ sents : a list of sentences, each a list of tokenized symbols """ if verbose: logger.info('counting {} sents ...'.format(len(sents))) for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: logger.info(' line {}'.format(idx)) self.counter.update(symbols) def _build_from_file(self, vocab_file): self.idx2sym = [] self.sym2idx = OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) if '<UNK>' in self.sym2idx: self.unk_idx = self.sym2idx['<UNK>'] elif '<unk>' in self.sym2idx: self.unk_idx = self.sym2idx['<unk>'] else: raise ValueError('No <unkown> token in vocabulary') def save_vocabulary(self, vocab_path): """Save the tokenizer vocabulary to a directory or file.""" if os.path.isdir(vocab_path): vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['pretrained_vocab_file']) torch.save(self.__dict__, vocab_file) return (vocab_file,) def build_vocab(self): if self.vocab_file: logger.info('building vocab from {}'.format(self.vocab_file)) self._build_from_file(self.vocab_file) logger.info('final vocab size {}'.format(len(self))) else: logger.info('building vocab with min_freq={}, max_size={}'.format( self.min_freq, self.max_size)) self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: 
self.add_special(sym) for sym, cnt in self.counter.most_common(self.max_size): if cnt < self.min_freq: break self.add_symbol(sym) logger.info('final vocab size {} from {} unique tokens'.format( len(self), len(self.counter))) def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False): if verbose: logger.info('encoding file {} ...'.format(path)) assert os.path.exists(path) encoded = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: logger.info(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def encode_sents(self, sents, ordered=False, verbose=False): if verbose: logger.info('encoding {} sents ...'.format(len(sents))) encoded = [] for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: logger.info(' line {}'.format(idx)) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def add_special(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym]) def add_symbol(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 def _convert_id_to_token(self, idx): """Converts an id in a token (BPE) using the vocab.""" assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx) return self.idx2sym[idx] def _convert_token_to_id(self, sym): """ Converts a token (str/unicode) in an id using the vocab. 
""" if sym in self.sym2idx: return self.sym2idx[sym] else: # logger.info('encounter unk {}'.format(sym)) # assert '<eos>' not in sym if hasattr(self, 'unk_idx'): return self.sym2idx.get(sym, self.unk_idx) # Backward compatibility with pre-trained models elif '<unk>' in self.sym2idx: return self.sym2idx['<unk>'] elif '<UNK>' in self.sym2idx: return self.sym2idx['<UNK>'] else: raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement') def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = ' '.join(tokens).strip() return out_string def convert_to_tensor(self, symbols): return torch.LongTensor(self.convert_tokens_to_ids(symbols)) @property def vocab_size(self): return len(self.idx2sym) def _tokenize(self, line, add_eos=False, add_double_eos=False): line = line.strip() # convert to lower case if self.lower_case: line = line.lower() # empty delimiter '' will evaluate False if self.delimiter == '': symbols = line else: symbols = line.split(self.delimiter) if add_double_eos: # lm1b return ['<S>'] + symbols + ['<S>'] elif add_eos: return symbols + ['<eos>'] else: return symbols class LMOrderedIterator(object): def __init__(self, data, bsz, bptt, device='cpu', ext_len=None): """ data -- LongTensor -- the LongTensor is strictly ordered """ self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device # Work out how cleanly we can divide the dataset into bsz parts. self.n_step = data.size(0) // bsz # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data.narrow(0, 0, self.n_step * bsz) # Evenly divide the data across the bsz batches. 
self.data = data.view(bsz, -1).t().contiguous().to(device) # Number of mini-batches self.n_batch = (self.n_step + self.bptt - 1) // self.bptt def get_batch(self, i, bptt=None): if bptt is None: bptt = self.bptt seq_len = min(bptt, self.data.size(0) - 1 - i) end_idx = i + seq_len beg_idx = max(0, i - self.ext_len) data = self.data[beg_idx:end_idx] target = self.data[i+1:i+1+seq_len] data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) return data_out, target_out, seq_len def get_fixlen_iter(self, start=0): for i in range(start, self.data.size(0) - 1, self.bptt): yield self.get_batch(i) def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3): max_len = self.bptt + max_deviation * std i = start while True: bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2. bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std)))) data, target, seq_len = self.get_batch(i, bptt) i += seq_len yield data, target, seq_len if i >= self.data.size(0) - 2: break def __iter__(self): return self.get_fixlen_iter() class LMShuffledIterator(object): def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False): """ data -- list[LongTensor] -- there is no order among the LongTensors """ self.data = data self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self): # index iterator epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \ else np.array(range(len(self.data))) # sentence iterator for idx in epoch_indices: yield self.data[idx] def stream_iterator(self, sent_stream): # streams for each data in the batch streams = [None] * self.bsz data = torch.LongTensor(self.bptt, self.bsz) target = torch.LongTensor(self.bptt, self.bsz) n_retain = 0 while True: # data : [n_retain+bptt x bsz] # target : [bptt x bsz] data[n_retain:].fill_(-1) target.fill_(-1) valid_batch = 
True for i in range(self.bsz): n_filled = 0 try: while n_filled < self.bptt: if streams[i] is None or len(streams[i]) <= 1: streams[i] = next(sent_stream) # number of new tokens to fill in n_new = min(len(streams[i]) - 1, self.bptt - n_filled) # first n_retain tokens are retained from last batch data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \ streams[i][:n_new] target[n_filled:n_filled+n_new, i] = \ streams[i][1:n_new+1] streams[i] = streams[i][n_new:] n_filled += n_new except StopIteration: valid_batch = False break if not valid_batch: return data_out = data.transpose(0, 1).contiguous().to(self.device) target_out = target.transpose(0, 1).contiguous().to(self.device) yield data_out, target_out, self.bptt n_retain = min(data.size(0), self.ext_len) if n_retain > 0: data[:n_retain] = data[-n_retain:] data.resize_(n_retain + self.bptt, data.size(1)) def __iter__(self): # sent_stream is an iterator sent_stream = self.get_sent_stream() for batch in self.stream_iterator(sent_stream): yield batch class LMMultiFileIterator(LMShuffledIterator): def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None, shuffle=False): self.paths = paths self.vocab = vocab self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self, path): sents = self.vocab.encode_file(path, add_double_eos=True) if self.shuffle: np.random.shuffle(sents) sent_stream = iter(sents) return sent_stream def __iter__(self): if self.shuffle: np.random.shuffle(self.paths) for path in self.paths: # sent_stream is an iterator sent_stream = self.get_sent_stream(path) for batch in self.stream_iterator(sent_stream): yield batch class TransfoXLCorpus(object): @classmethod def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): """ Instantiate a pre-processed corpus. 
""" vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP: corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path] else: corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME) # redirect to the cache, if necessary try: resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir) except EnvironmentError: logger.error( "Corpus '{}' was not found in corpus list ({}). " "We assumed '{}' was a path or url but couldn't find files {} " "at this path or url.".format( pretrained_model_name_or_path, ', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys()), pretrained_model_name_or_path, corpus_file)) return None if resolved_corpus_file == corpus_file: logger.info("loading corpus file {}".format(corpus_file)) else: logger.info("loading corpus file {} from cache at {}".format( corpus_file, resolved_corpus_file)) # Instantiate tokenizer. corpus = cls(*inputs, **kwargs) corpus_dict = torch.load(resolved_corpus_file) for key, value in corpus_dict.items(): corpus.__dict__[key] = value corpus.vocab = vocab if corpus.train is not None: corpus.train = torch.tensor(corpus.train, dtype=torch.long) if corpus.valid is not None: corpus.valid = torch.tensor(corpus.valid, dtype=torch.long) if corpus.test is not None: corpus.test = torch.tensor(corpus.test, dtype=torch.long) return corpus def __init__(self, *args, **kwargs): self.vocab = TransfoXLTokenizer(*args, **kwargs) self.dataset = None self.train = None self.valid = None self.test = None def build_corpus(self, path, dataset): self.dataset = dataset if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']: self.vocab.count_file(os.path.join(path, 'train.txt')) self.vocab.count_file(os.path.join(path, 'valid.txt')) self.vocab.count_file(os.path.join(path, 'test.txt')) elif self.dataset == 'wt103': self.vocab.count_file(os.path.join(path, 'train.txt')) elif self.dataset == 'lm1b': train_path_pattern = 
os.path.join( path, '1-billion-word-language-modeling-benchmark-r13output', 'training-monolingual.tokenized.shuffled', 'news.en-*') train_paths = glob.glob(train_path_pattern) # the vocab will load from file when build_vocab() is called self.vocab.build_vocab() if self.dataset in ['ptb', 'wt2', 'wt103']: self.train = self.vocab.encode_file( os.path.join(path, 'train.txt'), ordered=True) self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=True) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=True) elif self.dataset in ['enwik8', 'text8']: self.train = self.vocab.encode_file( os.path.join(path, 'train.txt'), ordered=True, add_eos=False) self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=True, add_eos=False) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=True, add_eos=False) elif self.dataset == 'lm1b': self.train = train_paths self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True) def get_iterator(self, split, *args, **kwargs): if split == 'train': if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(self.train, *args, **kwargs) elif self.dataset == 'lm1b': kwargs['shuffle'] = True data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs) elif split in ['valid', 'test']: data = self.valid if split == 'valid' else self.test if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(data, *args, **kwargs) elif self.dataset == 'lm1b': data_iter = LMShuffledIterator(data, *args, **kwargs) return data_iter def get_lm_corpus(datadir, dataset): fn = os.path.join(datadir, 'cache.pt') fn_pickle = os.path.join(datadir, 'cache.pkl') if os.path.exists(fn): logger.info('Loading cached dataset...') corpus = torch.load(fn_pickle) elif 
os.path.exists(fn): logger.info('Loading cached dataset from pickle...') with open(fn, "rb") as fp: corpus = pickle.load(fp) else: logger.info('Producing dataset {}...'.format(dataset)) kwargs = {} if dataset in ['wt103', 'wt2']: kwargs['special'] = ['<eos>'] kwargs['lower_case'] = False elif dataset == 'ptb': kwargs['special'] = ['<eos>'] kwargs['lower_case'] = True elif dataset == 'lm1b': kwargs['special'] = [] kwargs['lower_case'] = False kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt') elif dataset in ['enwik8', 'text8']: pass corpus = TransfoXLCorpus(datadir, dataset, **kwargs) torch.save(corpus, fn) return corpus
21,824
36.62931
133
py
CLUE
CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_transfo_xl_utilities.py
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for PyTorch Transformer XL model. Directly adapted from https://github.com/kimiyoung/transformer-xl. """ from collections import defaultdict import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # CUDA_MAJOR = int(torch.version.cuda.split('.')[0]) # CUDA_MINOR = int(torch.version.cuda.split('.')[1]) class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False): super(ProjectedAdaptiveLogSoftmax, self).__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) self.out_layers = nn.ModuleList() self.out_projs = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs)): if d_proj != d_embed: self.out_projs.append( nn.Parameter(torch.FloatTensor(d_proj, d_embed)) ) else: self.out_projs.append(None) 
self.out_layers.append(nn.Linear(d_embed, n_token)) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.out_projs.append( nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)) ) self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx)) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj): if proj is None: logit = F.linear(hidden, weight, bias=bias) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: proj_hid = F.linear(hidden, proj.t().contiguous()) logit = F.linear(proj_hid, weight, bias=bias) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def forward(self, hidden, labels=None, keep_order=False): ''' Params: hidden :: [len*bsz x d_proj] labels :: [len*bsz] Return: if labels is None: out :: [len*bsz] Negative log likelihood else: out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary We could replace this implementation by the native PyTorch one if their's had an option to set bias on all clusters in the native one. 
here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138 ''' if labels is not None: labels = labels.view(-1) if hidden.size(0) != labels.size(0): raise RuntimeError('Input and labels should have the same size ' 'in the batch dimension.') if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) if labels is not None: out = -F.log_softmax(logit, dim=-1) \ .gather(1, labels.unsqueeze(1)).squeeze(1) else: out = F.log_softmax(logit, dim=-1) else: # construct weights and biases weights, biases = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0] head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) if labels is None: out = hidden.new_empty((head_logit.size(0), self.n_token)) else: out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] if labels is not None: mask_i = (labels >= l_idx) & (labels < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i = labels.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) hidden_i = hidden.index_select(0, indices_i) else: hidden_i = hidden if i == 0: if labels is not None: logprob_i = head_logprob_i.gather(1, 
target_i[:, None]).squeeze(1) else: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i] tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: logprob_i = head_logprob_i[:, cluster_prob_idx] \ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i out[:, l_idx:r_idx] = logprob_i if labels is not None: if (hasattr(self, 'keep_order') and self.keep_order) or keep_order: out.index_copy_(0, indices_i, -logprob_i) else: out[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return out def log_prob(self, hidden): r""" Computes log probabilities for all :math:`n\_classes` From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py Args: hidden (Tensor): a minibatch of examples Returns: log-probabilities of for each class :math:`c` in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor. 
Shape: - Input: :math:`(N, in\_features)` - Output: :math:`(N, n\_classes)` """ if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0]) return F.log_softmax(logit, dim=-1) else: # construct weights and biases weights, biases = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers[0].weight[l_idx:r_idx] bias_i = self.out_layers[0].bias[l_idx:r_idx] else: weight_i = self.out_layers[i].weight bias_i = self.out_layers[i].bias if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0] head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) out = hidden.new_empty((head_logit.size(0), self.n_token)) head_logprob = F.log_softmax(head_logit, dim=1) cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1] if i == 0: out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]] else: weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i] tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob[:, -i] + tail_logprob_i out[:, start_idx, stop_idx] = logprob_i return out class LogUniformSampler(object): def __init__(self, range_max, n_sample): """ Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` expected count can be approximated by 1 - (1 - p)^n and we use a numerically stable version -expm1(num_tries * log1p(-p)) Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will 
vary from run to run """ with torch.no_grad(): self.range_max = range_max log_indices = torch.arange(1., range_max+2., 1.).log_() self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1] self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float() self.n_sample = n_sample def sample(self, labels): """ labels: [b1, b2] Return true_log_probs: [b1, b2] samp_log_probs: [n_sample] neg_samples: [n_sample] """ # neg_samples = torch.empty(0).long() n_sample = self.n_sample n_tries = 2 * n_sample with torch.no_grad(): neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique() device = labels.device neg_samples = neg_samples.to(device) true_log_probs = self.log_q[labels].to(device) samp_log_probs = self.log_q[neg_samples].to(device) return true_log_probs, samp_log_probs, neg_samples def sample_logits(embedding, bias, labels, inputs, sampler): """ embedding: an nn.Embedding layer bias: [n_vocab] labels: [b1, b2] inputs: [b1, b2, n_emb] sampler: you may use a LogUniformSampler Return logits: [b1, b2, 1 + n_sample] """ true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels) n_sample = neg_samples.size(0) b1, b2 = labels.size(0), labels.size(1) all_ids = torch.cat([labels.view(-1), neg_samples]) all_w = embedding(all_ids) true_w = all_w[: -n_sample].view(b1, b2, -1) sample_w = all_w[- n_sample:].view(n_sample, -1) all_b = bias[all_ids] true_b = all_b[: -n_sample].view(b1, b2) sample_b = all_b[- n_sample:] hit = (labels[:, :, None] == neg_samples).detach() true_logits = torch.einsum('ijk,ijk->ij', [true_w, inputs]) + true_b - true_log_probs sample_logits = torch.einsum('lk,ijk->ijl', [sample_w, inputs]) + sample_b - samp_log_probs sample_logits.masked_fill_(hit, -1e30) logits = torch.cat([true_logits[:, :, None], sample_logits], -1) return logits
13,568
39.747748
132
py
CLUE
CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/modeling_roberta.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch RoBERTa model. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import logging import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu from .configuration_roberta import RobertaConfig from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = { 'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin", 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin", } class RobertaEmbeddings(BertEmbeddings): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
""" def __init__(self, config): super(RobertaEmbeddings, self).__init__(config) self.padding_idx = 1 self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) def forward(self, input_ids, token_type_ids=None, position_ids=None): seq_length = input_ids.size(1) if position_ids is None: # Position numbers begin at padding_idx+1. Padding symbols are ignored. # cf. fairseq's `utils.make_positions` position_ids = torch.arange(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) return super(RobertaEmbeddings, self).forward(input_ids, token_type_ids=token_type_ids, position_ids=position_ids) ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in `RoBERTa: A Robustly Optimized BERT Pretraining Approach`_ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. It is based on Google's BERT model released in 2018. It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining objective and training with much larger mini-batches and learning rates. This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained models. This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. .. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`: https://arxiv.org/abs/1907.11692 .. _`torch.nn.Module`: https://pytorch.org/docs/stable/nn.html#module Parameters: config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ROBERTA_INPUTS_DOCSTRING = r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of input sequence tokens in the vocabulary. To match pre-training, RoBERTa input sequence should be formatted with <s> and </s> tokens as follows: (a) For sequence pairs: ``tokens: <s> Is this Jacksonville ? </s> </s> No it is not . </s>`` (b) For single sequences: ``tokens: <s> the dog is hairy . </s>`` Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with the ``add_special_tokens`` parameter set to ``True``. RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. **token_type_ids**: (`optional` need to be trained) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Optional segment token indices to indicate first and second portions of the inputs. This embedding matrice is not trained (not pretrained during RoBERTa pretraining), you will have to train it during finetuning. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). 
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1[``. **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. """ @add_start_docstrings("The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) class RobertaModel(BertModel): r""" Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the output of the last layer of the model. **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during Bert pretraining. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaModel.from_pretrained('roberta-base') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ config_class = RobertaConfig pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "roberta" def __init__(self, config): super(RobertaModel, self).__init__(config) self.embeddings = RobertaEmbeddings(config) self.init_weights() def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): if input_ids[:, 0].sum().item() != 0: logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. " "This model requires special tokens in order to work. " "Please specify add_special_tokens=True in your tokenize.encode()" "or tokenizer.convert_tokens_to_ids().") return super(RobertaModel, self).forward(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) class RobertaForMaskedLM(BertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. 
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForMaskedLM.from_pretrained('roberta-base') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ config_class = RobertaConfig pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "roberta" def __init__(self, config): super(RobertaForMaskedLM, self).__init__(config) self.roberta = RobertaModel(config) self.lm_head = RobertaLMHead(config) self.init_weights() self.tie_weights() def tie_weights(self): """ Make sure we are sharing the input and output embeddings. Export to TorchScript can't handle parameter sharing so we are cloning them instead. """ self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings) def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, masked_lm_labels=None): outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) class RobertaLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super(RobertaLMHead, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, 
config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) + self.bias return x @add_start_docstrings("""RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) class RobertaForSequenceClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Classification (or regression if config.num_labels==1) loss. **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` Classification (or regression if config.num_labels==1) scores (before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForSequenceClassification.from_pretrained('roberta-base') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] """ config_class = RobertaConfig pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "roberta" def __init__(self, config): super(RobertaForSequenceClassification, self).__init__(config) self.num_labels = config.num_labels self.roberta = RobertaModel(config) self.classifier = RobertaClassificationHead(config) def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, labels=None): outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask) sequence_output = outputs[0] logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings("""Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) class RobertaForMultipleChoice(BertPreTrainedModel): r""" Inputs: **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``: Indices of input sequence tokens in the vocabulary. The second dimension of the input (`num_choices`) indicates the number of choices to score. 
    To match pre-training, RoBerta input sequence should be formatted with [CLS] and [SEP] tokens as follows:

    (a) For sequence pairs:

        ``tokens:         [CLS] is this jack ##son ##ville ? [SEP] [SEP] no it is not . [SEP]``

        ``token_type_ids:   0   0  0    0    0     0       0   0   0     1  1  1  1   1   1``

    (b) For single sequences:

        ``tokens:         [CLS] the dog is hairy . [SEP]``

        ``token_type_ids:   0   0   0   0  0     0   0``

    Indices can be obtained using :class:`transformers.BertTokenizer`.
    See :func:`transformers.PreTrainedTokenizer.encode` and
    :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
    **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
        Segment token indices to indicate first and second portions of the inputs.
        The second dimension of the input (`num_choices`) indicates the number of choices to score.
        Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
        corresponds to a `sentence B` token.
    **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
        Mask to avoid performing attention on padding token indices.
        The second dimension of the input (`num_choices`) indicates the number of choices to score.
        Mask values selected in ``[0, 1]``:
        ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
    **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
        Mask to nullify selected heads of the self-attention modules.
        Mask values selected in ``[0, 1]``:
        ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for computing the multiple choice classification loss.
        Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
        of the input tensors. (see `input_ids` above)

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss.
        **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above).
            Classification scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = RobertaForMultipleChoice.from_pretrained('roberta-base')
        choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
        input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        labels = torch.tensor(1).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, classification_scores = outputs[:2]

    """
    config_class = RobertaConfig
    pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "roberta"

    def __init__(self, config):
        super(RobertaForMultipleChoice, self).__init__(config)

        self.roberta = RobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)  # one score per candidate sequence

        self.init_weights()

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                position_ids=None, head_mask=None):
        """Score each of the `num_choices` candidate sequences per example.

        Returns ``(loss,)`` prepended when ``labels`` is given, then the
        reshaped ``(batch_size, num_choices)`` logits, then any optional extras
        from the backbone.
        """
        num_choices = input_ids.shape[1]

        # Fold the choices dimension into the batch dimension so the backbone
        # sees plain 2-D inputs of shape (batch * num_choices, seq_len).
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                               attention_mask=flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]  # pooled representation for each flattened row

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)  # unfold back to (batch, num_choices)

        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attention if they are here

        if labels is not None:
            # Cross-entropy over the choices dimension selects the correct candidate.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
            outputs = (loss,) + outputs

        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)


class RobertaClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super(RobertaClassificationHead, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        """Project the first token's hidden state to `num_labels` logits.

        ``features`` is expected to be the backbone's last-layer hidden states,
        shape (batch, seq_len, hidden) — TODO confirm against callers.
        """
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x
25,678
53.52017
151
py
CLUE
CLUE-master/baselines/models_pytorch/classifier_pytorch/transformers/tokenization_utils.py
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" from __future__ import (absolute_import, division, print_function, unicode_literals) import logging import os import json import six import copy from io import open from .file_utils import cached_path, is_tf_available, is_torch_available if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch logger = logging.getLogger(__name__) SPECIAL_TOKENS_MAP_FILE = 'special_tokens_map.json' ADDED_TOKENS_FILE = 'added_tokens.json' TOKENIZER_CONFIG_FILE = 'tokenizer_config.json' class PreTrainedTokenizer(object): """ Base class for all tokenizers. Handle all the shared methods for tokenization and special tokens as well as methods dowloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary. This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...). Class attributes (overridden by derived classes): - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string). 
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file. - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size. - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, a dictionnary of specific arguments to pass to the ``__init__``method of the tokenizer class for this pretrained model when loading the tokenizer with the ``from_pretrained()`` method. Parameters: - ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token`` and ``self.bos_token_id`` - ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token`` and ``self.eos_token_id`` - ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token`` and ``self.unk_token_id`` - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token`` and ``self.sep_token_id`` - ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token`` and ``self.pad_token_id`` - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token`` and ``self.cls_token_id`` - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). 
Will be associated to ``self.mask_token`` and ``self.mask_token_id`` - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensure they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids`` """ vocab_files_names = {} pretrained_vocab_files_map = {} pretrained_init_configuration = {} max_model_input_sizes = {} SPECIAL_TOKENS_ATTRIBUTES = ["bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens"] @property def bos_token(self): """ Beginning of sentence token (string). Log an error if used while not having been set. """ if self._bos_token is None: logger.error("Using bos_token, but it is not set yet.") return self._bos_token @property def eos_token(self): """ End of sentence token (string). Log an error if used while not having been set. """ if self._eos_token is None: logger.error("Using eos_token, but it is not set yet.") return self._eos_token @property def unk_token(self): """ Unknown token (string). Log an error if used while not having been set. """ if self._unk_token is None: logger.error("Using unk_token, but it is not set yet.") return self._unk_token @property def sep_token(self): """ Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """ if self._sep_token is None: logger.error("Using sep_token, but it is not set yet.") return self._sep_token @property def pad_token(self): """ Padding token (string). Log an error if used while not having been set. """ if self._pad_token is None: logger.error("Using pad_token, but it is not set yet.") return self._pad_token @property def cls_token(self): """ Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. 
Log an error if used while not having been set. """ if self._cls_token is None: logger.error("Using cls_token, but it is not set yet.") return self._cls_token @property def mask_token(self): """ Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """ if self._mask_token is None: logger.error("Using mask_token, but it is not set yet.") return self._mask_token @property def additional_special_tokens(self): """ All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """ if self._additional_special_tokens is None: logger.error("Using additional_special_tokens, but it is not set yet.") return self._additional_special_tokens @bos_token.setter def bos_token(self, value): self._bos_token = value @eos_token.setter def eos_token(self, value): self._eos_token = value @unk_token.setter def unk_token(self, value): self._unk_token = value @sep_token.setter def sep_token(self, value): self._sep_token = value @pad_token.setter def pad_token(self, value): self._pad_token = value @cls_token.setter def cls_token(self, value): self._cls_token = value @mask_token.setter def mask_token(self, value): self._mask_token = value @additional_special_tokens.setter def additional_special_tokens(self, value): self._additional_special_tokens = value @property def bos_token_id(self): """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.bos_token) @property def eos_token_id(self): """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.eos_token) @property def unk_token_id(self): """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. 
""" return self.convert_tokens_to_ids(self.unk_token) @property def sep_token_id(self): """ Id of the separation token in the vocabulary. E.g. separate context and query in an input sequence. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.sep_token) @property def pad_token_id(self): """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.pad_token) @property def cls_token_id(self): """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.cls_token) @property def mask_token_id(self): """ Id of the mask token in the vocabulary. E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.mask_token) @property def additional_special_tokens_ids(self): """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. 
""" return self.convert_tokens_to_ids(self.additional_special_tokens) def __init__(self, max_len=None, **kwargs): self._bos_token = None self._eos_token = None self._unk_token = None self._sep_token = None self._pad_token = None self._cls_token = None self._mask_token = None self._additional_special_tokens = [] self.max_len = max_len if max_len is not None else int(1e12) # Added tokens self.added_tokens_encoder = {} self.added_tokens_decoder = {} # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``) self.init_inputs = () self.init_kwargs = {} for key, value in kwargs.items(): if key in self.SPECIAL_TOKENS_ATTRIBUTES: if key == 'additional_special_tokens': assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value) else: assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode)) setattr(self, key, value) @classmethod def from_pretrained(cls, *inputs, **kwargs): r""" Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``. - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``. - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``. cache_dir: (`optional`) string: Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. 
force_download: (`optional`) boolean, default False: Force to (re-)download the vocabulary files and override the cached versions if they exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method. kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details. Examples:: # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer # Download vocabulary from S3 and cache. tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) tokenizer = BertTokenizer.from_pretrained('./test/saved_model/') # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt') # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>') # You should be sure '<unk>' is in the vocabulary when doing that. 
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == '<unk>' """ return cls._from_pretrained(*inputs, **kwargs) @classmethod def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) proxies = kwargs.pop('proxies', None) s3_models = list(cls.max_model_input_sizes.keys()) vocab_files = {} init_configuration = {} if pretrained_model_name_or_path in s3_models: # Get the vocabulary from AWS S3 bucket for file_id, map_list in cls.pretrained_vocab_files_map.items(): vocab_files[file_id] = map_list[pretrained_model_name_or_path] if cls.pretrained_init_configuration and pretrained_model_name_or_path in cls.pretrained_init_configuration: init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path] else: # Get the vocabulary from local files logger.info( "Model name '{}' not found in model shortcut name list ({}). " "Assuming '{}' is a path or url to a directory containing tokenizer files.".format( pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path)) # Look for the tokenizer main vocabulary files for file_id, file_name in cls.vocab_files_names.items(): if os.path.isdir(pretrained_model_name_or_path): # If a directory is provided we look for the standard filenames full_file_name = os.path.join(pretrained_model_name_or_path, file_name) else: # If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file) full_file_name = pretrained_model_name_or_path if not os.path.exists(full_file_name): logger.info("Didn't find file {}. 
We won't load it.".format(full_file_name)) full_file_name = None vocab_files[file_id] = full_file_name # Look for the additional tokens files additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE, 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE, 'tokenizer_config_file': TOKENIZER_CONFIG_FILE, } # If a path to a file was provided, get the parent directory saved_directory = pretrained_model_name_or_path if os.path.exists(saved_directory) and not os.path.isdir(saved_directory): saved_directory = os.path.dirname(saved_directory) for file_id, file_name in additional_files_names.items(): full_file_name = os.path.join(saved_directory, file_name) if not os.path.exists(full_file_name): logger.info("Didn't find file {}. We won't load it.".format(full_file_name)) full_file_name = None vocab_files[file_id] = full_file_name if all(full_file_name is None for full_file_name in vocab_files.values()): raise EnvironmentError( "Model name '{}' was not found in tokenizers model name list ({}). " "We assumed '{}' was a path or url to a directory containing vocabulary files " "named {} but couldn't find such vocabulary files at this path or url.".format( pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values()))) # Get files from url, cache, or disk depending on the case try: resolved_vocab_files = {} for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None else: resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies) except EnvironmentError: if pretrained_model_name_or_path in s3_models: msg = "Couldn't reach server at '{}' to download vocabulary files." else: msg = "Model name '{}' was not found in tokenizers model name list ({}). 
" \ "We assumed '{}' was a path or url to a directory containing vocabulary files " \ "named {}, but couldn't find such vocabulary files at this path or url.".format( pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values())) raise EnvironmentError(msg) for file_id, file_path in vocab_files.items(): if file_path == resolved_vocab_files[file_id]: logger.info("loading file {}".format(file_path)) else: logger.info("loading file {} from cache at {}".format( file_path, resolved_vocab_files[file_id])) # Prepare tokenizer initialization kwargs # Did we saved some inputs and kwargs to reload ? tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None) if tokenizer_config_file is not None: init_kwargs = json.load(open(tokenizer_config_file, encoding="utf-8")) saved_init_inputs = init_kwargs.pop('init_inputs', ()) if not init_inputs: init_inputs = saved_init_inputs else: init_kwargs = init_configuration # Update with newly provided kwargs init_kwargs.update(kwargs) # Set max length if needed if pretrained_model_name_or_path in cls.max_model_input_sizes: # if we're using a pretrained model, ensure the tokenizer # wont index sequences longer than the number of positional embeddings max_len = cls.max_model_input_sizes[pretrained_model_name_or_path] if max_len is not None and isinstance(max_len, (int, float)): init_kwargs['max_len'] = min(init_kwargs.get('max_len', int(1e12)), max_len) # Merge resolved_vocab_files arguments in init_kwargs. 
added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None) special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None) for args_name, file_path in resolved_vocab_files.items(): if args_name not in init_kwargs: init_kwargs[args_name] = file_path if special_tokens_map_file is not None: special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8")) for key, value in special_tokens_map.items(): if key not in init_kwargs: init_kwargs[key] = value # Instantiate tokenizer. tokenizer = cls(*init_inputs, **init_kwargs) # Save inputs and kwargs for saving and re-loading with ``save_pretrained`` tokenizer.init_inputs = init_inputs tokenizer.init_kwargs = init_kwargs # Add supplementary tokens. if added_tokens_file is not None: added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8")) added_tok_decoder = {v:k for k, v in added_tok_encoder.items()} tokenizer.added_tokens_encoder.update(added_tok_encoder) tokenizer.added_tokens_decoder.update(added_tok_decoder) return tokenizer def save_pretrained(self, save_directory): """ Save the tokenizer vocabulary files together with: - added tokens, - special-tokens-to-class-attributes-mapping, - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert). This won't save modifications other than (added tokens and special token mapping) you may have applied to the tokenizer after the instantiation (e.g. modifying tokenizer.do_lower_case after creation). This method make sure the full tokenizer can then be re-loaded using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method. 
""" if not os.path.isdir(save_directory): logger.error("Saving directory ({}) should be a directory".format(save_directory)) return special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE) added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE) tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE) tokenizer_config = copy.deepcopy(self.init_kwargs) tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs) for file_id in self.vocab_files_names.keys(): tokenizer_config.pop(file_id, None) with open(tokenizer_config_file, 'w', encoding='utf-8') as f: f.write(json.dumps(tokenizer_config, ensure_ascii=False)) with open(special_tokens_map_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.special_tokens_map, ensure_ascii=False)) with open(added_tokens_file, 'w', encoding='utf-8') as f: if self.added_tokens_encoder: out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False) else: out_str = u"{}" f.write(out_str) vocab_files = self.save_vocabulary(save_directory) return vocab_files + (special_tokens_map_file, added_tokens_file) def save_vocabulary(self, save_directory): """ Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens and special token mappings. Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method. """ raise NotImplementedError def vocab_size(self): """ Size of the base vocabulary (without the added tokens) """ raise NotImplementedError def __len__(self): """ Size of the full vocabulary with the added tokens """ return self.vocab_size + len(self.added_tokens_encoder) def add_tokens(self, new_tokens): """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary. 
        Args:
            new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not
                already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to increase the vocabulary of Bert model and tokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')

            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
        """
        if not new_tokens:
            return 0

        to_add_tokens = []
        for token in new_tokens:
            assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode))
            # A token is considered "new" when the base vocabulary maps it to the
            # unk_token id (i.e. it is out-of-vocabulary) and it has not already
            # been queued in this call. unk_token itself is excluded because it
            # trivially maps to its own id.
            if token != self.unk_token and \
                    self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token) and \
                    token not in to_add_tokens:
                to_add_tokens.append(token)
                logger.info("Adding %s to the vocabulary", token)

        # New ids continue after the current full vocabulary (base + previously added),
        # so len(self) is the next free index.
        added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(to_add_tokens))
        added_tok_decoder = {v:k for k, v in added_tok_encoder.items()}
        self.added_tokens_encoder.update(added_tok_encoder)
        self.added_tokens_decoder.update(added_tok_decoder)

        return len(to_add_tokens)

    def num_added_tokens(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        Note:
            This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
            inside your training loop.

        Args:
            pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
                number of added tokens in the case of a single sequence if set to False.
Returns: Number of tokens added to sequences """ token_ids_0 = [] token_ids_1 = [] return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) def add_special_tokens(self, special_tokens_dict): """ Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). Using `add_special_tokens` will ensure your special tokens can be used in several ways: - special tokens are carefully handled by the tokenizer (they are never split) - you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>') Args: special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``]. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them). Returns: Number of tokens added to the vocabulary. Examples:: # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') special_tokens_dict = {'cls_token': '<CLS>'} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print('We have added', num_added_toks, 'tokens') model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer. 
assert tokenizer.cls_token == '<CLS>' """ if not special_tokens_dict: return 0 added_tokens = 0 for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES if key == 'additional_special_tokens': assert isinstance(value, (list, tuple)) and all(isinstance(t, str) or (six.PY2 and isinstance(t, unicode)) for t in value) added_tokens += self.add_tokens(value) else: assert isinstance(value, str) or (six.PY2 and isinstance(value, unicode)) added_tokens += self.add_tokens([value]) logger.info("Assigning %s to the %s key of the tokenizer", value, key) setattr(self, key, value) return added_tokens def tokenize(self, text, **kwargs): """ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Take care of added tokens. """ def split_on_token(tok, text): result = [] split_text = text.split(tok) for i, sub_text in enumerate(split_text): sub_text = sub_text.strip() if i == 0 and not sub_text: result += [tok] elif i == len(split_text) - 1: if sub_text: result += [sub_text] else: pass else: if sub_text: result += [sub_text] result += [tok] return result def split_on_tokens(tok_list, text): if not text: return [] if not tok_list: return self._tokenize(text, **kwargs) tokenized_text = [] text_list = [text] for tok in tok_list: tokenized_text = [] for sub_text in text_list: if sub_text not in self.added_tokens_encoder \ and sub_text not in self.all_special_tokens: tokenized_text += split_on_token(tok, sub_text) else: tokenized_text += [sub_text] text_list = tokenized_text return sum((self._tokenize(token, **kwargs) if token not \ in self.added_tokens_encoder and token not in self.all_special_tokens \ else [token] for token in tokenized_text), []) added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens tokenized_text = split_on_tokens(added_tokens, text) return tokenized_text def _tokenize(self, 
text, **kwargs): """ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). Do NOT take care of added tokens. """ raise NotImplementedError def convert_tokens_to_ids(self, tokens): """ Converts a single token, or a sequence of tokens, (str/unicode) in a single integer id (resp. a sequence of ids), using the vocabulary. """ if tokens is None: return None if isinstance(tokens, str) or (six.PY2 and isinstance(tokens, unicode)): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) if len(ids) > self.max_len: logger.warning("Token indices sequence length is longer than the specified maximum sequence length " "for this model ({} > {}). Running this sequence through the model will result in " "indexing errors".format(len(ids), self.max_len)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self.added_tokens_encoder: return self.added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def encode(self, text, text_pair=None, add_special_tokens=False, max_length=None, stride=0, truncation_strategy='longest_first', return_tensors=None, **kwargs): """ Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``. Args: text: The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method) text_pair: Optional second sequence to be encoded. 
def encode(self, text, text_pair=None, add_special_tokens=False, max_length=None, stride=0,
           truncation_strategy='longest_first', return_tensors=None, **kwargs):
    """Convert a string (or pre-tokenized input) into a list of vocabulary ids.

    Equivalent to ``self.convert_tokens_to_ids(self.tokenize(text))`` plus
    optional special tokens / truncation handling.

    Args:
        text: first sequence — a string, a list of tokens, or a list of ids.
        text_pair: optional second sequence (same accepted forms).
        add_special_tokens: add the model's special tokens around the input.
        max_length: cap on the total returned length.
        stride: number of tokens from the main sequence repeated in the
            overflow window when truncating.
        truncation_strategy: 'longest_first' | 'only_first' | 'only_second'
            | 'do_not_truncate'.
        return_tensors: 'tf' or 'pt' to get framework tensors instead of lists.
        **kwargs: forwarded to ``self.tokenize()``.
    """
    encoded_inputs = self.encode_plus(text,
                                      text_pair=text_pair,
                                      max_length=max_length,
                                      add_special_tokens=add_special_tokens,
                                      stride=stride,
                                      truncation_strategy=truncation_strategy,
                                      return_tensors=return_tensors,
                                      **kwargs)
    return encoded_inputs["input_ids"]

def encode_plus(self, text, text_pair=None, add_special_tokens=False, max_length=None, stride=0,
                truncation_strategy='longest_first', return_tensors=None, **kwargs):
    """Encode one sequence (or a pair) and return a dict with ``input_ids``,
    ``token_type_ids`` and, when applicable, overflow/special-token info.

    Accepts strings, token lists, or id lists for ``text``/``text_pair``;
    see :meth:`encode` for the meaning of the other arguments.
    """

    def get_input_ids(text):
        # Normalize string / token-list / id-list input to a list of ids.
        if isinstance(text, six.string_types):
            return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
        elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], six.string_types):
            return self.convert_tokens_to_ids(text)
        elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
            return text
        else:
            raise ValueError("Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.")

    first_ids = get_input_ids(text)
    second_ids = get_input_ids(text_pair) if text_pair is not None else None

    return self.prepare_for_model(first_ids,
                                  pair_ids=second_ids,
                                  max_length=max_length,
                                  add_special_tokens=add_special_tokens,
                                  stride=stride,
                                  truncation_strategy=truncation_strategy,
                                  return_tensors=return_tensors)

def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=False, stride=0,
                      truncation_strategy='longest_first', return_tensors=None):
    """Assemble model-ready inputs from one or two id lists.

    Adds special tokens on request, truncates while accounting for the
    special tokens, and tracks the overflow window.

    Args:
        ids: list of input ids (from ``tokenize`` + ``convert_tokens_to_ids``).
        pair_ids: optional second id list.
        max_length: cap on total length, special tokens included.
        add_special_tokens: wrap the input with model special tokens.
        stride: overlap window kept in ``overflowing_tokens``.
        truncation_strategy: see :meth:`truncate_sequences`.
        return_tensors: 'tf'/'pt' for framework tensors, else python lists.

    Returns:
        dict with ``input_ids``, ``token_type_ids`` and, depending on the
        arguments, ``overflowing_tokens``, ``num_truncated_tokens`` and
        ``special_tokens_mask``.
    """
    pair = bool(pair_ids is not None)
    len_ids = len(ids)
    len_pair_ids = len(pair_ids) if pair else 0

    encoded_inputs = {}
    # Budget for special tokens only matters when we will actually add them.
    total_len = len_ids + len_pair_ids + (self.num_added_tokens(pair=pair) if add_special_tokens else 0)
    if max_length and total_len > max_length:
        ids, pair_ids, overflowing_tokens = self.truncate_sequences(ids, pair_ids=pair_ids,
                                                                    num_tokens_to_remove=total_len - max_length,
                                                                    truncation_strategy=truncation_strategy,
                                                                    stride=stride)
        encoded_inputs["overflowing_tokens"] = overflowing_tokens
        encoded_inputs["num_truncated_tokens"] = total_len - max_length

    if add_special_tokens:
        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
        token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
        encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
    else:
        sequence = ids + pair_ids if pair else ids
        token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])

    if return_tensors == 'tf' and is_tf_available():
        sequence = tf.constant([sequence])
        token_type_ids = tf.constant([token_type_ids])
    elif return_tensors == 'pt' and is_torch_available():
        sequence = torch.tensor([sequence])
        token_type_ids = torch.tensor([token_type_ids])
    elif return_tensors is not None:
        logger.warning("Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(return_tensors))

    encoded_inputs["input_ids"] = sequence
    encoded_inputs["token_type_ids"] = token_type_ids

    # Safety trim. "special_tokens_mask" only exists when add_special_tokens
    # was True — guard the lookup instead of raising KeyError on that path.
    if max_length and len(encoded_inputs["input_ids"]) > max_length:
        encoded_inputs["input_ids"] = encoded_inputs["input_ids"][:max_length]
        encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"][:max_length]
        if "special_tokens_mask" in encoded_inputs:
            encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"][:max_length]

    return encoded_inputs
def truncate_sequences(self, ids, pair_ids=None, num_tokens_to_remove=0,
                       truncation_strategy='longest_first', stride=0):
    """Truncate a sequence (or pair) by ``num_tokens_to_remove`` tokens.

    truncation_strategy:
        - 'longest_first': iteratively drop one token from whichever sequence
          is currently longer. Overflowing tokens only track the FIRST
          sequence's removals (historical behavior, kept as-is).
        - 'only_first': truncate only ``ids`` (asserts it is long enough).
        - 'only_second': truncate only ``pair_ids``.
        - 'do_not_truncate': raise instead of truncating.

    Returns:
        (ids, pair_ids, overflowing_tokens) after truncation; the overflow
        includes up to ``stride`` extra tokens of context.
    """
    if num_tokens_to_remove <= 0:
        return ids, pair_ids, []

    if truncation_strategy == 'longest_first':
        overflowing_tokens = []
        for _ in range(num_tokens_to_remove):
            if pair_ids is None or len(ids) > len(pair_ids):
                overflowing_tokens = [ids[-1]] + overflowing_tokens
                ids = ids[:-1]
            else:
                pair_ids = pair_ids[:-1]
        # Prepend up to ``stride`` context tokens from the kept part of ids.
        window_len = min(len(ids), stride)
        if window_len > 0:
            overflowing_tokens = ids[-window_len:] + overflowing_tokens
    elif truncation_strategy == 'only_first':
        assert len(ids) > num_tokens_to_remove
        window_len = min(len(ids), stride + num_tokens_to_remove)
        overflowing_tokens = ids[-window_len:]
        ids = ids[:-num_tokens_to_remove]
    elif truncation_strategy == 'only_second':
        assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
        window_len = min(len(pair_ids), stride + num_tokens_to_remove)
        overflowing_tokens = pair_ids[-window_len:]
        pair_ids = pair_ids[:-num_tokens_to_remove]
    elif truncation_strategy == 'do_not_truncate':
        raise ValueError("Input sequence are too long for max_length. Please select a truncation strategy.")
    else:
        raise ValueError("Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']")

    return (ids, pair_ids, overflowing_tokens)

def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    """Default segment ids: 0 for the first sequence, 1 for the second.

    The base class adds no special tokens, hence the warning.
    """
    logger.warning("This tokenizer does not make use of special tokens.")
    if token_ids_1 is None:
        return len(token_ids_0) * [0]
    return [0] * len(token_ids_0) + [1] * len(token_ids_1)

def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Base implementation: plain concatenation, no special tokens added.

    Subclasses (e.g. BERT/RoBERTa tokenizers) override this to insert
    CLS/SEP-style markers.
    """
    logger.warning("This tokenizer does not make use of special tokens. Input is returned with no modification.")
    if token_ids_1 is None:
        return token_ids_0
    return token_ids_0 + token_ids_1

def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    """Return a mask over the built sequence marking special-token positions.

    Convention (fixed here — the old docstring had it inverted relative to
    the all-zeros return value): 1 marks a special token, 0 marks a regular
    sequence token. The base class adds no special tokens, so the mask is
    all zeros.

    Args:
        token_ids_0: first id list (without special tokens).
        token_ids_1: optional second id list.
        already_has_special_tokens: True if the list is already formatted
            with the model's special tokens.
    """
    return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))

def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
    """Map an id (int) or sequence of ids back to token string(s), consulting
    the added-token vocabulary first.

    Args:
        skip_special_tokens: drop ids listed in ``self.all_special_ids``.
    """
    if isinstance(ids, int):
        if ids in self.added_tokens_decoder:
            return self.added_tokens_decoder[ids]
        else:
            return self._convert_id_to_token(ids)
    tokens = []
    for index in ids:
        if skip_special_tokens and index in self.all_special_ids:
            continue
        if index in self.added_tokens_decoder:
            tokens.append(self.added_tokens_decoder[index])
        else:
            tokens.append(self._convert_id_to_token(index))
    return tokens

def _convert_id_to_token(self, index):
    """Base-vocabulary reverse lookup; must be implemented by subclasses."""
    raise NotImplementedError

def convert_tokens_to_string(self, tokens):
    """Join a sequence of tokens into a single string.

    Fixed: the previous implementation routed the (string) tokens through
    ``convert_ids_to_tokens``, which performs integer-id lookups and
    therefore mangled string input; a plain join is the intended default
    (subclasses override to strip sub-word artifacts).
    """
    return ' '.join(tokens)
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    """Convert a list of ids back into a single string.

    Similar to
    ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``,
    but keeps added tokens out of the byte-level/sub-word joining step
    (cf. https://github.com/huggingface/transformers/issues/1133).

    Args:
        token_ids: list of ids (e.g. from ``encode``).
        skip_special_tokens: drop special tokens from the output.
        clean_up_tokenization_spaces: apply :meth:`clean_up_tokenization`.
    """
    filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

    sub_texts = []
    current_sub_text = []
    for token in filtered_tokens:
        # Fixed: ``filtered_tokens`` holds token STRINGS, so the special-token
        # test must compare against ``all_special_tokens``; the old check
        # against ``all_special_ids`` (ints) could never match.
        if skip_special_tokens and token in self.all_special_tokens:
            continue
        if token in self.added_tokens_encoder:
            # Flush accumulated sub-word tokens before emitting an added
            # token verbatim, so byte-level and plain tokens never mix.
            if current_sub_text:
                sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                current_sub_text = []
            sub_texts.append(" " + token)
        else:
            current_sub_text.append(token)
    if current_sub_text:
        sub_texts.append(self.convert_tokens_to_string(current_sub_text))
    text = ''.join(sub_texts)

    if clean_up_tokenization_spaces:
        return self.clean_up_tokenization(text)
    else:
        return text

@property
def special_tokens_map(self):
    """dict mapping special-token attribute names (``cls_token``, ...) to the
    values currently set on this tokenizer (unset attributes are omitted)."""
    set_attr = {}
    for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
        attr_value = getattr(self, "_" + attr)
        if attr_value:
            set_attr[attr] = attr_value
    return set_attr

@property
def all_special_tokens(self):
    """Deduplicated list of every special token set on this tokenizer."""
    all_toks = []
    set_attr = self.special_tokens_map
    for attr_value in set_attr.values():
        all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
    all_toks = list(set(all_toks))
    return all_toks

@property
def all_special_ids(self):
    """Vocabulary ids of every special token set on this tokenizer."""
    all_toks = self.all_special_tokens
    all_ids = list(self._convert_token_to_id(t) for t in all_toks)
    return all_ids

@staticmethod
def clean_up_tokenization(out_string):
    """Undo simple English tokenization artifacts (spaces before punctuation,
    detached contractions)."""
    out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
                ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
                ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
    return out_string
54,979
50.431244
372
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/google_albert_pytorch_modeling.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model (Google-style checkpoint layout)."""

from __future__ import print_function

import copy
import json
import logging
import math

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

logger = logging.getLogger(__name__)


def gelu(x):
    """Exact (erf-based) GELU activation.

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def fast_gelu(x):
    """Sigmoid approximation of GELU (x * sigmoid(1.702 x))."""
    return x * torch.sigmoid(1.702 * x)


def swish(x):
    """Swish/SiLU activation."""
    return x * torch.sigmoid(x)


# NOTE(review): "gelu" is deliberately (?) mapped to the fast sigmoid
# approximation rather than the exact erf implementation above — confirm this
# matches the checkpoint being loaded before changing it.
ACT2FN = {"gelu": fast_gelu, "relu": torch.relu, "swish": swish}


class AlbertConfig(object):
    """Configuration for `AlbertModel`.

    The default settings match the configuration of model `albert_xxlarge`.
    """

    def __init__(self,
                 vocab_size,
                 embedding_size=128,
                 hidden_size=4096,
                 num_hidden_layers=12,
                 num_hidden_groups=1,
                 num_attention_heads=64,
                 intermediate_size=16384,
                 inner_group_num=1,
                 down_scale_factor=1,
                 hidden_act="gelu",
                 hidden_dropout_prob=0,
                 attention_probs_dropout_prob=0,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs AlbertConfig.

        Args:
            vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`.
            embedding_size: size of the (factorized) token embeddings.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_hidden_groups: Number of groups for the hidden layers; parameters
                in the same group are shared.
            num_attention_heads: Number of attention heads per layer.
            intermediate_size: Size of the feed-forward layer.
            inner_group_num: number of inner repetitions of attention and ffn.
            down_scale_factor: the scale to apply.
            hidden_act: Non-linear activation (string key of ACT2FN).
            hidden_dropout_prob: Dropout for fully connected layers in the
                embeddings, encoder, and pooler.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum supported sequence length.
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: stdev of the truncated-normal weight initializer.
        """
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.down_scale_factor = down_scale_factor
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `AlbertConfig` from a Python dictionary of parameters."""
        config = AlbertConfig(vocab_size=None)
        # dict.items() replaces the six.iteritems() py2 shim (identical
        # iteration semantics); ``six`` is no longer needed in this module.
        for (key, value) in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `AlbertConfig` from a json file of parameters."""
        with open(json_file, "r") as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
""" super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class AlbertEmbeddings(nn.Module): """ Albert embeddings. """ def __init__(self, config): super(AlbertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.embedding_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None, position_ids=None): seq_length = input_ids.size(1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, 
config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.Softmax(dim=-1)(attention_scores) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) # if isinstance(config.hidden_act, str) else config.hidden_act self.intermediate_act_fn = ACT2FN[config.hidden_act] self.output = BertOutput(config) def forward(self, input_tensor): hidden_states = self.dense(input_tensor) hidden_states = self.intermediate_act_fn(hidden_states) hidden_output = 
class BertFF(nn.Module):
    """Thin wrapper exposing the feed-forward block as a single module."""

    def __init__(self, config):
        super(BertFF, self).__init__()
        self.intermediate = BertIntermediate(config)

    def forward(self, hidden_states):
        return self.intermediate(hidden_states)


class AlbertLayer(nn.Module):
    """One ALBERT transformer layer: attention -> LN -> feed-forward -> LN
    (post-norm variant; the residuals live inside the sub-blocks)."""

    def __init__(self, config):
        super(AlbertLayer, self).__init__()
        self.attention_1 = BertAttention(config)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
        self.ffn_1 = BertFF(config)
        self.LayerNorm_1 = BertLayerNorm(config.hidden_size, eps=1e-5)

    def forward(self, hidden_states, attention_mask):
        out = self.attention_1(hidden_states, attention_mask)
        out = self.LayerNorm(out)
        out = self.ffn_1(out)
        return self.LayerNorm_1(out)


class AlbertEncoder(nn.Module):
    """ALBERT encoder: maps embeddings to hidden size, then applies ONE shared
    AlbertLayer ``num_hidden_layers`` times (cross-layer parameter sharing)."""

    def __init__(self, config):
        super(AlbertEncoder, self).__init__()
        self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
        self.num_hidden_layers = config.num_hidden_layers
        # A single layer instance: all "layers" share these parameters.
        self.transformer = AlbertLayer(config)

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        hidden_states = self.embedding_hidden_mapping_in(hidden_states)
        all_encoder_layers = []
        for _ in range(self.num_hidden_layers):
            hidden_states = self.transformer(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers


class BertPooler(nn.Module):
    """Pool the sequence by projecting the first ([CLS]) token through a tanh
    dense layer."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))


class AlbertModel(nn.Module):
    """Bare ALBERT encoder producing per-token hidden states and a pooled
    [CLS] representation."""

    def __init__(self, config):
        super(AlbertModel, self).__init__()
        self.embeddings = AlbertEmbeddings(config)
        self.encoder = AlbertEncoder(config)
        self.pooler = BertPooler(config)
        self.config = config
        self.apply(self.init_bert_weights)

    def init_bert_weights(self, module):
        """Initialize the weights.

        Slightly different from the TF version, which uses truncated_normal
        (cf. https://github.com/pytorch/pytorch/pull/5617).
        NOTE(review): LayerNorm weight AND bias are drawn from the same
        normal distribution instead of ones/zeros — looks unusual but is kept
        as-is since changing it would alter training behavior; confirm
        against the original checkpoint recipe.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # Broadcast the 2D padding mask to [batch, 1, 1, seq] so it can be
        # added to every head's attention logits; keep-positions get 0.0 and
        # masked positions get -10000.0, which zeroes them after softmax.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # Callers asking for a single layer get the tensor, not a list.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output


class AlbertForPreTraining(nn.Module):
    """Pass-through wrapper kept for checkpoint-name compatibility."""

    def __init__(self, config):
        super(AlbertForPreTraining, self).__init__()
        self.bert = AlbertModel(config)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        return self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers)


class MRC_finetune(nn.Module):
    """Span-prediction head: independent start/end logit projections."""

    def __init__(self, config):
        super(MRC_finetune, self).__init__()
        self.start_dense = nn.Linear(config.hidden_size, 1)
        self.end_dense = nn.Linear(config.hidden_size, 1)

    def forward(self, input_tensor):
        return self.start_dense(input_tensor), self.end_dense(input_tensor)
""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.normal_(mean=0.0, std=self.config.initializer_range) module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) start_logits, end_logits = self.finetune_mrc(sequence_output) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits class AlbertForMultipleChoice(nn.Module): def __init__(self, config, num_choices=2): super(AlbertForMultipleChoice, self).__init__() self.config = config self.num_choices = num_choices self.bert = AlbertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def 
init_bert_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.normal_(mean=0.0, std=self.config.initializer_range) module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, return_logits=False): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if return_logits: return loss, reshaped_logits else: return loss else: return reshaped_logits
22,556
42.885214
119
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/run_c3.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import csv
import json
import logging
import os
import pickle
import random

import numpy as np
import torch
from google_albert_pytorch_modeling import AlbertConfig, AlbertForMultipleChoice
from pytorch_modeling import BertConfig, BertForMultipleChoice, ALBertConfig, ALBertForMultipleChoice
from tools import official_tokenization as tokenization
from tools import utils
from tools.pytorch_optimization import get_optimization, warmup_linear
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

# C3 is a 4-way multiple-choice task; every example is expanded to 4 features.
n_class = 4
reverse_order = False
sa_step = False

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None, text_c=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
            Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
            text_c: (Optional) string. Third text field; for C3 this carries the
            question while text_b carries one candidate answer.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.text_c = text_c
        self.label = label


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines


class c3Processor(DataProcessor):
    """Processor for the C3 multiple-choice machine-reading dataset.

    Loads both the dialogue ("d") and mixed-genre ("m") subtasks for each of
    the train/dev/test splits and flattens every question into a 7-field row:
    [context, question, choice0..choice3, answer].
    """

    def __init__(self, data_dir):
        # self.D[0]/[1]/[2] hold the flattened train/dev/test rows.
        self.D = [[], [], []]
        self.data_dir = data_dir

        for sid in range(3):
            data = []
            for subtask in ["d", "m"]:
                with open(self.data_dir + "/c3-" + subtask + "-" + ["train.json", "dev.json", "test.json"][sid],
                          "r", encoding="utf8") as f:
                    data += json.load(f)
            if sid == 0:
                # Shuffle only the training split; dev/test order is preserved
                # (the test submission order must not change).
                random.shuffle(data)
            for i in range(len(data)):
                for j in range(len(data[i][1])):
                    d = ['\n'.join(data[i][0]).lower(), data[i][1][j]["question"].lower()]
                    for k in range(len(data[i][1][j]["choice"])):
                        d += [data[i][1][j]["choice"][k].lower()]
                    for k in range(len(data[i][1][j]["choice"]), 4):
                        # Some C3 questions have fewer than 4 choices; padding with
                        # this "invalid answer" placeholder noticeably stabilizes
                        # model convergence.  (Runtime string kept in Chinese.)
                        d += ['无效答案']
                    d += [data[i][1][j]["answer"].lower()]
                    self.D[sid] += [d]

    def get_train_examples(self):
        """See base class."""
        return self._create_examples(self.D[0], "train")

    def get_test_examples(self):
        """See base class."""
        return self._create_examples(self.D[2], "test")

    def get_dev_examples(self):
        """See base class."""
        return self._create_examples(self.D[1], "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]

    def _create_examples(self, data, set_type):
        """Creates examples for the training and dev sets."""
        # Examples are cached to disk per split; delete the .pkl to rebuild.
        cache_dir = os.path.join(self.data_dir, set_type + '_examples.pkl')
        if os.path.exists(cache_dir):
            # NOTE(review): file handle from open() is never closed here.
            examples = pickle.load(open(cache_dir, 'rb'))
        else:
            examples = []
            for (i, d) in enumerate(data):
                answer = -1
                # data[i] holds: [0]=context, [1]=question, [2..5]=choices, [6]=answer.
                for k in range(4):
                    if data[i][2 + k] == data[i][6]:
                        answer = str(k)

                label = tokenization.convert_to_unicode(answer)

                # One InputExample per choice; the 4 siblings share a guid prefix
                # and the same gold label.
                for k in range(4):
                    guid = "%s-%s-%s" % (set_type, i, k)
                    text_a = tokenization.convert_to_unicode(data[i][0])
                    text_b = tokenization.convert_to_unicode(data[i][k + 2])
                    text_c = tokenization.convert_to_unicode(data[i][1])
                    examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, text_c=text_c))
            with open(cache_dir, 'wb') as w:
                pickle.dump(examples, w)

        return examples


def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Each feature encodes "[CLS] context [SEP] question [SEP] choice [SEP]";
    features are grouped into lists of n_class (4) consecutive choices.
    """

    print("#examples", len(examples))

    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i

    features = [[]]
    for (ex_index, example) in enumerate(tqdm(examples)):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = tokenizer.tokenize(example.text_b)

        tokens_c = tokenizer.tokenize(example.text_c)

        # Budget leaves room for [CLS] and three [SEP] tokens.
        _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
        # Question first, then the candidate answer, in segment B.
        tokens_b = tokens_c + ["[SEP]"] + tokens_b

        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)

        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        label_id = label_map[example.label]
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features[-1].append(
            InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_id=label_id))
        # Start a new group once all 4 choices of a question are collected.
        if len(features[-1]) == n_class:
            features.append([])

    if len(features[-1]) == 0:
        features = features[:-1]
    print('#features', len(features))
    return features


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


def _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_length):
    """Truncates a sequence tuple in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)
        if total_length <= max_length:
            break
        # Always pop from the (joint-)longest of the three sequences.
        if len(tokens_a) >= len(tokens_b) and len(tokens_a) >= len(tokens_c):
            tokens_a.pop()
        elif len(tokens_b) >= len(tokens_a) and len(tokens_b) >= len(tokens_c):
            tokens_b.pop()
        else:
            tokens_c.pop()


def accuracy(out, labels):
    """Return the COUNT (not fraction) of rows whose argmax matches labels;
    callers divide by the number of examples afterwards."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def main():
    """End-to-end C3 fine-tuning driver: parses CLI args, builds the
    (AL)BERT multiple-choice model, optionally trains with per-epoch dev
    evaluation, then evaluates the best checkpoint on dev and test and
    writes predictions/logits/submission files to output_dir."""
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--gpu_ids", default='0', type=str, required=True)
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--task_name", default='c3', type=str, required=True)
    parser.add_argument("--bert_config_file", default=None, type=str, required=True,
                        help="The config json file corresponding to the pre-trained BERT model. \n"
                             "This specifies the model architecture.")
    parser.add_argument("--vocab_file", default=None, type=str, required=True,
                        help="The vocabulary file that the BERT model was trained on.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--init_checkpoint",
                        default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/pytorch_model.pth',
                        type=str,
                        help="Initial checkpoint (usually from a pre-trained BERT model).")
    # NOTE(review): default=True combined with action='store_true' means this
    # flag can never be switched off from the command line.
    parser.add_argument("--do_lower_case", default=True, action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--max_seq_length", default=512, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", default=False, action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size", default=16, type=int, help="Total batch size for training.")
    parser.add_argument("--eval_batch_size", default=16, type=int, help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--schedule", default='warmup_linear', type=str, help='schedule')
    parser.add_argument("--weight_decay_rate", default=0.01, type=float, help='weight_decay_rate')
    parser.add_argument('--clip_norm', type=float, default=1.0)
    parser.add_argument("--num_train_epochs", default=8.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda", default=False, action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--float16', action='store_true', default=False)
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=422, help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumualte before performing a backward/update pass.")
    parser.add_argument('--setting_file', type=str, default='setting.txt')
    parser.add_argument('--log_file', type=str, default='log.txt')

    args = parser.parse_args()

    # Dump the full run configuration to <output_dir>/setting.txt for audit.
    args.setting_file = os.path.join(args.output_dir, args.setting_file)
    args.log_file = os.path.join(args.output_dir, args.log_file)
    os.makedirs(args.output_dir, exist_ok=True)
    with open(args.setting_file, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        print('------------ Options -------------')
        for k in args.__dict__:
            v = args.__dict__[k]
            opt_file.write('%s: %s\n' % (str(k), str(v)))
            print('%s: %s' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
        print('------------ End -------------')

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    if os.path.exists(args.log_file):
        os.remove(args.log_file)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    # Effective per-step batch shrinks; the optimizer step count stays the same.
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    processor = c3Processor(args.data_dir)
    label_list = processor.get_labels()

    tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples()
        # Examples come 4-per-question, hence the division by n_class.
        num_train_steps = int(
            len(train_examples) / n_class / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

    # Model variant is selected by sniffing the config-file path.
    if 'albert' in args.bert_config_file:
        if 'google' in args.bert_config_file:
            bert_config = AlbertConfig.from_json_file(args.bert_config_file)
            model = AlbertForMultipleChoice(bert_config, num_choices=n_class)
        else:
            bert_config = ALBertConfig.from_json_file(args.bert_config_file)
            model = ALBertForMultipleChoice(bert_config, num_choices=n_class)
    else:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
        model = BertForMultipleChoice(bert_config, num_choices=n_class)

    if args.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length {} because the BERT model was only trained up to sequence length {}".format(
                args.max_seq_length, bert_config.max_position_embeddings))

    if args.init_checkpoint is not None:
        utils.torch_show_all_params(model)
        utils.torch_init_model(model, args.init_checkpoint)
    if args.float16:
        model.half()
    model.to(device)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    optimizer = get_optimization(model=model,
                                 float16=args.float16,
                                 learning_rate=args.learning_rate,
                                 total_steps=num_train_steps,
                                 schedule=args.schedule,
                                 warmup_rate=args.warmup_proportion,
                                 max_grad_norm=args.clip_norm,
                                 weight_decay_rate=args.weight_decay_rate,
                                 opt_pooler=True)  # multi_choice must update pooler

    global_step = 0
    eval_dataloader = None
    if args.do_eval:
        eval_examples = processor.get_dev_examples()
        # Features are cached per max_seq_length; delete the .pkl to rebuild.
        feature_dir = os.path.join(args.data_dir, 'dev_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            eval_features = pickle.load(open(feature_dir, 'rb'))
        else:
            eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(eval_features, w)

        # Re-pack the grouped features into (num_questions, n_class, seq_len)
        # tensors; all 4 choices of a question share one label.
        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []

        for f in eval_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            eval_sampler = SequentialSampler(eval_data)
        else:
            eval_sampler = DistributedSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    if args.do_train:
        best_accuracy = 0

        feature_dir = os.path.join(args.data_dir, 'train_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            train_features = pickle.load(open(feature_dir, 'rb'))
        else:
            train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(train_features, w)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []
        for f in train_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size,
                                      drop_last=True)

        steps_per_epoch = int(num_train_steps / args.num_train_epochs)
        for ie in range(int(args.num_train_epochs)):
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            with tqdm(total=int(steps_per_epoch), desc='Epoch %d' % (ie + 1)) as pbar:
                for step, batch in enumerate(train_dataloader):
                    batch = tuple(t.to(device) for t in batch)
                    input_ids, input_mask, segment_ids, label_ids = batch
                    loss = model(input_ids, segment_ids, input_mask, label_ids)
                    if n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.
                    if args.gradient_accumulation_steps > 1:
                        loss = loss / args.gradient_accumulation_steps
                    tr_loss += loss.item()

                    if args.float16:
                        optimizer.backward(loss)
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used and handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_steps,
                                                                          args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    else:
                        loss.backward()

                    nb_tr_examples += input_ids.size(0)
                    if (step + 1) % args.gradient_accumulation_steps == 0:
                        optimizer.step()  # We have accumulated enought gradients
                        model.zero_grad()
                        global_step += 1
                        nb_tr_steps += 1
                        pbar.set_postfix({'loss': '{0:1.5f}'.format(tr_loss / (nb_tr_steps + 1e-5))})
                        pbar.update(1)

            # Per-epoch dev evaluation; checkpoint whenever accuracy improves.
            if args.do_eval:
                model.eval()
                eval_loss, eval_accuracy = 0, 0
                nb_eval_steps, nb_eval_examples = 0, 0
                logits_all = []
                for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
                    input_ids = input_ids.to(device)
                    input_mask = input_mask.to(device)
                    segment_ids = segment_ids.to(device)
                    label_ids = label_ids.to(device)

                    with torch.no_grad():
                        tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids,
                                                      return_logits=True)

                    logits = logits.detach().cpu().numpy()
                    label_ids = label_ids.cpu().numpy()

                    for i in range(len(logits)):
                        logits_all += [logits[i]]

                    tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))

                    eval_loss += tmp_eval_loss.mean().item()
                    eval_accuracy += tmp_eval_accuracy

                    nb_eval_examples += input_ids.size(0)
                    nb_eval_steps += 1

                eval_loss = eval_loss / nb_eval_steps
                eval_accuracy = eval_accuracy / nb_eval_examples

                if args.do_train:
                    result = {'eval_loss': eval_loss,
                              'eval_accuracy': eval_accuracy,
                              'global_step': global_step,
                              'loss': tr_loss / nb_tr_steps}
                else:
                    result = {'eval_loss': eval_loss,
                              'eval_accuracy': eval_accuracy}

                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                with open(args.log_file, 'a') as aw:
                    aw.write("-------------------global steps:{}-------------------\n".format(global_step))
                    aw.write(str(json.dumps(result, indent=2)) + '\n')

                if eval_accuracy >= best_accuracy:
                    torch.save(model.state_dict(), os.path.join(args.output_dir, "model_best.pt"))
                    best_accuracy = eval_accuracy

        # Promote the best dev checkpoint to the final model file.
        model.load_state_dict(torch.load(os.path.join(args.output_dir, "model_best.pt")))
        torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))

    model.load_state_dict(torch.load(os.path.join(args.output_dir, "model.pt")))

    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        logits_all = []
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids, return_logits=True)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.cpu().numpy()

            for i in range(len(logits)):
                logits_all += [logits[i]]

            tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples

        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy}

        output_eval_file = os.path.join(args.output_dir, "results_dev.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        # Raw per-question logits, space-separated, one question per line.
        output_eval_file = os.path.join(args.output_dir, "logits_dev.txt")
        with open(output_eval_file, "w") as f:
            for i in range(len(logits_all)):
                for j in range(len(logits_all[i])):
                    f.write(str(logits_all[i][j]))
                    if j == len(logits_all[i]) - 1:
                        f.write("\n")
                    else:
                        f.write(" ")

        test_examples = processor.get_test_examples()
        feature_dir = os.path.join(args.data_dir, 'test_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            test_features = pickle.load(open(feature_dir, 'rb'))
        else:
            test_features = convert_examples_to_features(test_examples, label_list, args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(test_features, w)

        logger.info("***** Running testing *****")
        logger.info("  Num examples = %d", len(test_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []

        for f in test_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            test_sampler = SequentialSampler(test_data)
        else:
            test_sampler = DistributedSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)

        model.eval()
        test_loss, test_accuracy = 0, 0
        nb_test_steps, nb_test_examples = 0, 0
        logits_all = []
        for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_test_loss, logits = model(input_ids, segment_ids, input_mask, label_ids,
                                              return_logits=True)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()

            for i in range(len(logits)):
                logits_all += [logits[i]]

            tmp_test_accuracy = accuracy(logits, label_ids.reshape(-1))

            test_loss += tmp_test_loss.mean().item()
            test_accuracy += tmp_test_accuracy

            nb_test_examples += input_ids.size(0)
            nb_test_steps += 1

        test_loss = test_loss / nb_test_steps
        test_accuracy = test_accuracy / nb_test_examples

        result = {'test_loss': test_loss,
                  'test_accuracy': test_accuracy}

        output_test_file = os.path.join(args.output_dir, "results_test.txt")
        with open(output_test_file, "w") as writer:
            logger.info("***** Test results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        output_test_file = os.path.join(args.output_dir, "logits_test.txt")
        with open(output_test_file, "w") as f:
            for i in range(len(logits_all)):
                for j in range(len(logits_all[i])):
                    f.write(str(logits_all[i][j]))
                    if j == len(logits_all[i]) - 1:
                        f.write("\n")
                    else:
                        f.write(" ")

        # the test submission order can't be changed
        submission_test = os.path.join(args.output_dir, "submission_test.json")
        test_preds = [int(np.argmax(logits_)) for logits_ in logits_all]
        with open(submission_test, "w") as f:
            json.dump(test_preds, f)


if __name__ == "__main__":
    main()
34,700
41.061818
120
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/test_mrc.py
import argparse import collections import json import os from glob import glob import torch from torch.utils.data import TensorDataset, DataLoader from tqdm import tqdm from pytorch_modeling import BertConfig, BertForQuestionAnswering, ALBertConfig, ALBertForQA from google_albert_pytorch_modeling import AlbertConfig, AlbertForMRC from tools import official_tokenization as tokenization from tools import utils def test(model, args, eval_examples, eval_features, device): print("***** Eval *****") RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) output_prediction_file = os.path.join(args.output_dir, args.output_file) output_nbest_file = output_prediction_file.replace('predictions', 'nbest') all_input_ids = torch.tensor([f['input_ids'] for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f['input_mask'] for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f['segment_ids'] for f in eval_features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index) eval_dataloader = DataLoader(eval_data, batch_size=args.n_batch, shuffle=False) model.eval() all_results = [] print("Start evaluating") for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) with torch.no_grad(): batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask) for i, example_index in enumerate(example_indices): start_logits = batch_start_logits[i].detach().cpu().tolist() end_logits = batch_end_logits[i].detach().cpu().tolist() eval_feature = eval_features[example_index.item()] unique_id = int(eval_feature['unique_id']) all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) 
write_predictions(eval_examples, eval_features, all_results, n_best_size=args.n_best, max_answer_length=args.max_ans_length, do_lower_case=True, output_prediction_file=output_prediction_file, output_nbest_file=output_nbest_file) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu_ids', type=str, default='0') parser.add_argument('--task_name', type=str, required=True, default='cmrc2018') # training parameter parser.add_argument('--n_batch', type=int, default=32) parser.add_argument('--float16', action='store_true', default=False) # only sm >= 7.0 (tensorcores) parser.add_argument('--max_ans_length', type=int, default=50) parser.add_argument('--n_best', type=int, default=20) parser.add_argument('--vocab_size', type=int, default=21128) parser.add_argument('--max_seq_length', type=int, default=256) # data dir parser.add_argument('--test_dir1', type=str, required=True) parser.add_argument('--test_dir2', type=str, required=True) parser.add_argument('--test_file', type=str, default='cmrc2018_test_2k.json') parser.add_argument('--bert_config_file', type=str, required=True) parser.add_argument('--vocab_file', type=str, required=True) parser.add_argument('--init_restore_dir', type=str, required=True) parser.add_argument('--output_dir', type=str, required=True) parser.add_argument('--output_file', type=str, default='predictions_test.json') # use some global vars for convenience args = parser.parse_args() if args.task_name.lower() == 'drcd': from preprocess.DRCD_output import write_predictions from preprocess.DRCD_preprocess import json2features elif args.task_name.lower() == 'cmrc2018': from preprocess.cmrc2018_output import write_predictions from preprocess.cmrc2018_preprocess import json2features else: raise NotImplementedError args.test_dir1 = args.test_dir1.replace('examples.json', 'examples_' + str(args.max_seq_length) + '.json') args.test_dir2 = args.test_dir2.replace('features.json', 'features_' + str(args.max_seq_length) + '.json') 
if args.init_restore_dir.endswith('.pth') or \ args.init_restore_dir.endswith('.pt') or \ args.init_restore_dir.endswith('.bin'): pass else: args.init_restore_dir = glob(args.init_restore_dir + '*.pth') assert len(args.init_restore_dir) == 1 args.init_restore_dir = args.init_restore_dir[0] os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids device = torch.device("cuda") n_gpu = torch.cuda.device_count() print("device %s n_gpu %d" % (device, n_gpu)) print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16)) # load the bert setting if 'albert' not in args.bert_config_file: bert_config = BertConfig.from_json_file(args.bert_config_file) else: if 'google' in args.bert_config_file: bert_config = AlbertConfig.from_json_file(args.bert_config_file) else: bert_config = ALBertConfig.from_json_file(args.bert_config_file) # load data print('loading data...') tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=True) assert args.vocab_size == len(tokenizer.vocab) if not os.path.exists(args.test_dir1) or not os.path.exists(args.test_dir2): json2features(args.test_file, [args.test_dir1, args.test_dir2], tokenizer, is_training=False, max_seq_length=args.max_seq_length) if not os.path.exists(args.test_dir1): json2features(input_file=args.test_file, output_files=[args.test_dir1, args.test_dir2], tokenizer=tokenizer, is_training=False, repeat_limit=3, max_query_length=64, max_seq_length=args.max_seq_length, doc_stride=128) test_examples = json.load(open(args.test_dir1, 'r')) test_features = json.load(open(args.test_dir2, 'r')) dev_steps_per_epoch = len(test_features) // args.n_batch if len(test_features) % args.n_batch != 0: dev_steps_per_epoch += 1 # init model print('init model...') if 'albert' not in args.init_restore_dir: model = BertForQuestionAnswering(bert_config) else: if 'google' in args.init_restore_dir: model = AlbertForMRC(bert_config) else: model = ALBertForQA(bert_config, dropout_rate=args.dropout) 
utils.torch_show_all_params(model) utils.torch_init_model(model, args.init_restore_dir) if args.float16: model.half() model.to(device) if n_gpu > 1: model = torch.nn.DataParallel(model) test(model, args, test_examples, test_features, device)
7,116
44.33121
110
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/run_mrc.py
import argparse import collections import json import os import random import numpy as np import torch from google_albert_pytorch_modeling import AlbertConfig, AlbertForMRC from preprocess.cmrc2018_evaluate import get_eval from pytorch_modeling import BertConfig, BertForQuestionAnswering, ALBertConfig, ALBertForQA from tools import official_tokenization as tokenization, utils from tools.pytorch_optimization import get_optimization, warmup_linear from torch.utils.data import TensorDataset, DataLoader from tqdm import tqdm def evaluate(model, args, eval_examples, eval_features, device, global_steps, best_f1, best_em, best_f1_em): print("***** Eval *****") RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) output_prediction_file = os.path.join(args.checkpoint_dir, "predictions_steps" + str(global_steps) + ".json") output_nbest_file = output_prediction_file.replace('predictions', 'nbest') all_input_ids = torch.tensor([f['input_ids'] for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f['input_mask'] for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f['segment_ids'] for f in eval_features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index) eval_dataloader = DataLoader(eval_data, batch_size=args.n_batch, shuffle=False) model.eval() all_results = [] print("Start evaluating") for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) with torch.no_grad(): batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask) for i, example_index in enumerate(example_indices): start_logits = batch_start_logits[i].detach().cpu().tolist() end_logits = batch_end_logits[i].detach().cpu().tolist() 
eval_feature = eval_features[example_index.item()] unique_id = int(eval_feature['unique_id']) all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) write_predictions(eval_examples, eval_features, all_results, n_best_size=args.n_best, max_answer_length=args.max_ans_length, do_lower_case=True, output_prediction_file=output_prediction_file, output_nbest_file=output_nbest_file) tmp_result = get_eval(args.dev_file, output_prediction_file) tmp_result['STEP'] = global_steps with open(args.log_file, 'a') as aw: aw.write(json.dumps(tmp_result) + '\n') print(tmp_result) if float(tmp_result['F1']) > best_f1: best_f1 = float(tmp_result['F1']) if float(tmp_result['EM']) > best_em: best_em = float(tmp_result['EM']) if float(tmp_result['F1']) + float(tmp_result['EM']) > best_f1_em: best_f1_em = float(tmp_result['F1']) + float(tmp_result['EM']) utils.torch_save_model(model, args.checkpoint_dir, {'f1': float(tmp_result['F1']), 'em': float(tmp_result['EM'])}, max_save_num=1) model.train() return best_f1, best_em, best_f1_em if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu_ids', type=str, default='0,1,2,3') # training parameter parser.add_argument('--train_epochs', type=int, default=2) parser.add_argument('--n_batch', type=int, default=32) parser.add_argument('--lr', type=float, default=3e-5) parser.add_argument('--dropout', type=float, default=0.1) parser.add_argument('--clip_norm', type=float, default=1.0) parser.add_argument('--warmup_rate', type=float, default=0.05) parser.add_argument("--schedule", default='warmup_linear', type=str, help='schedule') parser.add_argument("--weight_decay_rate", default=0.01, type=float, help='weight_decay_rate') parser.add_argument('--seed', type=list, default=[123]) parser.add_argument('--float16', action='store_true', default=False) # only sm >= 7.0 (tensorcores) parser.add_argument('--max_ans_length', type=int, default=50) parser.add_argument('--n_best', 
type=int, default=20) parser.add_argument('--eval_epochs', type=float, default=0.5) parser.add_argument('--save_best', type=bool, default=True) parser.add_argument('--vocab_size', type=int, default=21128) parser.add_argument('--max_seq_length', type=int, default=256) # data dir parser.add_argument('--train_dir', type=str, required=True) parser.add_argument('--dev_dir1', type=str, required=True) parser.add_argument('--dev_dir2', type=str, required=True) parser.add_argument('--train_file', type=str, required=True) parser.add_argument('--dev_file', type=str, required=True) parser.add_argument('--bert_config_file', type=str, required=True) parser.add_argument('--vocab_file', type=str, required=True) parser.add_argument('--init_restore_dir', type=str, required=True) parser.add_argument('--checkpoint_dir', type=str, required=True) parser.add_argument('--task_name', type=str, required=True) parser.add_argument('--setting_file', type=str, default='setting.txt') parser.add_argument('--log_file', type=str, default='log.txt') # use some global vars for convenience args = parser.parse_args() if args.task_name.lower() == 'drcd': from preprocess.DRCD_output import write_predictions from preprocess.DRCD_preprocess import json2features elif args.task_name.lower() == 'cmrc2018': from preprocess.cmrc2018_output import write_predictions from preprocess.cmrc2018_preprocess import json2features else: raise NotImplementedError args.train_dir = args.train_dir.replace('features.json', 'features_' + str(args.max_seq_length) + '.json') args.dev_dir1 = args.dev_dir1.replace('examples.json', 'examples_' + str(args.max_seq_length) + '.json') args.dev_dir2 = args.dev_dir2.replace('features.json', 'features_' + str(args.max_seq_length) + '.json') args = utils.check_args(args) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids device = torch.device("cuda") n_gpu = torch.cuda.device_count() print("device %s n_gpu %d" % (device, n_gpu)) print("device: {} n_gpu: {} 16-bits training: 
{}".format(device, n_gpu, args.float16)) # load the bert setting if 'albert' not in args.bert_config_file: bert_config = BertConfig.from_json_file(args.bert_config_file) else: if 'google' in args.bert_config_file: bert_config = AlbertConfig.from_json_file(args.bert_config_file) else: bert_config = ALBertConfig.from_json_file(args.bert_config_file) # load data print('loading data...') tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=True) assert args.vocab_size == len(tokenizer.vocab) if not os.path.exists(args.train_dir): json2features(args.train_file, [args.train_dir.replace('_features_', '_examples_'), args.train_dir], tokenizer, is_training=True, max_seq_length=args.max_seq_length) if not os.path.exists(args.dev_dir1) or not os.path.exists(args.dev_dir2): json2features(args.dev_file, [args.dev_dir1, args.dev_dir2], tokenizer, is_training=False, max_seq_length=args.max_seq_length) train_features = json.load(open(args.train_dir, 'r')) dev_examples = json.load(open(args.dev_dir1, 'r')) dev_features = json.load(open(args.dev_dir2, 'r')) if os.path.exists(args.log_file): os.remove(args.log_file) steps_per_epoch = len(train_features) // args.n_batch eval_steps = int(steps_per_epoch * args.eval_epochs) dev_steps_per_epoch = len(dev_features) // args.n_batch if len(train_features) % args.n_batch != 0: steps_per_epoch += 1 if len(dev_features) % args.n_batch != 0: dev_steps_per_epoch += 1 total_steps = steps_per_epoch * args.train_epochs print('steps per epoch:', steps_per_epoch) print('total steps:', total_steps) print('warmup steps:', int(args.warmup_rate * total_steps)) F1s = [] EMs = [] # 存一个全局最优的模型 best_f1_em = 0 for seed_ in args.seed: best_f1, best_em = 0, 0 with open(args.log_file, 'a') as aw: aw.write('===================================' + 'SEED:' + str(seed_) + '===================================' + '\n') print('SEED:', seed_) random.seed(seed_) np.random.seed(seed_) torch.manual_seed(seed_) if n_gpu > 0: 
torch.cuda.manual_seed_all(seed_) # init model print('init model...') if 'albert' not in args.init_restore_dir: model = BertForQuestionAnswering(bert_config) else: if 'google' in args.init_restore_dir: model = AlbertForMRC(bert_config) else: model = ALBertForQA(bert_config, dropout_rate=args.dropout) utils.torch_show_all_params(model) utils.torch_init_model(model, args.init_restore_dir) if args.float16: model.half() model.to(device) if n_gpu > 1: model = torch.nn.DataParallel(model) optimizer = get_optimization(model=model, float16=args.float16, learning_rate=args.lr, total_steps=total_steps, schedule=args.schedule, warmup_rate=args.warmup_rate, max_grad_norm=args.clip_norm, weight_decay_rate=args.weight_decay_rate) all_input_ids = torch.tensor([f['input_ids'] for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f['input_mask'] for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f['segment_ids'] for f in train_features], dtype=torch.long) seq_len = all_input_ids.shape[1] # 样本长度不能超过bert的长度限制 assert seq_len <= bert_config.max_position_embeddings # true label all_start_positions = torch.tensor([f['start_position'] for f in train_features], dtype=torch.long) all_end_positions = torch.tensor([f['end_position'] for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions) train_dataloader = DataLoader(train_data, batch_size=args.n_batch, shuffle=True) print('***** Training *****') model.train() global_steps = 1 best_em = 0 best_f1 = 0 for i in range(int(args.train_epochs)): print('Starting epoch %d' % (i + 1)) total_loss = 0 iteration = 1 with tqdm(total=steps_per_epoch, desc='Epoch %d' % (i + 1)) as pbar: for step, batch in enumerate(train_dataloader): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, start_positions, end_positions = batch loss = model(input_ids, segment_ids, input_mask, 
start_positions, end_positions) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. total_loss += loss.item() pbar.set_postfix({'loss': '{0:1.5f}'.format(total_loss / (iteration + 1e-5))}) pbar.update(1) if args.float16: optimizer.backward(loss) # modify learning rate with special warm up BERT uses # if args.fp16 is False, BertAdam is used and handles this automatically lr_this_step = args.lr * warmup_linear(global_steps / total_steps, args.warmup_rate) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step else: loss.backward() optimizer.step() model.zero_grad() global_steps += 1 iteration += 1 if global_steps % eval_steps == 0: best_f1, best_em, best_f1_em = evaluate(model, args, dev_examples, dev_features, device, global_steps, best_f1, best_em, best_f1_em) F1s.append(best_f1) EMs.append(best_em) # release the memory del model del optimizer torch.cuda.empty_cache() print('Mean F1:', np.mean(F1s), 'Mean EM:', np.mean(EMs)) print('Best F1:', np.max(F1s), 'Best EM:', np.max(EMs)) with open(args.log_file, 'a') as aw: aw.write('Mean(Best) F1:{}({})\n'.format(np.mean(F1s), np.max(F1s))) aw.write('Mean(Best) EM:{}({})\n'.format(np.mean(EMs), np.max(EMs)))
13,603
45.749141
112
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/pytorch_modeling.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import torch from torch import nn from torch.nn import CrossEntropyLoss from tools.file_utils import cached_path logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", } CONFIG_NAME = 'bert_config.json' WEIGHTS_NAME = 'pytorch_model.bin' def gelu(x): """Implementation of the gelu activation function. 
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" if isinstance(vocab_size_or_config_json_file, str): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class ALBertConfig(object): """Configuration class to store the configuration of a `BertModel`. 
""" def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, embedding_size=128, ln_type="postln", hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" if isinstance(vocab_size_or_config_json_file, str): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.embedding_size = embedding_size self.ln_type = ln_type self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" try: from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm except ImportError: print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") class 
BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-5): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) # TODO:ROBERTA暂时存在一些问题,必须512才能加载一些模型,但是部分模型却不是用512长度训练的,要注意 self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None): seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class ALBertEmbeddings(nn.Module): """ ALBert embeddings. 
""" def __init__(self, config): super(ALBertEmbeddings, self).__init__() # word_embeddings_2: project vector(output_middle) to the hidden space if config.embedding_size == config.hidden_size: self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0) self.word_embeddings_2 = None else: self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0) self.word_embeddings_2 = nn.Linear(config.embedding_size, config.hidden_size, bias=False) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None, position_ids=None): seq_length = input_ids.size(1) if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) if self.word_embeddings_2: words_embeddings = self.word_embeddings_2(words_embeddings) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, 
config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.ln_type = 'postln' if 'ln_type' in config.__dict__: self.ln_type = config.ln_type def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) if self.ln_type == 'preln': hidden_states = hidden_states + input_tensor else: hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.ln_type = 'postln' if 'ln_type' in config.__dict__: self.ln_type = config.ln_type def forward(self, input_tensor, attention_mask): if self.ln_type == 'preln': hidden_state = self.output.LayerNorm(input_tensor) # pre_ln self_output = self.self(hidden_state, attention_mask) else: self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = 
self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.ln_type = 'postln' if 'ln_type' in config.__dict__: self.ln_type = config.ln_type def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) if self.ln_type == 'preln': hidden_states = hidden_states + input_tensor else: hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.ln_type = 'postln' if 'ln_type' in config.__dict__: self.ln_type = config.ln_type self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states, attention_mask): attention_output = self.attention(hidden_states, attention_mask) if self.ln_type == 'preln': attention_output_pre = self.output.LayerNorm(attention_output) else: attention_output_pre = attention_output intermediate_output = self.intermediate(attention_output_pre) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() layer = BertLayer(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): all_encoder_layers = [] for layer_module in self.layer: hidden_states = layer_module(hidden_states, attention_mask) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if not output_all_encoded_layers: all_encoder_layers.append(hidden_states) return all_encoder_layers 
class ALBertEncoder(nn.Module): def __init__(self, config): super(ALBertEncoder, self).__init__() self.num_hidden_layers = config.num_hidden_layers self.layer_shared = BertLayer(config) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): all_encoder_layers = [] for i in range(self.num_hidden_layers): hidden_states = self.layer_shared(hidden_states, attention_mask) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if not output_all_encoded_layers: all_encoder_layers.append(hidden_states) return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class PreTrainedBertModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(PreTrainedBertModel, self).__init__() if not isinstance(config, BertConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `BertConfig`. 
" "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) self.config = config def init_bert_weights(self, module): """ Initialize the weights. """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.normal_(mean=0.0, std=self.config.initializer_range) module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-base-multilingual` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance cache_dir: an optional path to a folder in which the pre-trained models will be cached. 
state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name] else: archive_file = pretrained_model_name # redirect to the cache, if necessary try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except FileNotFoundError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format( archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file): serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir # Load config config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) logger.info("Model config {}".format(config)) # Instantiate model. 
model = cls(config, *inputs, **kwargs) if state_dict is None: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='' if hasattr(model, 'bert') else 'bert.') if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if tempdir: # Clean up temp dir shutil.rmtree(tempdir) return model class BertModel(PreTrainedBertModel): def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create 
a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, token_type_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class ALBertModel(PreTrainedBertModel): def __init__(self, config): super(ALBertModel, self).__init__(config) self.embeddings = ALBertEmbeddings(config) self.encoder = ALBertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. 
# Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, token_type_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class BertForPreTraining(PreTrainedBertModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss else: return prediction_scores, seq_relationship_score class ALBertForPreTraining(PreTrainedBertModel): def __init__(self, config): super(ALBertForPreTraining, self).__init__(config) self.bert = ALBertModel(config) self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, 
attention_mask, output_all_encoded_layers=False) return sequence_output, pooled_output # 不做预训练的话,这些不做也没事 # prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) # # if masked_lm_labels is not None and next_sentence_label is not None: # loss_fct = CrossEntropyLoss(ignore_index=-1) # masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) # next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) # total_loss = masked_lm_loss + next_sentence_loss # return total_loss # else: # return prediction_scores, seq_relationship_score class BertForMaskedLM(PreTrainedBertModel): def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) prediction_scores = self.cls(sequence_output) if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) return masked_lm_loss else: return prediction_scores class BertForNextSentencePrediction(PreTrainedBertModel): def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) seq_relationship_score = self.cls(pooled_output) if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = 
loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) return next_sentence_loss else: return seq_relationship_score class BertForSequenceClassification(PreTrainedBertModel): def __init__(self, config, num_labels=2): super(BertForSequenceClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForMultipleChoice(PreTrainedBertModel): def __init__(self, config, num_choices=2): super(BertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, return_logits=False): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if return_logits: return loss, reshaped_logits else: return loss 
else: return reshaped_logits class BertForTokenClassification(PreTrainedBertModel): def __init__(self, config, num_labels=2): super(BertForTokenClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForQuestionAnswering(PreTrainedBertModel): def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.bert = BertModel(config) # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version # self.dropout = nn.Dropout(config.hidden_dropout_prob) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms 
ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits class BertForQA_CLS(PreTrainedBertModel): def __init__(self, config): super(BertForQA_CLS, self).__init__(config) self.bert = BertModel(config) # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version # self.dropout = nn.Dropout(config.hidden_dropout_prob) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.cls_outputs = nn.Linear(config.hidden_size, 3) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, target_labels=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) target_logits = self.cls_outputs(pooled_output) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) # classifier loss loss_fct_cls = CrossEntropyLoss(ignore_index=-1) # no loss for 
has answer cls_loss = loss_fct_cls(target_logits, target_labels) total_loss = ((start_loss + end_loss) / 2) + cls_loss return total_loss else: return start_logits, end_logits, target_logits class ALBertForQA(PreTrainedBertModel): def __init__(self, config, dropout_rate): super(ALBertForQA, self).__init__(config) self.bert = ALBertModel(config) self.ln_type = config.ln_type if self.ln_type == 'ln_pre': self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) else: self.LayerNorm = None self.dropout = nn.Dropout(dropout_rate) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) if self.ln_type == 'ln_pre': sequence_output = self.LayerNorm(sequence_output) sequence_output = self.dropout(sequence_output) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits class ALBertForQA_CLS(PreTrainedBertModel): def __init__(self, config, dropout_rate): super(ALBertForQA_CLS, self).__init__(config) 
self.bert = ALBertModel(config) self.ln_type = config.ln_type if self.ln_type == 'ln_pre': self.LayerNorm_qa = BertLayerNorm(config.hidden_size, eps=1e-5) self.LayerNorm_cls = BertLayerNorm(config.hidden_size, eps=1e-5) else: self.LayerNorm_qa = None self.LayerNorm_cls = None self.dropout_qa = nn.Dropout(dropout_rate) self.dropout_cls = nn.Dropout(dropout_rate) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.cls_outputs = nn.Linear(config.hidden_size, 3) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, target_labels=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) if self.ln_type == 'ln_pre': sequence_output = self.LayerNorm_qa(sequence_output) pooled_output = self.LayerNorm_cls(pooled_output) sequence_output = self.dropout_qa(sequence_output) pooled_output = self.dropout_cls(pooled_output) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) target_logits = self.cls_outputs(pooled_output) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) # classifier loss loss_fct_cls = CrossEntropyLoss(ignore_index=-1) # no loss for has answer cls_loss = loss_fct_cls(target_logits, target_labels) total_loss = 
((start_loss + end_loss) / 2) + cls_loss return total_loss else: return start_logits, end_logits, target_logits class ALBertForMultipleChoice(PreTrainedBertModel): def __init__(self, config, num_choices=2): super(ALBertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = ALBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, return_logits=False): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if return_logits: return loss, reshaped_logits else: return loss else: return reshaped_logits
57,982
45.798224
130
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/convert_tf_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2018 The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BERT checkpoint.""" from __future__ import print_function import argparse import os import re import numpy as np import tensorflow as tf import torch from pytorch_modeling import BertConfig, BertForPreTraining, ALBertConfig, ALBertForPreTraining def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path, is_albert): config_path = os.path.abspath(bert_config_file) tf_path = os.path.abspath(tf_checkpoint_path) print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: print("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) # Initialise PyTorch model if is_albert: config = ALBertConfig.from_json_file(bert_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = ALBertForPreTraining(config) else: config = BertConfig.from_json_file(bert_config_file) print("Building PyTorch model from configuration: {}".format(str(config))) model = BertForPreTraining(config) for name, array in zip(names, arrays): name = name.split('/') if name[0] == 'global_step': continue # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # 
which are not required for using pretrained model if any(n in ["adam_v", "adam_m"] for n in name): print("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r'[A-Za-z]+_\d+', m_name): l = re.split(r'_(\d+)', m_name) else: l = [m_name] if l[0] == 'kernel' or l[0] == 'gamma': pointer = getattr(pointer, 'weight') elif l[0] == 'output_bias' or l[0] == 'beta': pointer = getattr(pointer, 'bias') elif l[0] == 'output_weights': pointer = getattr(pointer, 'weight') else: pointer = getattr(pointer, l[0]) if len(l) >= 2: num = int(l[1]) pointer = pointer[num] if m_name[-11:] == '_embeddings': pointer = getattr(pointer, 'weight') elif m_name[-13:] == '_embeddings_2': pointer = getattr(pointer, 'weight') array = np.transpose(array) elif m_name == 'kernel': array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--tf_checkpoint_path", default='check_points/pretrain_models/albert_large_zh/albert_model.ckpt', type=str, help="Path the TensorFlow checkpoint path.") parser.add_argument("--bert_config_file", default='check_points/pretrain_models/albert_large_zh/albert_config_large.json', type=str, help="The config json file corresponding to the pre-trained BERT model. 
\n" "This specifies the model architecture.") parser.add_argument("--pytorch_dump_path", default='check_points/pretrain_models/albert_large_zh/pytorch_albert_model.pth', type=str, help="Path to the output PyTorch model.") parser.add_argument("--is_albert", default=False, action='store_true', type=bool, help="whether is albert?") args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path, args.is_albert)
5,188
40.18254
105
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/test_multichoice_mrc.py
from __future__ import print_function import argparse import os from glob import glob import torch from google_albert_pytorch_modeling import AlbertConfig, AlbertForMultipleChoice from preprocess.CHID_preprocess import RawResult, get_final_predictions, write_predictions, \ generate_input from pytorch_modeling import ALBertConfig, ALBertForMultipleChoice from pytorch_modeling import BertConfig, BertForMultipleChoice from tools.official_tokenization import BertTokenizer from torch.utils.data import TensorDataset, DataLoader, SequentialSampler from tqdm import tqdm def torch_init_model(model, init_restore_dir): state_dict = torch.load(init_restore_dir, map_location='cpu') missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='' if hasattr(model, 'bert') else 'bert.') print("missing keys:{}".format(missing_keys)) print('unexpected keys:{}'.format(unexpected_keys)) print('error msgs:{}'.format(error_msgs)) def main(): parser = argparse.ArgumentParser() parser.add_argument("--gpu_ids", default='0', type=str) parser.add_argument("--bert_config_file", default='check_points/pretrain_models/bert_wwm_ext_base/bert_config.json', type=str, help="The config json file corresponding to the pre-trained BERT model. 
" "This specifies the model architecture.") parser.add_argument("--vocab_file", default='check_points/pretrain_models/bert_wwm_ext_base/vocab.txt', type=str, help="The vocabulary file that the BERT model was trained on.") parser.add_argument("--init_restore_dir", required=True, type=str, help="Initial checkpoint (usually from a pre-trained BERT model).") parser.add_argument("--input_dir", required=True, default='dataset/CHID') parser.add_argument("--output_dir", required=True, type=str, help="The output directory where the model checkpoints and predictions will be written.") parser.add_argument("--predict_file", required=True, type=str, help="Initial checkpoint (usually from a pre-trained BERT model).") parser.add_argument('--output_file', type=str, default='test_predictions.json') ## Other parameters parser.add_argument("--max_seq_length", default=64, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--max_num_choices", default=10, type=int, help="The maximum number of cadicate answer, shorter than this will be padded.") parser.add_argument("--predict_batch_size", default=16, type=int, help="Total batch size for predictions.") parser.add_argument("--do_lower_case", default=True, help="Whether to lower case the input text. 
True for uncased models, False for cased models.") parser.add_argument('--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") args = parser.parse_args() print(args) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("device: {}, 16-bits training: {}".format(device, args.fp16)) tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case) test_example_file = os.path.join(args.input_dir, 'test_examples_{}.pkl'.format(str(args.max_seq_length))) test_feature_file = os.path.join(args.input_dir, 'test_features_{}.pkl'.format(str(args.max_seq_length))) eval_features = generate_input(args.predict_file, None, test_example_file, test_feature_file, tokenizer, max_seq_length=args.max_seq_length, max_num_choices=args.max_num_choices, is_training=False) # Prepare model if 'albert' in args.bert_config_file: if 'google' in args.bert_config_file: bert_config = AlbertConfig.from_json_file(args.bert_config_file) model = AlbertForMultipleChoice(bert_config, num_choices=args.max_num_choices) else: bert_config = ALBertConfig.from_json_file(args.bert_config_file) model = ALBertForMultipleChoice(bert_config, num_choices=args.max_num_choices) else: bert_config = BertConfig.from_json_file(args.bert_config_file) model = BertForMultipleChoice(bert_config, num_choices=args.max_num_choices) model = model.to(device) if args.init_restore_dir.endswith('.pth') or \ args.init_restore_dir.endswith('.pt') or \ args.init_restore_dir.endswith('.bin'): pass else: args.init_restore_dir = glob(args.init_restore_dir + '*.pth') + \ glob(args.init_restore_dir + '*.pt') + \ glob(args.init_restore_dir + '*.bin') assert len(args.init_restore_dir) == 1 args.init_restore_dir = args.init_restore_dir[0] torch_init_model(model, args.init_restore_dir) if args.fp16: model = model.half() print("***** Running predictions *****") print("Num split examples = %d", 
len(eval_features)) print("Batch size = %d", args.predict_batch_size) all_example_ids = [f.example_id for f in eval_features] all_tags = [f.tag for f in eval_features] all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_masks = torch.tensor([f.input_masks for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_choice_masks = torch.tensor([f.choice_masks for f in eval_features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids, all_choice_masks, all_example_index) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size) model.eval() all_results = [] print("Start evaluating") for input_ids, input_masks, segment_ids, choice_masks, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=None): if len(all_results) == 0: print('shape of input_ids: {}'.format(input_ids.shape)) input_ids = input_ids.to(device) input_masks = input_masks.to(device) segment_ids = segment_ids.to(device) with torch.no_grad(): batch_logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_masks, labels=None) for i, example_index in enumerate(example_indices): logits = batch_logits[i].detach().cpu().tolist() eval_feature = eval_features[example_index.item()] unique_id = int(eval_feature.unique_id) all_results.append(RawResult(unique_id=unique_id, example_id=all_example_ids[unique_id], tag=all_tags[unique_id], logit=logits)) else: print("prediction is over") print('decoder raw results') tmp_predict_file = os.path.join(args.output_dir, "test_raw_predictions.pkl") output_prediction_file = os.path.join(args.output_dir, args.output_file) results = get_final_predictions(all_results, tmp_predict_file, g=True) 
write_predictions(results, output_prediction_file) print('predictions saved to {}'.format(output_prediction_file)) if __name__ == "__main__": main()
9,007
49.044444
118
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/run_multichoice_mrc.py
""" @name = 'roberta_wwm_ext_large' @author = 'zhangxinrui' @time = '2019/11/15' roberta_wwm_ext_large 的baseline版本 coding=utf-8 Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import print_function import argparse import os import random import numpy as np import torch from google_albert_pytorch_modeling import AlbertConfig, AlbertForMultipleChoice from preprocess.CHID_preprocess import RawResult, get_final_predictions, write_predictions, generate_input, evaluate from pytorch_modeling import ALBertConfig, ALBertForMultipleChoice from pytorch_modeling import BertConfig, BertForMultipleChoice from tools.official_tokenization import BertTokenizer from tools.pytorch_optimization import get_optimization, warmup_linear from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from tqdm import tqdm def reset_model(args, bert_config, model_cls): # Prepare model model = model_cls(bert_config, num_choices=args.max_num_choices) if args.init_restore_dir is not None: print('load bert weight') state_dict = torch.load(args.init_restore_dir, map_location='cpu') missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, 
prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='' if hasattr(model, 'bert') else 'bert.') print("missing keys:{}".format(missing_keys)) print('unexpected keys:{}'.format(unexpected_keys)) print('error msgs:{}'.format(error_msgs)) if args.fp16: model.half() return model def main(): parser = argparse.ArgumentParser() parser.add_argument("--gpu_ids", default='', required=True, type=str) parser.add_argument("--bert_config_file", required=True, default='check_points/pretrain_models/roberta_wwm_ext_large/bert_config.json') parser.add_argument("--vocab_file", required=True, default='check_points/pretrain_models/roberta_wwm_ext_large/vocab.txt') parser.add_argument("--init_restore_dir", required=True, default='check_points/pretrain_models/roberta_wwm_ext_large/pytorch_model.pth') parser.add_argument("--input_dir", required=True, default='dataset/CHID') parser.add_argument("--output_dir", required=True, default='check_points/CHID') ## Other parameters parser.add_argument("--train_file", default='./origin_data/CHID/train.json', type=str, help="SQuAD json for training. E.g., train-v1.1.json") parser.add_argument("--train_ans_file", default='./origin_data/CHID/train_answer.json', type=str, help="SQuAD answer for training. E.g., train-v1.1.json") parser.add_argument("--predict_file", default='./origin_data/CHID/dev.json', type=str, help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--predict_ans_file", default='origin_data/CHID/dev_answer.json', type=str, help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--max_seq_length", default=64, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--max_num_choices", default=10, type=int, help="The maximum number of cadicate answer, shorter than this will be padded.") parser.add_argument("--train_batch_size", default=20, type=int, help="Total batch size for training.") parser.add_argument("--predict_batch_size", default=16, type=int, help="Total batch size for predictions.") parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.06, type=float, help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% " "of training.") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--do_lower_case", default=True, help="Whether to lower case the input text. 
True for uncased models, False for cased models.") parser.add_argument('--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") args = parser.parse_args() print(args) os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() print("device: {} n_gpu: {}, 16-bits training: {}".format(device, n_gpu, args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if os.path.exists(args.input_dir) == False: os.makedirs(args.input_dir, exist_ok=True) if os.path.exists(args.output_dir) == False: os.makedirs(args.output_dir, exist_ok=True) tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case) print('ready for train dataset') train_example_file = os.path.join(args.input_dir, 'train_examples_{}.pkl'.format(str(args.max_seq_length))) train_feature_file = os.path.join(args.input_dir, 'train_features_{}.pkl'.format(str(args.max_seq_length))) train_features = generate_input(args.train_file, args.train_ans_file, train_example_file, train_feature_file, tokenizer, max_seq_length=args.max_seq_length, max_num_choices=args.max_num_choices, is_training=True) dev_example_file = os.path.join(args.input_dir, 'dev_examples_{}.pkl'.format(str(args.max_seq_length))) dev_feature_file = os.path.join(args.input_dir, 'dev_features_{}.pkl'.format(str(args.max_seq_length))) eval_features = generate_input(args.predict_file, None, dev_example_file, dev_feature_file, tokenizer, max_seq_length=args.max_seq_length, max_num_choices=args.max_num_choices, is_training=False) print("train features 
{}".format(len(train_features))) num_train_steps = int( len(train_features) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) print("loaded train dataset") print("Num generate examples = {}".format(len(train_features))) print("Batch size = {}".format(args.train_batch_size)) print("Num steps for a epoch = {}".format(num_train_steps)) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_masks = torch.tensor([f.input_masks for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_choice_masks = torch.tensor([f.choice_masks for f in train_features], dtype=torch.long) all_labels = torch.tensor([f.label for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids, all_choice_masks, all_labels) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, drop_last=True) all_example_ids = [f.example_id for f in eval_features] all_tags = [f.tag for f in eval_features] all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_masks = torch.tensor([f.input_masks for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_choice_masks = torch.tensor([f.choice_masks for f in eval_features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids, all_choice_masks, all_example_index) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size) # Prepare model if 'albert' in args.bert_config_file: if 'google' in args.bert_config_file: bert_config = 
AlbertConfig.from_json_file(args.bert_config_file) model = reset_model(args, bert_config, AlbertForMultipleChoice) else: bert_config = ALBertConfig.from_json_file(args.bert_config_file) model = reset_model(args, bert_config, ALBertForMultipleChoice) else: bert_config = BertConfig.from_json_file(args.bert_config_file) model = reset_model(args, bert_config, BertForMultipleChoice) model = model.to(device) if n_gpu > 1: model = torch.nn.DataParallel(model) optimizer = get_optimization(model, float16=args.fp16, learning_rate=args.learning_rate, total_steps=num_train_steps, schedule='warmup_linear', warmup_rate=args.warmup_proportion, weight_decay_rate=0.01, max_grad_norm=1.0, opt_pooler=True) global_step = 0 best_acc = 0 acc = 0 for i in range(int(args.num_train_epochs)): num_step = 0 average_loss = 0 model.train() model.zero_grad() # 等价于optimizer.zero_grad() steps_per_epoch = num_train_steps // args.num_train_epochs with tqdm(total=int(steps_per_epoch), desc='Epoch %d' % (i + 1)) as pbar: for step, batch in enumerate(train_dataloader): if n_gpu == 1: batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self input_ids, input_masks, segment_ids, choice_masks, labels = batch if step == 0 and i == 0: print('shape of input_ids: {}'.format(input_ids.shape)) print('shape of labels: {}'.format(labels.shape)) loss = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_masks, labels=labels) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. 
if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) # modify learning rate with special warm up BERT uses # if args.fp16 is False, BertAdam is used and handles this automatically lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_steps, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step else: loss.backward() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() optimizer.zero_grad() global_step += 1 average_loss += loss.item() num_step += 1 pbar.set_postfix({'loss': '{0:1.5f}'.format(average_loss / (num_step + 1e-5))}) pbar.update(1) print("***** Running predictions *****") print("Num split examples = {}".format(len(eval_features))) print("Batch size = {}".format(args.predict_batch_size)) model.eval() all_results = [] print("Start evaluating") for input_ids, input_masks, segment_ids, choice_masks, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=None): if len(all_results) == 0: print('shape of input_ids: {}'.format(input_ids.shape)) input_ids = input_ids.to(device) input_masks = input_masks.to(device) segment_ids = segment_ids.to(device) with torch.no_grad(): batch_logits = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_masks, labels=None) for i, example_index in enumerate(example_indices): logits = batch_logits[i].detach().cpu().tolist() eval_feature = eval_features[example_index.item()] unique_id = int(eval_feature.unique_id) all_results.append(RawResult(unique_id=unique_id, example_id=all_example_ids[unique_id], tag=all_tags[unique_id], logit=logits)) predict_file = 'dev_predictions.json' print('decoder raw results') tmp_predict_file = os.path.join(args.output_dir, "raw_predictions.pkl") output_prediction_file = os.path.join(args.output_dir, predict_file) results = get_final_predictions(all_results, tmp_predict_file, g=True) write_predictions(results, 
output_prediction_file) print('predictions saved to {}'.format(output_prediction_file)) if args.predict_ans_file: acc = evaluate(args.predict_ans_file, output_prediction_file) print(f'{args.predict_file} 预测精度:{acc}') # Save a epoch trained model if acc > best_acc: best_acc = acc output_model_file = os.path.join(args.output_dir, "best_checkpoint.bin") print('save trained model from {}'.format(output_model_file)) model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self torch.save(model_to_save.state_dict(), output_model_file) if __name__ == "__main__": main()
16,489
50.69279
118
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/pytorch_optimization.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math import torch from torch.nn.utils import clip_grad_norm_ from torch.optim.optimizer import Optimizer def warmup_cosine(x, warmup=0.002): if x < warmup: return x / warmup return 0.5 * (1.0 + torch.cos(math.pi * x)) def warmup_constant(x, warmup=0.002): if x < warmup: return x / warmup return 1.0 def warmup_linear(x, warmup=0.002): if x < warmup: return x / warmup return (1.0 - x) / (1.0 - warmup) def warmup_fix(step, warmup_step): return min(1.0, step / warmup_step) SCHEDULES = { 'warmup_cosine': warmup_cosine, 'warmup_constant': warmup_constant, 'warmup_linear': warmup_linear, 'warmup_fix': warmup_fix } class BERTAdam(Optimizer): """Implements BERT version of Adam algorithm with weight decay fix (and no ). Params: lr: learning rate warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 t_total: total number of training steps for the learning rate schedule, -1 means constant learning rate. Default: -1 schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' b1: Adams b1. Default: 0.9 b2: Adams b2. Default: 0.999 e: Adams epsilon. Default: 1e-6 weight_decay_rate: Weight decay. Default: 0.01 max_grad_norm: Maximum norm for the gradients (-1 means no clipping). 
Default: 1.0 """ def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay_rate=0.01, cycle_step=None, max_grad_norm=1.0): if lr is not None and not lr >= 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if schedule not in SCHEDULES: raise ValueError("Invalid schedule parameter: {}".format(schedule)) if not 0.0 <= warmup < 1.0 and not warmup == -1: raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) if not 0.0 <= b1 < 1.0: raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) if not 0.0 <= b2 < 1.0: raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) if not e >= 0.0: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay_rate=weight_decay_rate, max_grad_norm=max_grad_norm, cycle_step=cycle_step) super(BERTAdam, self).__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['next_m'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['next_v'] = torch.zeros_like(p.data) next_m, next_v = state['next_m'], state['next_v'] beta1, beta2 = group['b1'], group['b2'] # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm']) # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group['e']) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want ot decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. 
if group['weight_decay_rate'] > 0.0: update += group['weight_decay_rate'] * p.data schedule_fct = SCHEDULES[group['schedule']] if group['cycle_step'] is not None and state['step'] > group['cycle_step']: lr_scheduled = group['lr'] * (1 - ((state['step'] % group['cycle_step']) / group['cycle_step'])) elif group['t_total'] != -1 and group['schedule'] != 'warmup_fix': lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup']) elif group['schedule'] == 'warmup_fix': lr_scheduled = group['lr'] * schedule_fct(state['step'], group['warmup'] * group['t_total']) else: lr_scheduled = group['lr'] update_with_lr = lr_scheduled * update p.data.add_(-update_with_lr) state['step'] += 1 return loss def get_optimization(model, float16, learning_rate, total_steps, schedule, warmup_rate, weight_decay_rate, max_grad_norm, opt_pooler=False): # Prepare optimizer assert 0.0 <= warmup_rate <= 1.0 param_optimizer = list(model.named_parameters()) # hack to remove pooler, which is not used # thus it produce None grad that break apex if opt_pooler is False: param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_parameters = [ {'params': [p for n, p in param_optimizer if not any([nd in n for nd in no_decay])], 'weight_decay_rate': weight_decay_rate}, {'params': [p for n, p in param_optimizer if any([nd in n for nd in no_decay])], 'weight_decay_rate': 0.0} ] if float16: try: from apex.contrib.optimizers import FP16_Optimizer from apex.contrib.optimizers import FusedAdam except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_parameters, lr=learning_rate, bias_correction=False, max_grad_norm=max_grad_norm) optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = BERTAdam(params=optimizer_parameters, lr=learning_rate, warmup=warmup_rate, 
max_grad_norm=max_grad_norm, t_total=total_steps, schedule=schedule, weight_decay_rate=weight_decay_rate) return optimizer
8,435
41.606061
116
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/utils.py
import collections import os import re from glob import glob import tensorflow as tf import tensorflow.contrib.slim as slim import torch def check_args(args): args.setting_file = os.path.join(args.checkpoint_dir, args.setting_file) args.log_file = os.path.join(args.checkpoint_dir, args.log_file) os.makedirs(args.checkpoint_dir, exist_ok=True) with open(args.setting_file, 'wt') as opt_file: opt_file.write('------------ Options -------------\n') print('------------ Options -------------') for k in args.__dict__: v = args.__dict__[k] opt_file.write('%s: %s\n' % (str(k), str(v))) print('%s: %s' % (str(k), str(v))) opt_file.write('-------------- End ----------------\n') print('------------ End -------------') return args def show_all_variables(rank=0): model_vars = tf.trainable_variables() slim.model_analyzer.analyze_vars(model_vars, print_info=True if rank == 0 else False) def torch_show_all_params(model, rank=0): params = list(model.parameters()) k = 0 for i in params: l = 1 for j in i.size(): l *= j k = k + l if rank == 0: print("Total param num:" + str(k)) # import ipdb def get_assigment_map_from_checkpoint(tvars, init_checkpoint): """Compute the union of the current variables and checkpoint variables.""" initialized_variable_names = {} new_variable_names = set() unused_variable_names = set() name_to_variable = collections.OrderedDict() for var in tvars: name = var.name m = re.match("^(.*):\\d+$", name) if m is not None: name = m.group(1) name_to_variable[name] = var init_vars = tf.train.list_variables(init_checkpoint) assignment_map = collections.OrderedDict() for x in init_vars: (name, var) = (x[0], x[1]) if name not in name_to_variable: if 'adam' not in name: unused_variable_names.add(name) continue # assignment_map[name] = name assignment_map[name] = name_to_variable[name] initialized_variable_names[name] = 1 initialized_variable_names[name + ":0"] = 1 for name in name_to_variable: if name not in initialized_variable_names: new_variable_names.add(name) return 
assignment_map, initialized_variable_names, new_variable_names, unused_variable_names # loading weights def init_from_checkpoint(init_checkpoint, tvars=None, rank=0): if not tvars: tvars = tf.trainable_variables() assignment_map, initialized_variable_names, new_variable_names, unused_variable_names \ = get_assigment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) if rank == 0: # 显示成功加载的权重 for t in initialized_variable_names: if ":0" not in t: print("Loading weights success: " + t) # 显示新的参数 print('New parameters:', new_variable_names) # 显示初始化参数中没用到的参数 print('Unused parameters', unused_variable_names) def torch_init_model(model, init_checkpoint): state_dict = torch.load(init_checkpoint, map_location='cpu') missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='' if hasattr(model, 'bert') else 'bert.') print("missing keys:{}".format(missing_keys)) print('unexpected keys:{}'.format(unexpected_keys)) print('error msgs:{}'.format(error_msgs)) def torch_save_model(model, output_dir, scores, max_save_num=1): # Save model checkpoint if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training saved_pths = glob(os.path.join(output_dir, '*.pth')) saved_pths.sort() while len(saved_pths) >= max_save_num: if os.path.exists(saved_pths[0].replace('//', '/')): 
os.remove(saved_pths[0].replace('//', '/')) del saved_pths[0] save_prex = "checkpoint_score" for k in scores: save_prex += ('_' + k + '-' + str(scores[k])[:6]) save_prex += '.pth' torch.save(model_to_save.state_dict(), os.path.join(output_dir, save_prex)) print("Saving model checkpoint to %s", output_dir)
4,999
33.013605
117
py
CLUE
CLUE-master/baselines/models_pytorch/mrc_pytorch/tools/file_utils.py
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import json import logging import os import shutil import tempfile from functools import wraps from hashlib import sha256 from pathlib import Path from typing import Optional, Tuple, Union, IO, Callable, Set from urllib.parse import urlparse import boto3 import requests from botocore.exceptions import ClientError from tqdm import tqdm logger = logging.getLogger(__name__) # pylint: disable=invalid-name PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', Path.home() / '.pytorch_pretrained_bert')) def url_to_filename(url: str, etag: str = None) -> str: """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. """ url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]: """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist. 
""" if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise FileNotFoundError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise FileNotFoundError("file {} not found".format(meta_path)) with open(meta_path) as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': # File, but it doesn't exist. raise FileNotFoundError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def split_s3_path(url: str) -> Tuple[str, str]: """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. 
if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func: Callable): """ Wrapper function for s3 requests in order to create more helpful error messages. """ @wraps(func) def wrapper(url: str, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise FileNotFoundError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url: str) -> Optional[str]: """Check ETag on S3 object.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url: str, temp_file: IO) -> None: """Pull a file directly from S3.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def http_get(url: str, temp_file: IO) -> None: req = requests.get(url, stream=True) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str: """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) # Get eTag to add to filename, if it exists. 
if url.startswith("s3://"): etag = s3_etag(url) else: response = requests.head(url, allow_redirects=True) if response.status_code != 200: raise IOError("HEAD request failed for url {} with status code {}" .format(url, response.status_code)) etag = response.headers.get("ETag") filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: json.dump(meta, meta_file) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename: str) -> Set[str]: ''' Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. ''' collection = set() with open(filename, 'r', encoding='utf-8') as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path: str, dot=True, lower: bool = True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext
8,020
32.560669
98
py
pointnerf
pointnerf-master/options/base_options.py
import argparse import os from models import find_model_class_by_name from data import find_dataset_class_by_name import torch class BaseOptions: def initialize(self, parser: argparse.ArgumentParser): #================================ global ================================# parser.add_argument('--experiment', type=str, required=True, dest='name', help='name of the experiment') parser.add_argument( '--verbose', action='store_true', help='if specified, print more debugging information') parser.add_argument( '--timestamp', action='store_true', help='suffix the experiment name with current timestamp') #================================ dataset ================================# parser.add_argument('--data_root', type=str, default=None, help='path to the dataset storage') parser.add_argument( '--dataset_name', type=str, default=None, help='name of dataset, determine which dataset class to use') parser.add_argument( '--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset.' 'If the dataset directory contains more than max_dataset_size, only a subset is loaded.' 
) parser.add_argument('--n_threads', default=1, type=int, help='# threads for loading data') #================================ MVS ================================# parser.add_argument('--geo_cnsst_num', default=2, type=int, help='# threads for loading data') #================================ model ================================# parser.add_argument('--bgmodel', default="No", type=str, help='No | sphere | plane') parser.add_argument( '--model', type=str, required=True, help='name of model, determine which network model to use') #================================ running ================================# parser.add_argument('--batch_size', type=int, default=1, help='input batch size') parser.add_argument('--render_only', type=int, default=0, help='1 for render_only dataset') parser.add_argument('--serial_batches', type=int, default=0, help='feed batches in order without shuffling') parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') parser.add_argument('--show_tensorboard', type=int, default=0, help='plot loss curves with tensorboard') parser.add_argument('--resume_dir', type=str, default='', help='dir of the previous checkpoint') parser.add_argument('--resume_iter', type=str, default='latest', help='which epoch to resume from') parser.add_argument('--debug', action='store_true', help='indicate a debug run') parser.add_argument('--vid', type=int, default=0, help='feed batches in order without shuffling') parser.add_argument('--resample_pnts', type=int, default=-1, help='resample the num. 
initial points') parser.add_argument('--inall_img', type=int, default=1, help='all points must in the sight of all camera pose') parser.add_argument('--test_train', type=int, default=0, help='test on training set for debugging') return parser def gather_options(self): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser = self.initialize(parser) opt, _ = parser.parse_known_args() model_name = opt.model find_model_class_by_name(model_name).modify_commandline_options( parser, self.is_train) dataset_name = opt.dataset_name if dataset_name is not None: find_dataset_class_by_name( dataset_name).modify_commandline_options( parser, self.is_train) self.parser = parser return parser.parse_args() def print_and_save_options(self, opt): message = '' message += '----------------- Options ---------------\n' for k, v in sorted(vars(opt).items()): comment = '' default = self.parser.get_default(k) if v != default: comment = '\t[default: {}]'.format(str(default)) message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) message += '----------------- End -------------------' # print(message) # if opt.is_train: # expr_dir = os.path.join(opt.checkpoints_dir, opt.name) # else: # expr_dir = os.path.join(opt.resume_dir, opt.name) expr_dir = os.path.join(opt.checkpoints_dir, opt.name) os.makedirs(expr_dir, exist_ok=True) file_name = os.path.join(expr_dir, 'opt.txt') with open(file_name, 'wt') as opt_file: opt_file.write(message) opt_file.write('\n') def parse(self): opt = self.gather_options() opt.is_train = self.is_train if opt.timestamp: import datetime now = datetime.datetime.now().strftime('%y-%m-%d_%H:%M:%S') opt.name = opt.name + '_' + now self.print_and_save_options(opt) str_ids = opt.gpu_ids.split(',') opt.gpu_ids = [ int(x) for x in opt.gpu_ids.split(',') if x.strip() and int(x) >= 0 ] if len(opt.gpu_ids) > 0: torch.cuda.set_device(opt.gpu_ids[0]) self.opt = opt return self.opt
7,159
38.125683
107
py
pointnerf
pointnerf-master/models/base_rendering_model.py
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from utils import format as fmt import os from .base_model import BaseModel from .rendering.diff_render_func import find_render_function, find_blend_function, find_tone_map, alpha_blend from .rendering.diff_ray_marching import find_ray_generation_method, find_refined_ray_generation_method, ray_march, alpha_ray_march from utils import format as fmt from utils.spherical import SphericalHarm, SphericalHarm_table from utils.util import add_property2dict from torch.autograd import Variable from PIL import Image def mse2psnr(x): return -10.* torch.log(x)/np.log(10.) class BaseRenderingModel(BaseModel): ''' A base rendering model that provides the basic loss functions, selctions of different rendering functions, ray generation functions, blending functions (for collocated and non-collocated ray marching), and functions to setup encoder and decoders. A sub model needs to at least re-implement create_network_models() and run_network_models() for actual rendering. Examples are: hirarchical_volumetric_model etc. The model collects ''' @staticmethod def modify_commandline_options(parser, is_train=True): # loss parameters parser.add_argument( "--sparse_loss_weight", type=float, default=0, help="The (multiple) output items to supervise with gt color.") parser.add_argument( "--color_loss_items", type=str, nargs='+', default=None, help="The (multiple) output items to supervise with gt color.") parser.add_argument( "--test_color_loss_items", type=str, nargs='+', default=None, help="The (multiple) output items to supervise with gt color.") parser.add_argument( "--color_loss_weights", type=float, nargs='+', default=[1.0], help= "The weights for each color supervision item. 
The number of this args should be 1 or match the number in --color_loss_items" ) parser.add_argument( "--bg_loss_items", type=str, nargs='+', default=[], help="The (multiple) output items to supervise with gt masks.") parser.add_argument( "--bg_loss_weights", type=float, nargs='+', default=[1.0], help= "The weights for each mask supervision item. The number of this args should be 1 or match the number in --bg_loss_items" ) parser.add_argument( "--depth_loss_items", type=str, nargs='+', default=[], help="The (multiple) output items to supervise with gt depth.") parser.add_argument( "--depth_loss_weights", type=float, nargs='+', default=[1.0], help= "The weights for each depth supervision item. The number of this args should be 1 or match the number in --depth_loss_items" ) parser.add_argument( "--zero_one_loss_items", type=str, nargs='+', default=[], help= "The (multiple) output items to regularize to be close to either 0 or 1 ." ) parser.add_argument( "--zero_one_loss_weights", type=float, nargs='+', default=[1.0], help= "The weights for each zero_one regularization item. The number of this args should be 1 or match the number in --zero_one_loss_items" ) parser.add_argument( "--l2_size_loss_items", type=str, nargs='+', default=[], help= "The (multiple) output items to regularize to be close to either 0 or 1 ." ) parser.add_argument( "--l2_size_loss_weights", type=float, nargs='+', default=[0.0], help= "The weights for each zero_one regularization item. The number of this args should be 1 or match the number in --zero_one_loss_items" ) parser.add_argument( "--zero_epsilon", type=float, default=1e-3, help="epsilon in logarithmic regularization terms when needed.", ) parser.add_argument( "--no_loss", type=int, default=False, help="do not compute loss.", ) #visualization terms parser.add_argument( "--visual_items", type=str, nargs='*', default=None, help= "The (multiple) output items to show as images. 
This will replace the default visual items" ) parser.add_argument( "--visual_items_additional", type=str, nargs='+', default=[], help= "The (multiple) output items to show as images in addition to default items. This is ignored if --visual_iterms is used" ) parser.add_argument( '--out_channels', type=int, default=None, help= 'number of output channels in decoder; default 4 for radiance, 8 for microfacet and others' ) # ray generation parser.add_argument( '--which_ray_generation', type=str, default='cube', help='which ray point generation method to use [cube]') parser.add_argument('--domain_size', type=int, default=1, help='Size of the ray marching domain') # rendering functions parser.add_argument('--which_render_func', type=str, default='microfacet', help='which render method to use') parser.add_argument( '--which_blend_func', type=str, default='alpha', help= 'which blend function to use. Hint: alpha2 for collocated, alpha for non-collocated' ) parser.add_argument('--which_tonemap_func', type=str, default='gamma', help='which tone map function to use.') parser.add_argument( '--num_pos_freqs', type=int, default=-1, help= 'number of frequency for position encoding if using nerf or mixed mlp decoders' ) parser.add_argument( '--num_viewdir_freqs', type=int, default=-1, help= 'number of frequency for view direction encoding if using nerf decoders' ) parser.add_argument( '--num_feature_freqs', type=int, default=-1, help= 'number of frequency for feature encoding if using mixed mlp decoders' ) return parser def add_default_color_losses(self, opt): ''' if no color loss terms are specified, this function is called to add default supervision into opt.color_loss_items ''' opt.color_loss_items = [] # add this to actual names in subclasses def add_default_visual_items(self, opt): ''' if no visual terms are specified, this function is called to add default visualization items ''' opt.visual_items = ['gt_image' ] # add this to actual names in subclasses def 
check_setup_loss(self, opt): ''' this function check and setup all loss items and weights.''' self.loss_names = ['total'] if not opt.color_loss_items: self.add_default_color_losses(opt) if len(opt.color_loss_weights) != 1 and len( opt.color_loss_weights) != len(opt.color_loss_items): print(fmt.RED + "color_loss_weights does not match loss items" + fmt.END) exit() if len(opt.color_loss_weights) == 1 and len(opt.color_loss_items) > 1: opt.color_loss_weights = np.ones(len( opt.color_loss_items), np.float32) * opt.color_loss_weights[0] self.loss_names += opt.color_loss_items if len(opt.depth_loss_weights) != 1 and len( opt.depth_loss_weights) != len(opt.depth_loss_items): print(fmt.RED + "color_depth_weights does not match loss items" + fmt.END) exit() if len(opt.depth_loss_weights) == 1 and len(opt.depth_loss_items) > 1: opt.depth_loss_weights = np.ones(len( opt.depth_loss_items), np.float32) * opt.depth_loss_weights[0] self.loss_names += opt.depth_loss_items if len(opt.zero_one_loss_weights) != len( opt.zero_one_loss_items) and len( opt.zero_one_loss_weights) != 1: print(fmt.RED + "zero_one_loss_weights does not match loss items" + fmt.END) exit() if len(opt.zero_one_loss_weights) == 1 and len( opt.zero_one_loss_items) > 1: opt.zero_one_loss_weights = np.ones( len(opt.zero_one_loss_items), np.float32) * opt.zero_one_loss_weights[0] self.loss_names += opt.zero_one_loss_items if len(opt.bg_loss_weights) != 1 and len(opt.bg_loss_weights) != len( opt.bg_loss_items): print(fmt.RED + "bg_loss_weights does not match loss items" + fmt.END) exit() if len(opt.bg_loss_weights) == 1 and len(opt.bg_loss_items) > 1: opt.bg_loss_weights = np.ones(len(opt.bg_loss_items), np.float32) * opt.bg_loss_weights[0] self.loss_names += opt.bg_loss_items if opt.sparse_loss_weight > 0: self.loss_names += ["sparse"] # add the functions used in losses self.l1loss = torch.nn.L1Loss().to(self.device) self.l2loss = torch.nn.MSELoss().to(self.device) def check_setup_visuals(self, opt): if 
opt.visual_items is None: print("visual_items not ", opt.visual_items) self.add_default_visual_items(opt) self.visual_names += opt.visual_items self.visual_names += opt.visual_items_additional else: self.visual_names += opt.visual_items if len(self.visual_names) == 0: print(fmt.YELLOW + "No items are visualized" + fmt.END) def create_network_models(self, opt): ''' This function should create the rendering networks. Every subnetwork model needs to be named as self.net_"name", and the "name" needs to be added to the self.model_names list. An example of this is like: self.model_names = ['ray_marching'] self.net_ray_marching = network_torch_model(self.opt) if self.opt.gpu_ids: self.net_ray_marching.to(self.device) self.net_ray_marching = torch.nn.DataParallel( self.net_ray_marching, self.opt.gpu_ids) ''' pass def run_network_models(self): ''' This function defines how the network is run. This function should use the self.input as input to the network. and return a dict of output (that will be assign to self.output). If only a sinlge network is used, this function could be simply just: return net_module(**self.input) ''' raise NotImplementedError() def prepare_network_parameters(self, opt): ''' Setup the parameters the network is needed. By default, it finds rendering (shading) function, ray generation function, tonemap function, etc. ''' self.check_setup_loss(opt) if len(self.loss_names) == 1 and opt.is_train == True: print(fmt.RED + "Requiring losses to train" + fmt.END) raise NotImplementedError() self.check_setup_visuals(opt) self.check_setup_renderFunc_channels(opt) self.blend_func = find_blend_function(opt.which_blend_func) self.raygen_func = find_ray_generation_method(opt.which_ray_generation) self.tonemap_func = find_tone_map(opt.which_tonemap_func) self.found_funcs = {} add_property2dict( self.found_funcs, self, ["blend_func", "raygen_func", "tonemap_func", "render_func"]) def setup_optimizer(self, opt): ''' Setup the optimizers for all networks. 
This assumes network modules have been added to self.model_names By default, it uses an adam optimizer for all parameters. ''' params = [] for name in self.model_names: net = getattr(self, 'net_' + name) params = params + list(net.parameters()) self.optimizers = [] self.optimizer = torch.optim.Adam(params, lr=opt.lr, betas=(0.9, 0.999)) self.optimizers.append(self.optimizer) def check_opts(self, opt): pass def initialize(self, opt): super(BaseRenderingModel, self).initialize(opt) self.opt = opt if self.is_train: self.check_opts(opt) self.prepare_network_parameters(opt) self.create_network_models(opt) #check model creation if not self.model_names: print( fmt.RED + "No network is implemented! Or network's name is not properly added to self.model_names" + fmt.END) raise NotImplementedError() for mn in self.model_names: if not hasattr(self, "net_" + mn): print(fmt.RED + "Network " + mn + " is missing" + fmt.END) raise NotImplementedError() # setup optimizer if self.is_train: self.setup_optimizer(opt) def set_input(self, input): # setup self.input # this dict is supposed to be sent the network via **self.input in run_network_modules self.input = input for key, item in self.input.items(): if isinstance(item, torch.Tensor): self.input[key] = item.to(self.device) # gt required in loss compute self.gt_image = self.input['gt_image'].to( self.device) if 'gt_image' in input else None self.gt_depth = self.input['gt_depth'].to( self.device) if 'gt_depth' in input else None self.gt_mask = self.input['gt_mask'].to( self.device) if 'gt_mask' in input else None def set_visuals(self): for key, item in self.output.items(): if key in self.visual_names: setattr(self, key, item) if "coarse_raycolor" not in self.visual_names: key = "coarse_raycolor" setattr(self, key, self.output[key]) def check_setup_renderFunc_channels(self, opt): ''' Find render functions; the function is often used by subclasses when creating rendering networks. 
''' self.render_func = find_render_function(opt.which_render_func) if opt.which_render_func == 'radiance': if opt.out_channels is None: opt.out_channels = 4 elif opt.which_render_func == 'microfacet': if opt.out_channels is None: opt.out_channels = 8 elif opt.which_render_func == 'harmonics': if opt.out_channels is None: opt.out_channels = 1 + 3 * 5 * 5 deg = int(((opt.out_channels - 1) / 3)**0.5) if 1 + deg * deg * 3 != opt.out_channels: print( fmt.RED + '[Error] output channels should match the number of sh basis' + fmt.END) exit() if deg <= 5: print("using SH table") self.shcomputer = SphericalHarm_table(deg) else: print("using runtime SH") self.shcomputer = SphericalHarm(deg) self.render_func.sphericalHarm = self.shcomputer else: if opt.out_channels is None: opt.out_channels = 8 self.out_channels = opt.out_channels def check_getDecoder(self, opt, **kwargs): '''construct a decoder; this is often used by subclasses when creating networks.''' decoder = None if opt.which_decoder_model == 'mlp': decoder = MlpDecoder(num_freqs=opt.num_pos_freqs, out_channels=opt.out_channels, **kwargs) elif opt.which_decoder_model == 'viewmlp': decoder = ViewMlpDecoder(num_freqs=opt.num_pos_freqs, num_viewdir_freqs=opt.num_viewdir_freqs, num_channels=opt.out_channels, **kwargs) elif opt.which_decoder_model == 'viewmlpsml': decoder = ViewMlpSmlDecoder(num_freqs=opt.num_pos_freqs, num_viewdir_freqs=opt.num_viewdir_freqs, num_channels=opt.out_channels, **kwargs) elif opt.which_decoder_model == 'viewmlpmid': decoder = ViewMlpMidDecoder(num_freqs=opt.num_pos_freqs, num_viewdir_freqs=opt.num_viewdir_freqs, num_channels=opt.out_channels, **kwargs) elif opt.which_decoder_model == 'nv_mlp': decoder = VolumeDecoder(256, template_type=opt.nv_template_type, template_res=opt.nv_resolution, out_channels=opt.out_channels, **kwargs) elif opt.which_decoder_model == 'discrete_microfacet': decoder = DiscreteVolumeMicrofacetDecoder( opt.discrete_volume_folder, out_channels=opt.out_channels, **kwargs) 
elif opt.which_decoder_model == 'discrete_general': decoder = DiscreteVolumeGeneralDecoder( opt.discrete_volume_folder, out_channels=opt.out_channels, **kwargs) elif opt.which_decoder_model == 'mixed_mlp': decoder = MixedDecoder(256, template_type=opt.nv_template_type, template_res=opt.nv_resolution, mlp_channels=128, out_channels=opt.out_channels, position_freqs=opt.num_pos_freqs, feature_freqs=opt.num_feature_freqs, **kwargs) elif opt.which_decoder_model == 'mixed_separate_code': decoder = MixedSeparatedDecoder( 256, template_type=opt.nv_template_type, template_res=opt.nv_resolution, mlp_channels=128, out_channels=opt.out_channels, position_freqs=opt.num_pos_freqs, feature_freqs=opt.num_feature_freqs, **kwargs) else: raise RuntimeError('Unknown decoder model: ' + opt.which_decoder_model) return decoder def forward(self): self.output = self.run_network_models() self.set_visuals() if not self.opt.no_loss: self.compute_losses() def save_image(self, img_array, filepath): assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]) if img_array.dtype != np.uint8: img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8) os.makedirs(os.path.dirname(filepath), exist_ok=True) Image.fromarray(img_array).save(filepath) def compute_losses(self): ''' Compute loss functions. The total loss is saved in self.loss_total. 
Every loss will be set to an attr, self.loss_lossname ''' self.loss_total = 0 opt = self.opt #color losses for i, name in enumerate(opt.color_loss_items): if name.startswith("ray_masked"): unmasked_name = name[len("ray_masked")+1:] masked_output = torch.masked_select(self.output[unmasked_name], (self.output["ray_mask"] > 0)[..., None].expand(-1, -1, 3)).reshape(1, -1, 3) masked_gt = torch.masked_select(self.gt_image, (self.output["ray_mask"] > 0)[..., None].expand(-1, -1, 3)).reshape(1, -1, 3) if masked_output.shape[1] > 0: loss = self.l2loss(masked_output, masked_gt) else: loss = torch.tensor(0.0, dtype=torch.float32, device=masked_output.device) # print("loss", name, torch.max(torch.abs(loss))) elif name.startswith("ray_miss"): unmasked_name = name[len("ray_miss") + 1:] masked_output = torch.masked_select(self.output[unmasked_name], (self.output["ray_mask"] == 0)[..., None].expand(-1, -1, 3)).reshape( 1, -1, 3) masked_gt = torch.masked_select(self.gt_image,(self.output["ray_mask"] == 0)[..., None].expand(-1, -1, 3)).reshape(1, -1, 3) if masked_output.shape[1] > 0: loss = self.l2loss(masked_output, masked_gt) * masked_gt.shape[1] else: loss = torch.tensor(0.0, dtype=torch.float32, device=masked_output.device) elif name.startswith("ray_depth_masked"): pixel_xy = self.input["pixel_idx"][0].long() ray_depth_mask = self.output["ray_depth_mask"][0][pixel_xy[...,1], pixel_xy[...,0]] > 0 unmasked_name = name[len("ray_depth_masked")+1:] masked_output = torch.masked_select(self.output[unmasked_name], (ray_depth_mask[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)) masked_gt = torch.masked_select(self.gt_image, (ray_depth_mask[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)) loss = self.l2loss(masked_output, masked_gt) # print("loss", loss) # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_coarse_raycolor") # filepath = os.path.join( # "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename) # csave = torch.zeros((1, 512, 640, 
3)) # ray_masks = (self.output["ray_mask"] > 0).reshape(1, -1) # pixel_xy = self.input["pixel_idx"].reshape(1, -1, 2)[ray_masks, :] # # print("masked_output", masked_output.shape, pixel_xy.shape) # csave[:, pixel_xy[..., 1].long(), pixel_xy[..., 0].long(), :] = masked_output.cpu() # img = csave.view(512, 640, 3).detach().numpy() # self.save_image(img, filepath) # # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_gt") # filepath = os.path.join( # "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename) # csave = torch.zeros((1, 512, 640, 3)) # ray_masks = (self.output["ray_mask"] > 0).reshape(1, -1) # pixel_xy = self.input["pixel_idx"].reshape(1, -1, 2)[ray_masks, :] # # print("masked_output", masked_output.shape, pixel_xy.shape) # csave[:, pixel_xy[..., 1].long(), pixel_xy[..., 0].long(), :] = masked_gt.cpu() # img = csave.view(512, 640, 3).detach().numpy() # self.save_image(img, filepath) # print("psnrkey recal:",mse2psnr(torch.nn.MSELoss().to("cuda")(masked_output, masked_gt)) ) else: if name not in self.output: print(fmt.YELLOW + "No required color loss item: " + name + fmt.END) # print("no_mask") loss = self.l2loss(self.output[name], self.gt_image) # print("loss", name, torch.max(torch.abs(loss))) self.loss_total += (loss * opt.color_loss_weights[i] + 1e-6) # loss.register_hook(lambda grad: print(torch.any(torch.isnan(grad)), grad, opt.color_loss_weights[i])) setattr(self, "loss_" + name, loss) # print(torch.sum(self.output["ray_mask"])) #depth losses for i, name in enumerate(opt.depth_loss_items): if name not in self.output: print(fmt.YELLOW + "No required depth loss item: " + name + fmt.END) loss = self.l2loss(self.output[name] * self.gt_mask, self.gt_depth * self.gt_mask) self.loss_total += loss * opt.depth_loss_weights[i] setattr(self, "loss_" + name, loss) #background losses for i, name in enumerate(opt.bg_loss_items): if name not in self.output: print(fmt.YELLOW + "No required mask loss item: " + name + 
fmt.END) loss = self.l2loss(self.output[name] * (1 - self.gt_mask), 1 - self.gt_mask) self.loss_total += loss * opt.bg_loss_weights[i] setattr(self, "loss_" + name, loss) #zero_one regularization losses for i, name in enumerate(opt.zero_one_loss_items): if name not in self.output: print(fmt.YELLOW + "No required zero_one loss item: " + name + fmt.END) # setattr(self, "loss_" + name, torch.zeros([1], device="cuda", dtype=torch.float32)) else: val = torch.clamp(self.output[name], self.opt.zero_epsilon, 1 - self.opt.zero_epsilon) # print("self.output[name]",torch.min(self.output[name]), torch.max(self.output[name])) loss = torch.mean(torch.log(val) + torch.log(1 - val)) self.loss_total += loss * opt.zero_one_loss_weights[i] setattr(self, "loss_" + name, loss) # l2 square regularization losses for i, name in enumerate(opt.l2_size_loss_items): if name not in self.output: print(fmt.YELLOW + "No required l2_size_loss_item : " + name + fmt.END) loss = self.l2loss(self.output[name], torch.zeros_like(self.output[name])) # print("self.output[name]", self.output[name].shape, loss.shape) self.loss_total += loss * opt.l2_size_loss_weights[i] setattr(self, "loss_" + name, loss) if opt.sparse_loss_weight > 0: # weight and conf_coefficient 1, 1134, 40, 8 if "weight" not in self.output or "conf_coefficient" not in self.output: print(fmt.YELLOW + "No required sparse_loss_weight weight or conf_coefficient : " + fmt.END) loss = torch.sum(self.output["weight"] * torch.abs(1 - torch.exp(-2 * self.output["conf_coefficient"]))) / (torch.sum(self.output["weight"]) + 1e-6) # print("self.output[name]", self.output[name].shape, loss.shape) self.output.pop('weight') self.output.pop('conf_coefficient') self.loss_total += loss * opt.sparse_loss_weight setattr(self, "loss_sparse", loss) # self.loss_total = Variable(self.loss_total, requires_grad=True) def backward(self): self.optimizer.zero_grad() if self.opt.is_train: self.loss_total.backward() self.optimizer.step() def optimize_parameters(self, 
backward=True, total_steps=0): self.forward() self.backward()
28,653
41.45037
160
py
pointnerf
pointnerf-master/models/mvs_points_volumetric_model.py
from .base_rendering_model import *
from .neural_points_volumetric_model import NeuralPointsVolumetricModel
from .neural_points.neural_points import NeuralPoints
from .mvs.mvs_points_model import MvsPointsModel
from .mvs import mvs_utils
from . import base_model
from .aggregators.point_aggregators import PointAggregator
import os
import torch.nn.functional as F
import time
from utils import format as fmt


class MvsPointsVolumetricModel(NeuralPointsVolumetricModel):
    """Point-NeRF volumetric renderer whose neural points come from an MVS network.

    Combines an MVS point-generation network (``net_mvs``) with the point-based
    volumetric renderer inherited from ``NeuralPointsVolumetricModel``.
    ``opt.mode`` selects the active parts: 0 = both, 1 = MVS only,
    2 = point-NeRF only.  ``opt.feedforward`` == 0 means per-scene
    optimization (neural points become nn.Parameters).
    """

    def __init__(self,):
        super().__init__()
        # Pre-declare every attribute that cleanup() later `del`s so the
        # deletes never hit a missing attribute.
        (self.optimizer, self.neural_point_optimizer, self.output,
         self.raygen_func, self.render_func, self.blend_func,
         self.coarse_raycolor, self.gt_image, self.input, self.l1loss,
         self.l2loss, self.tonemap_func, self.top_ray_miss_ids,
         self.top_ray_miss_loss, self.loss_ray_masked_coarse_raycolor,
         self.loss_ray_miss_coarse_raycolor, self.loss_total,
         self.loss_coarse_raycolor, self.loss_conf_coefficient) = (None,) * 19

    # @staticmethod  # deliberately unbound in the original; invoked on the class
    def modify_commandline_options(parser, is_train=True):
        """Register MVS and point-NeRF command-line options on *parser*."""
        MvsPointsModel.modify_commandline_options(parser, is_train)
        NeuralPointsVolumetricModel.modify_commandline_options(parser, is_train=is_train)
        parser.add_argument(
            '--mode',
            type=int,
            default=0,
            help='0 for both mvs and pointnerf, 1 for only mvs, 2 for only pointnerf')
        parser.add_argument(
            '--add_shading_dist',
            type=int,
            default=0,
            help='0 for both mvs and pointnerf, 1 for only mvs, 2 for only pointnerf')

    def create_network_models(self, opt):
        """Instantiate the MVS network and/or the ray-marching network per opt.mode."""
        if opt.mode != 2:
            self.net_mvs = MvsPointsModel(opt).to(self.device)
            self.model_names = ['mvs']
        if opt.mode != 1:
            super(MvsPointsVolumetricModel, self).create_network_models(opt)

    def setup_optimizer(self, opt):
        '''
        Setup the optimizers for all networks.
        This assumes network modules have been added to self.model_names.
        By default, it uses an adam optimizer for all parameters.
        '''
        net_params = []
        neural_params = []
        mvs_params = []
        self.optimizers = []
        for name in self.model_names:
            net = getattr(self, 'net_' + name)
            if name == "mvs":
                param_lst = list(net.named_parameters())
                mvs_params = mvs_params + [par[1] for par in param_lst]
            else:
                # Split neural-point parameters from the rest so they can get
                # their own learning rate (opt.plr).
                param_lst = list(net.named_parameters())
                net_params = net_params + [par[1] for par in param_lst if not par[0].startswith("module.neural_points")]
                neural_params = neural_params + [par[1] for par in param_lst if par[0].startswith("module.neural_points")]
        self.net_params = net_params
        self.neural_params = neural_params
        self.mvs_params = mvs_params

        # MVS branch may use its own lr; falls back to the global one.
        mvs_lr = opt.mvs_lr if opt.mvs_lr is not None else opt.lr
        if len(mvs_params) > 0:
            self.mvs_optimizer = torch.optim.Adam(mvs_params, lr=mvs_lr, betas=(0.9, 0.999))
            self.optimizers.append(self.mvs_optimizer)
        if len(net_params) > 0:
            self.optimizer = torch.optim.Adam(net_params, lr=opt.lr, betas=(0.9, 0.999))
            self.optimizers.append(self.optimizer)
        if len(neural_params) > 0:
            self.neural_point_optimizer = torch.optim.Adam(neural_params,
                                                           lr=opt.plr,  #/ 5.0,
                                                           betas=(0.9, 0.999))
            self.optimizers.append(self.neural_point_optimizer)
            # NOTE(review): `param_lst` here is whatever the last loop
            # iteration left behind; correct only because the non-mvs model is
            # iterated last — confirm if model_names ever grows.
            print("neural_params", [(par[0], par[1].shape, par[1].requires_grad) for par in param_lst if par[0].startswith("module.neural_points")])
        else:
            # When not doing per-scene optimization
            print("no neural points as nn.Parameter")

    def backward(self, iters):
        """Backprop loss_total and step the optimizers, alternating by opt.alter_step.

        With alter_step == 0 every relevant optimizer steps each call; otherwise
        the renderer optimizer and the mvs/neural-point optimizer alternate in
        blocks of `alter_step` iterations.
        """
        for optimizer in self.optimizers:
            optimizer.zero_grad()
        if self.opt.is_train:
            if self.loss_total != 0:
                self.loss_total.backward()
            else:
                print(fmt.RED + "Loss == 0" + fmt.END)
            if self.opt.feedforward:
                if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 0:
                    self.optimizer.step()
                if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 1:
                    self.mvs_optimizer.step()
            else:
                if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 0:
                    self.optimizer.step()
                if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 1:
                    self.neural_point_optimizer.step()

    def forward(self):
        """Generate points with the MVS net (unless mode==2), render, and compute losses."""
        if self.opt.mode != 2:
            points_xyz, points_embedding, points_colors, points_dirs, points_conf = self.net_mvs(self.input)
            # If feedforward, no neural points optimization.
            self.neural_points.set_points(points_xyz, points_embedding,
                                          points_color=points_colors,
                                          points_dir=points_dirs,
                                          points_conf=points_conf,
                                          parameter=self.opt.feedforward == 0)
        self.output = self.run_network_models()
        if "depths_h" in self.input:
            # depths_h may be stacked per view; pick the target view then.
            depth_gt = self.input["depths_h"][:, self.opt.trgt_id, ...] if self.input["depths_h"].dim() > 3 else self.input["depths_h"]
            self.output["ray_depth_mask"] = depth_gt > 0
        self.set_visuals()
        if not self.opt.no_loss:
            self.compute_losses()

    def update_rank_ray_miss(self, total_steps):
        """Fold the current ray-miss loss into the top-k ranking used for probing."""
        if (self.opt.prob_kernel_size is None
                or np.sum(np.asarray(self.opt.prob_tiers) < total_steps) < (len(self.opt.prob_kernel_size) // 3)):
            if self.opt.prob_freq > 0 and self.opt.prob_num_step > 1:
                self.top_ray_miss_loss, self.top_ray_miss_ids = self.rank_ray_miss(
                    self.input["id"][0], self.loss_ray_miss_coarse_raycolor,
                    self.top_ray_miss_ids, self.top_ray_miss_loss)
            elif self.opt.prob_freq > 0 and self.opt.prob_num_step == 1:
                self.top_ray_miss_loss[0] = max(self.loss_ray_miss_coarse_raycolor, self.top_ray_miss_loss[0])

    def rank_ray_miss(self, new_id, newloss, inds, losses):
        """Insert (new_id, newloss) into the descending-loss ranking; return (losses, inds)."""
        with torch.no_grad():
            mask = (inds - new_id) == 0
            if torch.sum(mask) > 0:
                # Frame already ranked: keep the worse (larger) loss.
                losses[mask] = max(newloss, losses[mask])
            else:
                # Evict the current tail and re-sort.
                inds[-1] = new_id
                losses[-1] = newloss
            losses, indices = torch.sort(losses, descending=True)
            inds = inds[indices]
        return losses, inds

    def setup(self, opt, train_len=None):
        """Base setup plus initialization of the ray-miss probing buffers."""
        super(MvsPointsVolumetricModel, self).setup(opt)
        if opt.prob_freq > 0 and train_len is not None and opt.prob_num_step > 1:
            self.num_probe = train_len // opt.prob_num_step
            self.reset_ray_miss_ranking()
        elif opt.prob_freq > 0 and train_len is not None and opt.prob_num_step == 1:
            self.top_ray_miss_loss = torch.zeros([1], dtype=torch.float32, device=self.device)

    def reset_ray_miss_ranking(self):
        """Reset the probing buffers to num_probe+1 zero losses / identity ids."""
        self.top_ray_miss_loss = torch.zeros([self.num_probe + 1], dtype=torch.float32, device=self.device)
        self.top_ray_miss_ids = torch.arange(self.num_probe + 1, dtype=torch.int32, device=self.device)

    def set_points(self, points_xyz, points_embedding, points_color=None,
                   points_dir=None, points_conf=None, Rw2c=None, eulers=None,
                   editing=False):
        """Load points into the neural point cloud; rebuild optimizers for per-scene training."""
        if not editing:
            self.neural_points.set_points(points_xyz, points_embedding,
                                          points_color=points_color,
                                          points_dir=points_dir,
                                          points_conf=points_conf,
                                          parameter=self.opt.feedforward == 0,
                                          Rw2c=Rw2c, eulers=eulers)
        else:
            self.neural_points.editing_set_points(points_xyz, points_embedding,
                                                  points_color=points_color,
                                                  points_dir=points_dir,
                                                  points_conf=points_conf,
                                                  parameter=self.opt.feedforward == 0,
                                                  Rw2c=Rw2c, eulers=eulers)
        if self.opt.feedforward == 0 and self.opt.is_train:
            self.setup_optimizer(self.opt)

    def prune_points(self, thresh):
        """Drop neural points whose confidence falls below *thresh*."""
        self.neural_points.prune(thresh)

    def clean_optimizer_scheduler(self):
        """Tear down optimizers, schedulers and the cached parameter lists."""
        self.optimizers.clear()
        self.schedulers.clear()
        self.neural_params.clear()
        self.mvs_params.clear()
        del self.optimizer, self.neural_point_optimizer, self.optimizers, \
            self.schedulers, self.mvs_params, self.neural_params

    def reset_optimizer(self, opt):
        """Recreate all optimizers from scratch."""
        self.clean_optimizer()
        self.setup_optimizer(opt)

    def clean_optimizer(self):
        self.optimizers.clear()
        self.net_params.clear()
        self.neural_params.clear()
        self.mvs_params.clear()
        del self.optimizer, self.neural_point_optimizer, self.net_params, \
            self.neural_params, self.mvs_params

    def clean_scheduler(self):
        for scheduler in self.schedulers:
            del scheduler
        self.schedulers.clear()
        del self.schedulers

    def init_scheduler(self, total_steps, opt):
        """Create schedulers and fast-forward them *total_steps* steps (for resuming)."""
        self.schedulers = [
            base_model.get_scheduler(optim, opt) for optim in self.optimizers
        ]
        if total_steps > 0:
            for scheduler in self.schedulers:
                for i in range(total_steps):
                    scheduler.step()

    def reset_scheduler(self, total_steps, opt):
        """Drop existing schedulers and rebuild them, fast-forwarded to *total_steps*."""
        self.schedulers.clear()
        self.schedulers = [
            base_model.get_scheduler(optim, opt) for optim in self.optimizers
        ]
        if total_steps > 0:
            for scheduler in self.schedulers:
                for i in range(total_steps):
                    scheduler.step()

    def gen_points(self):
        """Run the MVS net on the current input and return its per-view point data."""
        cam_xyz_lst, photometric_confidence_lst, point_mask_lst, HDWD, data_mvs, intrinsics_lst, extrinsics_lst = self.net_mvs.gen_points(self.input)
        return cam_xyz_lst, photometric_confidence_lst, point_mask_lst, \
            intrinsics_lst, extrinsics_lst, HDWD, data_mvs['c2ws'], \
            data_mvs['w2cs'], self.input["intrinsics"], self.input["near_fars"]

    def query_embedding(self, HDWD, cam_xyz, photometric_confidence, imgs,
                        c2ws, w2cs, intrinsics, cam_vid, pointdir_w=True):
        """Extract image features and query per-point embeddings from the MVS net."""
        img_feats = self.net_mvs.get_image_features(imgs)
        return self.net_mvs.query_embedding(HDWD, cam_xyz, photometric_confidence,
                                            img_feats, c2ws, w2cs, intrinsics,
                                            cam_vid, pointdir_w=pointdir_w)

    def grow_points(self, points_xyz, points_embedding, points_color, points_dir, points_conf):
        """Append new points (with embeddings/colors/dirs/confidences) to the cloud."""
        self.neural_points.grow_points(points_xyz, points_embedding, points_color,
                                       points_dir, points_conf)

    def cleanup(self):
        """Move everything off the GPU and delete all heavyweight attributes."""
        if hasattr(self, "neural_points"):
            self.neural_points.querier.clean_up()
            del self.neural_points.querier
            self.neural_points.cpu()
            del self.neural_points
        print("self.model_names", self.model_names)
        if hasattr(self, "net_ray_marching"):
            self.net_ray_marching.cpu()
            del self.net_ray_marching
        if hasattr(self, "net_mvs"):
            self.net_mvs.cpu()
            del self.net_mvs
        if hasattr(self, "net_params"):
            self.net_params.clear()
            del self.net_params
        if hasattr(self, "neural_params"):
            self.neural_params.clear()
            del self.neural_params
        if hasattr(self, "mvs_params"):
            self.mvs_params.clear()
            del self.mvs_params
        if hasattr(self, "aggregator"):
            self.aggregator.cpu()
            del self.aggregator
        if hasattr(self, "optimizers"):
            self.optimizers.clear()
            self.schedulers.clear()
            del self.optimizer, self.neural_point_optimizer, self.output, \
                self.raygen_func, self.render_func, self.blend_func, \
                self.coarse_raycolor, self.gt_image, self.input, self.l1loss, \
                self.l2loss, self.tonemap_func, self.top_ray_miss_ids, \
                self.top_ray_miss_loss, self.loss_ray_masked_coarse_raycolor, \
                self.loss_ray_miss_coarse_raycolor, self.loss_total, \
                self.loss_coarse_raycolor, self.loss_conf_coefficient

    def set_bg(self, xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst,
               intrinsics_all, HDWD_lst, plane_color, fg_masks=None, **kwargs):
        """Warp source images onto the background plane and composite plane colors.

        Returns (warped_feats, fg_masks). If *fg_masks* is None the per-view
        foreground masks are computed from the neural point cloud and stacked.
        """
        warped_feats = []
        c2w = torch.eye(4, device="cuda", dtype=torch.float32)[None, ...]  # c2w[:,0,...].cuda()
        count = 0
        mask_lst = []
        fg_mask_lst = []
        for imgs, w2c, intrinsics, HDWD in zip(img_lst, w2cs_lst, intrinsics_all, HDWD_lst):
            # c2w: 1, 3, 4, 4, w2c: 1, 3, 4, 4, intrinsics: 1, 3, 3
            HD, WD = HDWD[0], HDWD[1]
            w2c = w2c[:, 0, ...]
            warp = mvs_utils.homo_warp_nongrid  # homo_warp_nongrid_occ if self.args.depth_occ > 0 else homo_warp_nongrid
            src_grid, mask, hard_id_xy = warp(c2w, w2c, intrinsics, xyz_world_sect_plane,
                                              HD, WD, filter=False, tolerate=0.1)
            hard_id_xy_valid = hard_id_xy[:, mask[0, :, 0], :]
            if fg_masks is None:
                fg_mask = mvs_utils.homo_warp_fg_mask(c2w, w2c, intrinsics,
                                                      self.neural_points.xyz[None, ...],
                                                      HD, WD, tolerate=0.1)
                fg_mask_lst.append(fg_mask)
            else:
                fg_mask = fg_masks[:, count, ...]
            # Keep only plane pixels not covered by the foreground point cloud.
            mask[0, mask[0, ..., 0].clone(), 0] = (fg_mask[hard_id_xy_valid[0, ..., 1].long(),
                                                           hard_id_xy_valid[0, ..., 0].long()] < 1)
            # src_grid: 1, 2032, 2
            src_grid = src_grid[:, mask[0, ..., 0], :]
            mask_lst.append(mask[0, ..., 0])
            warped_src_feat = mvs_utils.extract_from_2d_grid(imgs[0:1, ...], src_grid.cpu(), mask.cpu())
            warped_feats.append(warped_src_feat.cuda())
            count += 1
        warped_feats = torch.stack(warped_feats, dim=-2)  # 1, 2304, 16, 3
        thresh = 0.03
        # A pixel "fits" when all three channels are within thresh of the plane color.
        fit_mask = torch.prod(torch.logical_and(warped_feats >= (plane_color - thresh),
                                                (warped_feats <= plane_color + thresh)), dim=-1)
        nofit_feats_inds = (1 - fit_mask).nonzero()  # 1, 2304, 16
        warped_feats[0, nofit_feats_inds[..., 1], nofit_feats_inds[..., 2], :] = 0
        warped_feats = torch.max(warped_feats, dim=-2)[0]
        # FIX: the original tested `fg_mask_lst is None`, which is never true
        # (fg_mask_lst is always a list), so the freshly computed masks were
        # discarded and None was returned whenever fg_masks was not supplied.
        fg_masks = torch.stack(fg_mask_lst, dim=1) if fg_masks is None else fg_masks
        return warped_feats, fg_masks

    #
    def load_networks(self, epoch):
        """Load network weights for *epoch*; optionally override point confidences.

        For the "best" checkpoint of the ray-marching net, missing point
        confidences are filled with opt.default_conf.
        """
        for name, net in zip(self.model_names, self.get_networks()):
            assert isinstance(name, str)
            load_filename = '{}_net_{}.pth'.format(epoch, name)
            load_path = os.path.join(self.opt.resume_dir, load_filename)
            print('loading', name, " from ", load_path)
            if not os.path.isfile(load_path):
                print('cannot load', load_path)
                continue
            state_dict = torch.load(load_path, map_location=self.device)
            if epoch == "best" and name == "ray_marching" \
                    and self.opt.default_conf > 0.0 and self.opt.default_conf <= 1.0 \
                    and self.neural_points.points_conf is not None:
                assert "neural_points.points_conf" not in state_dict
                state_dict["neural_points.points_conf"] = torch.ones_like(
                    self.net_ray_marching.module.neural_points.points_conf) * self.opt.default_conf
            if isinstance(net, nn.DataParallel):
                net = net.module
            net.load_state_dict(state_dict, strict=False)

    def test(self, gen_points=False):
        """Inference pass without gradients; returns self.output."""
        with torch.no_grad():
            if gen_points:
                # Full pipeline including MVS point generation.
                self.forward()
            else:
                self.output = self.run_network_models()
                if "depths_h" in self.input:
                    depth_gt = self.input["depths_h"][:, self.opt.trgt_id, ...] \
                        if self.input["depths_h"].dim() > 3 else self.input["depths_h"]
                    self.output["ray_depth_mask"] = depth_gt > 0
                self.set_visuals()
                if not self.opt.no_loss:
                    self.compute_losses()
        return self.output
16,910
48.017391
519
py
pointnerf
pointnerf-master/models/base_model.py
import os
from typing import List

import torch
from torch import nn

from .helpers.networks import get_scheduler


class BaseModel:
    """Abstract base for all models.

    Provides option parsing hooks, device selection, checkpoint save/load,
    scheduler stepping, and loss/visual reporting. Subclasses register their
    networks in ``self.model_names`` (as ``net_<name>`` attributes) and
    implement ``forward``.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Hook for subclasses to add model-specific CLI options."""
        return parser

    def name(self):
        """Return the concrete class name (used for logging)."""
        return self.__class__.__name__

    def initialize(self, opt):
        """Store options, select the compute device, reset bookkeeping lists."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        # First configured GPU if any, else CPU. (Original double-wrapped the
        # CPU branch in torch.device; this is the equivalent cleaned form.)
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) \
            if self.gpu_ids else torch.device('cpu')
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        torch.backends.cudnn.benchmark = True
        self.loss_names = []    # losses to report
        self.model_names = []   # models that will be used
        self.visual_names = []  # visuals to show at test time

    def set_input(self, input: dict):
        """Stash the current batch. (Parameter name kept for caller compat.)"""
        self.input = input

    def forward(self):
        '''Run the forward pass. Read from self.input, set self.output'''
        raise NotImplementedError()

    def setup(self, opt):
        '''Creates schedulers if train, Load and print networks if resume'''
        if self.is_train:
            self.schedulers = [
                get_scheduler(optim, opt) for optim in self.optimizers
            ]
        if not self.is_train or opt.resume_dir:
            print("opt.resume_iter!!!!!!!!!", opt.resume_iter)
            self.load_networks(opt.resume_iter)
        self.print_networks(opt.verbose)

    def eval(self):
        '''turn on eval mode'''
        for net in self.get_networks():
            net.eval()

    def train(self):
        """Switch every registered network to train mode."""
        for net in self.get_networks():
            net.train()

    def test(self):
        """Run forward without gradients."""
        with torch.no_grad():
            self.forward()

    def get_networks(self) -> List[nn.Module]:
        """Return all registered networks (attributes named net_<name>).

        FIX: return annotation was the list literal ``[nn.Module]``; replaced
        with the proper ``typing.List`` form.
        """
        ret = []
        for name in self.model_names:
            assert isinstance(name, str)
            net = getattr(self, 'net_{}'.format(name))
            assert isinstance(net, nn.Module)
            ret.append(net)
        return ret

    def get_current_visuals(self, data=None):
        """Collect the visual tensors to display, skipping the masked variants."""
        ret = {}
        for name in self.visual_names:
            assert isinstance(name, str)
            if name not in ["gt_image_ray_masked", "ray_depth_masked_gt_image",
                            "ray_depth_masked_coarse_raycolor",
                            "ray_masked_coarse_raycolor"]:
                ret[name] = getattr(self, name)
        # Always report the coarse ray color even if not explicitly listed.
        if "coarse_raycolor" not in self.visual_names:
            ret["coarse_raycolor"] = getattr(self, "coarse_raycolor")
        return ret

    def get_current_losses(self):
        """Return {name: loss_<name>} for every registered loss."""
        ret = {}
        for name in self.loss_names:
            assert isinstance(name, str)
            ret[name] = getattr(self, 'loss_' + name)
        return ret

    def save_networks(self, epoch, other_states=None, back_gpu=True):
        """Save every registered network plus *other_states* under save_dir.

        Saving is best-effort per network (errors are printed, not raised).
        FIX: *other_states* previously used a mutable default argument (``{}``).
        """
        other_states = {} if other_states is None else other_states
        for name, net in zip(self.model_names, self.get_networks()):
            save_filename = '{}_net_{}.pth'.format(epoch, name)
            save_path = os.path.join(self.save_dir, save_filename)
            try:
                if isinstance(net, nn.DataParallel):
                    net = net.module
                # Move to CPU so the checkpoint is device-independent.
                net.cpu()
                torch.save(net.state_dict(), save_path)
                if back_gpu:
                    net.cuda()
            except Exception as e:
                print("savenet:", e)

        save_filename = '{}_states.pth'.format(epoch)
        save_path = os.path.join(self.save_dir, save_filename)
        torch.save(other_states, save_path)

    def load_networks(self, epoch):
        """Load weights for every registered network; skip missing files."""
        for name, net in zip(self.model_names, self.get_networks()):
            print('loading pth')
            assert isinstance(name, str)
            load_filename = '{}_net_{}.pth'.format(epoch, name)
            print("loading epoch, name", epoch, name)
            load_path = os.path.join(self.opt.resume_dir, load_filename)
            if not os.path.isfile(load_path):
                print('cannot load', load_path)
                continue
            state_dict = torch.load(load_path, map_location=self.device)
            if isinstance(net, nn.DataParallel):
                net = net.module
            net.load_state_dict(state_dict, strict=False)

    def print_networks(self, verbose):
        """Print parameter counts (and full module structure when verbose)."""
        print('------------------- Networks -------------------')
        for name, net in zip(self.model_names, self.get_networks()):
            num_params = 0
            for param in net.parameters():
                num_params += param.numel()
            if verbose:
                print(net)
            print('[Network {}] Total number of parameters: {:.3f}M'.format(
                name, num_params / 1e6))
        print('------------------------------------------------')

    def set_requires_grad(self, nets, requires_grad):
        """Enable/disable gradients for one network or a list of networks."""
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net:
                for param in net.parameters():
                    param.requires_grad = requires_grad

    def update_learning_rate(self, **kwargs):
        """Step every scheduler and report the resulting learning rates.

        When an `opt` kwarg is given, iteration-based policies only print at
        print_freq intervals to avoid log spam.
        """
        for scheduler in self.schedulers:
            scheduler.step()
        for i, optim in enumerate(self.optimizers):
            lr = optim.param_groups[0]['lr']
            if "opt" in kwargs:
                opt = kwargs["opt"]
                if not opt.lr_policy.startswith("iter") or \
                        ("total_steps" in kwargs and kwargs["total_steps"] % opt.print_freq == 0):
                    print('optimizer {}, learning rate = {:.7f}'.format(i + 1, lr))
            else:
                print('optimizer {}, learning rate = {:.7f}'.format(i + 1, lr))
5,600
34.675159
146
py
pointnerf
pointnerf-master/models/neural_points_volumetric_model.py
from .base_rendering_model import *
from .neural_points.neural_points import NeuralPoints
from .aggregators.point_aggregators import PointAggregator
import os


class NeuralPointsVolumetricModel(BaseRenderingModel):
    """Volumetric rendering model that marches rays through a neural point cloud.

    Combines a ``NeuralPoints`` store (point positions + features) with a
    ``PointAggregator`` (per-sample feature aggregation) and wraps both in a
    ``NeuralPointsRayMarching`` network module.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        # Collect options from the base model and from both sub-components first.
        BaseRenderingModel.modify_commandline_options(parser, is_train)
        NeuralPoints.modify_commandline_options(parser, is_train)
        PointAggregator.modify_commandline_options(parser, is_train)

        parser.add_argument(
            '--neural_point_dir',
            type=str,
            default=None,
            help='alternative loading neural_point directory')
        parser.add_argument(
            '--embedding_size',
            type=int,
            default=-1,
            help='number of dimensions for latent code embedding')
        parser.add_argument(
            "--loss_embedding_l2_weight",
            type=float,
            default=-1,
            help="weight for the embedding l2 loss",
        )
        parser.add_argument(
            '--loss_kld_weight',
            type=float,
            default=-1,
            help='weight for the VAE kld')
        # encoder
        parser.add_argument(
            "--compute_depth",
            type=int,
            default=0,
            help="If compute detph or not. If false, depth is only computed when depth is required by losses",
        )
        parser.add_argument(
            "--raydist_mode_unit",
            type=int,
            default=0,
            help="if set raydist max as one voxel",
        )
        parser.add_argument(
            '--save_point_freq',
            type=int,
            default=100000,
            help='frequency of showing training results on console')
        parser.add_argument(
            '--alter_step',
            type=int,
            default=0,
            help='0 for no alter,')
        parser.add_argument(
            '--prob',
            type=int,
            default=0,
            help='will be set as 0 for normal traing and 1 for prob, ')

    def add_default_color_losses(self, opt):
        # Ensure the coarse (and, when enabled, fine) ray colors are supervised.
        if "coarse_raycolor" not in opt.color_loss_items:
            opt.color_loss_items.append('coarse_raycolor')
        if opt.fine_sample_num > 0:
            opt.color_loss_items.append('fine_raycolor')

    def add_default_visual_items(self, opt):
        # Default set of tensors dumped for visualization.
        opt.visual_items = ['gt_image', 'coarse_raycolor', 'queried_shading']
        if opt.fine_sample_num > 0:
            opt.visual_items.append('fine_raycolor')

    def run_network_models(self):
        # Run ray marching, then scatter the valid-ray results back to full rays.
        return self.fill_invalid(self.net_ray_marching(**self.input), self.input)

    def fill_invalid(self, output, input):
        """Scatter per-valid-ray outputs back into full-resolution ray tensors.

        The network only processes rays selected by ``ray_mask`` (e.g. 336 of
        1024); rays that hit no points are filled with background defaults.
        """
        ray_mask = output["ray_mask"]
        B, OR = ray_mask.shape
        ray_inds = torch.nonzero(ray_mask)  # (num_valid_rays, 2) -> (batch, ray) indices

        # Rays with no samples default to "background" (1).
        coarse_is_background_tensor = torch.ones([B, OR, 1],
                                                 dtype=output["coarse_is_background"].dtype,
                                                 device=output["coarse_is_background"].device)
        coarse_is_background_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["coarse_is_background"]
        output["coarse_is_background"] = coarse_is_background_tensor
        output['coarse_mask'] = 1 - coarse_is_background_tensor

        if "bg_ray" in self.input:
            # Composite over a provided per-ray background image.
            coarse_raycolor_tensor = coarse_is_background_tensor * self.input["bg_ray"]
            coarse_raycolor_tensor[ray_inds[..., 0], ray_inds[..., 1], :] += output["coarse_raycolor"][0]
        else:
            # Fill with the (tone-mapped) constant background color.
            coarse_raycolor_tensor = self.tonemap_func(
                torch.ones([B, OR, 3],
                           dtype=output["coarse_raycolor"].dtype,
                           device=output["coarse_raycolor"].device) * input["bg_color"][None, ...])
            coarse_raycolor_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["coarse_raycolor"]
        output["coarse_raycolor"] = coarse_raycolor_tensor

        coarse_point_opacity_tensor = torch.zeros([B, OR, output["coarse_point_opacity"].shape[2]],
                                                  dtype=output["coarse_point_opacity"].dtype,
                                                  device=output["coarse_point_opacity"].device)
        coarse_point_opacity_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["coarse_point_opacity"]
        output["coarse_point_opacity"] = coarse_point_opacity_tensor

        queried_shading_tensor = torch.ones([B, OR, output["queried_shading"].shape[2]],
                                            dtype=output["queried_shading"].dtype,
                                            device=output["queried_shading"].device)
        queried_shading_tensor[ray_inds[..., 0], ray_inds[..., 1], :] = output["queried_shading"]
        output["queried_shading"] = queried_shading_tensor

        if self.opt.prob == 1 and "ray_max_shading_opacity" in output:
            output = self.unmask(ray_inds, output,
                                 ["ray_max_sample_loc_w", "ray_max_shading_opacity", "shading_avg_color",
                                  "shading_avg_dir", "shading_avg_conf", "shading_avg_embedding",
                                  "ray_max_far_dist"], B, OR)
        return output

    def unmask(self, ray_inds, output, names, B, OR):
        # Zero-fill and scatter each named per-valid-ray tensor to full rays.
        for name in names:
            if output[name] is not None:
                name_tensor = torch.zeros([B, OR, *output[name].shape[2:]],
                                          dtype=output[name].dtype,
                                          device=output[name].device)
                name_tensor[ray_inds[..., 0], ray_inds[..., 1], ...] = output[name]
                output[name] = name_tensor
        return output

    def get_additional_network_params(self, opt):
        """Build the aggregator / neural-point store and pack ctor kwargs."""
        param = {}
        self.aggregator = self.check_getAggregator(opt)
        # `not not` coerces the (possibly list) depth_loss_items to bool.
        self.is_compute_depth = opt.compute_depth or not not opt.depth_loss_items
        checkpoint_path = os.path.join(opt.checkpoints_dir, opt.name,
                                       '{}_net_ray_marching.pth'.format(opt.resume_iter))
        checkpoint_path = checkpoint_path if os.path.isfile(checkpoint_path) else None
        if opt.num_point > 0:
            self.neural_points = NeuralPoints(opt.point_features_dim, opt.num_point, opt, self.device,
                                              checkpoint=checkpoint_path,
                                              feature_init_method=opt.feature_init_method,
                                              reg_weight=0.,
                                              feedforward=opt.feedforward)
        else:
            self.neural_points = None

        add_property2dict(param, self, [
            'aggregator', 'is_compute_depth', "neural_points", "opt"
        ])
        add_property2dict(param, opt, [
            'num_pos_freqs', 'num_viewdir_freqs'
        ])
        return param

    def create_network_models(self, opt):
        params = self.get_additional_network_params(opt)
        # network
        self.net_ray_marching = NeuralPointsRayMarching(**params, **self.found_funcs)
        self.model_names = ['ray_marching'] if getattr(self, "model_names", None) is None \
            else self.model_names + ['ray_marching']
        # parallel
        if self.opt.gpu_ids:
            self.net_ray_marching.to(self.device)
            self.net_ray_marching = torch.nn.DataParallel(self.net_ray_marching, self.opt.gpu_ids)

    def check_getAggregator(self, opt, **kwargs):
        return PointAggregator(opt)

    def setup_optimizer(self, opt):
        """Create two Adam optimizers: one for the network weights, one for the
        neural-point parameters (so the two can be stepped alternately).
        Assumes the network modules have been registered in ``self.model_names``.
        """
        net_params = []
        neural_params = []
        for name in self.model_names:
            net = getattr(self, 'net_' + name)
            param_lst = list(net.named_parameters())
            # Parameters living under module.neural_points get their own optimizer.
            net_params = net_params + [par[1] for par in param_lst
                                       if not par[0].startswith("module.neural_points")]
            neural_params = neural_params + [par[1] for par in param_lst
                                             if par[0].startswith("module.neural_points")]
        self.net_params = net_params
        self.neural_params = neural_params

        self.optimizer = torch.optim.Adam(net_params, lr=opt.lr, betas=(0.9, 0.999))
        self.neural_point_optimizer = torch.optim.Adam(neural_params, lr=opt.lr, betas=(0.9, 0.999))
        self.optimizers = [self.optimizer, self.neural_point_optimizer]

    def backward(self, iters):
        # Alternate the two optimizers every `alter_step` iterations (0 = step both).
        [optimizer.zero_grad() for optimizer in self.optimizers]
        if self.opt.is_train:
            self.loss_total.backward()
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 0:
                self.optimizer.step()
            if self.opt.alter_step == 0 or int(iters / self.opt.alter_step) % 2 == 1:
                self.neural_point_optimizer.step()

    def optimize_parameters(self, backward=True, total_steps=0):
        self.forward()
        self.update_rank_ray_miss(total_steps)
        self.backward(total_steps)

    def update_rank_ray_miss(self, total_steps):
        raise NotImplementedError


class NeuralPointsRayMarching(nn.Module):
    """nn.Module wrapper that queries neural points along rays, aggregates
    per-sample features, and alpha-composites them into ray colors/depths."""

    def __init__(self,
                 tonemap_func=None,
                 render_func=None,
                 blend_func=None,
                 aggregator=None,
                 is_compute_depth=False,
                 neural_points=None,
                 opt=None,
                 num_pos_freqs=0,
                 num_viewdir_freqs=0,
                 **kwargs):
        super(NeuralPointsRayMarching, self).__init__()
        self.aggregator = aggregator
        self.num_pos_freqs = num_pos_freqs
        self.num_viewdir_freqs = num_viewdir_freqs
        # ray generation
        self.render_func = render_func
        self.blend_func = blend_func
        self.tone_map = tonemap_func
        self.return_depth = is_compute_depth
        self.return_color = True
        self.opt = opt
        self.neural_points = neural_points

    def forward(self,
                campos,
                raydir,
                gt_image=None,
                bg_color=None,
                camrotc2w=None,
                pixel_idx=None,
                near=None,
                far=None,
                focal=None,
                h=None,
                w=None,
                intrinsic=None,
                **kargs):
        output = {}
        # Query the point cloud: per-sample point attributes plus sample locations.
        sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, \
            sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, \
            sample_ray_dirs, ray_mask_tensor, vsize, grid_vox_sz = self.neural_points(
                {"pixel_idx": pixel_idx, "camrotc2w": camrotc2w, "campos": campos,
                 "near": near, "far": far, "focal": focal, "h": h, "w": w,
                 "intrinsic": intrinsic, "gt_image": gt_image, "raydir": raydir})

        # Aggregate the neighboring point features into per-sample features.
        decoded_features, ray_valid, weight, conf_coefficient = self.aggregator(
            sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding,
            sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w,
            sample_ray_dirs, vsize, grid_vox_sz)

        # Per-sample segment lengths along the ray (cummax guards against
        # non-monotone z); the last segment defaults to one voxel depth.
        ray_dist = torch.cummax(sample_loc[..., 2], dim=-1)[0]
        ray_dist = torch.cat([ray_dist[..., 1:] - ray_dist[..., :-1],
                              torch.full((ray_dist.shape[0], ray_dist.shape[1], 1),
                                         vsize[2], device=ray_dist.device)], dim=-1)

        # Clamp degenerate (near-zero / oversized) segments to one voxel depth.
        mask = ray_dist < 1e-8
        if self.opt.raydist_mode_unit > 0:
            mask = torch.logical_or(mask, ray_dist > 2 * vsize[2])
        mask = mask.to(torch.float32)
        ray_dist = ray_dist * (1.0 - mask) + mask * vsize[2]
        ray_dist *= ray_valid.float()

        # Rays with no valid sample at all are marked as (white) "queried shading".
        output["queried_shading"] = torch.logical_not(
            torch.any(ray_valid, dim=-1, keepdims=True)).repeat(1, 1, 3).to(torch.float32)

        if self.return_color:
            if "bg_ray" in kargs:
                bg_color = None
            (
                ray_color,
                point_color,
                opacity,
                acc_transmission,
                blend_weight,
                background_transmission,
                _,
            ) = ray_march(ray_dist, ray_valid, decoded_features,
                          self.render_func, self.blend_func, bg_color)
            ray_color = self.tone_map(ray_color)
            output["coarse_raycolor"] = ray_color
            output["coarse_point_opacity"] = opacity
        else:
            (
                opacity,
                acc_transmission,
                blend_weight,
                background_transmission,
                _,
            ) = alpha_ray_march(ray_dist, ray_valid, decoded_features, self.blend_func)

        if self.return_depth:
            alpha_blend_weight = opacity * acc_transmission
            weight = alpha_blend_weight.view(alpha_blend_weight.shape[:3])
            # NOTE(review): `ray_ts` is not defined anywhere in this file — this
            # branch looks like it would raise NameError when compute_depth is
            # on; presumably it should be the per-sample depth. Verify upstream.
            avg_depth = (weight * ray_ts).sum(-1) / (weight.sum(-1) + 1e-6)
            output["coarse_depth"] = avg_depth
        output["coarse_is_background"] = background_transmission
        output["ray_mask"] = ray_mask_tensor
        if weight is not None:
            output["weight"] = weight.detach()
            output["blend_weight"] = blend_weight.detach()
            output["conf_coefficient"] = conf_coefficient

        # Probe mode: record, per ray, the attributes of the most opaque sample.
        if self.opt.prob == 1 and output["coarse_point_opacity"].shape[1] > 0:
            B, OR, _, _ = sample_pnt_mask.shape
            if weight is not None:
                output["ray_max_shading_opacity"], opacity_ind = torch.max(
                    output["coarse_point_opacity"], dim=-1, keepdim=True)
                opacity_ind = opacity_ind[..., None]  # 1, 1024, 1, 1

                output["ray_max_sample_loc_w"] = torch.gather(
                    sample_loc_w, 2,
                    opacity_ind.expand(-1, -1, -1, sample_loc_w.shape[-1])).squeeze(2)  # 1, 1024, 24, 3 -> 1, 1024, 3

                weight = torch.gather(
                    weight * conf_coefficient, 2,
                    opacity_ind.expand(-1, -1, -1, weight.shape[-1])).squeeze(2)[..., None]  # 1, 1024, 8

                opacity_ind = opacity_ind[..., None]
                sampled_xyz_max_opacity = torch.gather(
                    sampled_xyz, 2,
                    opacity_ind.expand(-1, -1, -1, sampled_xyz.shape[-2],
                                       sampled_xyz.shape[-1])).squeeze(2)  # 1, 1024, 8, 3
                output["ray_max_far_dist"] = torch.min(
                    torch.norm(sampled_xyz_max_opacity -
                               output["ray_max_sample_loc_w"][..., None, :], dim=-1),
                    axis=-1, keepdim=True)[0]

                sampled_color = torch.gather(
                    sampled_color, 2,
                    opacity_ind.expand(-1, -1, -1, sampled_color.shape[-2],
                                       sampled_color.shape[-1])).squeeze(2) if sampled_color is not None else None  # 1, 1024, 8, 3
                sampled_dir = torch.gather(
                    sampled_dir, 2,
                    opacity_ind.expand(-1, -1, -1, sampled_dir.shape[-2],
                                       sampled_dir.shape[-1])).squeeze(2) if sampled_dir is not None else None  # 1, 1024, 8, 3
                sampled_conf = torch.gather(
                    sampled_conf, 2,
                    opacity_ind.expand(-1, -1, -1, sampled_conf.shape[-2],
                                       sampled_conf.shape[-1])).squeeze(2) if sampled_conf is not None else None  # 1, 1024, 8, 1
                sampled_embedding = torch.gather(
                    sampled_embedding, 2,
                    opacity_ind.expand(-1, -1, -1, sampled_embedding.shape[-2],
                                       sampled_embedding.shape[-1])).squeeze(2)  # 1, 1024, 8, 1

                output["shading_avg_color"] = torch.sum(sampled_color * weight, dim=-2) if sampled_color is not None else None
                output["shading_avg_dir"] = torch.sum(sampled_dir * weight, dim=-2) if sampled_dir is not None else None
                output["shading_avg_conf"] = torch.sum(sampled_conf * weight, dim=-2) if sampled_conf is not None else None
                output["shading_avg_embedding"] = torch.sum(sampled_embedding * weight, dim=-2)
            else:
                output.update({
                    "ray_max_shading_opacity": torch.zeros([0, 0, 1, 1], device="cuda"),
                    "ray_max_sample_loc_w": torch.zeros([0, 0, 3], device="cuda"),
                    "ray_max_far_dist": torch.zeros([0, 0, 1], device="cuda"),
                    "shading_avg_color": torch.zeros([0, 0, 3], device="cuda"),
                    "shading_avg_dir": torch.zeros([0, 0, 3], device="cuda"),
                    "shading_avg_conf": torch.zeros([0, 0, 1], device="cuda"),
                    "shading_avg_embedding": torch.zeros([0, 0, sampled_embedding.shape[-1]], device="cuda"),
                })
        return output
17,131
46.065934
416
py
pointnerf
pointnerf-master/models/neural_points/point_query.py
import torch
import torch.nn
import torch.nn.functional as F
import os
import numpy as np
from numpy import dot
from math import sqrt
import matplotlib.pyplot as plt
import pickle
import time
from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation

parent_dir = os.path.dirname(os.path.abspath(__file__))

from torch.utils.cpp_extension import load as load_cuda

# JIT-compile the CUDA extension used for voxel-grid point queries.
query_worldcoords_cuda = load_cuda(
    name='query_worldcoords_cuda',
    sources=[
        os.path.join(parent_dir, path)
        for path in ['cuda/query_worldcoords.cpp', 'cuda/query_worldcoords.cu']],
    verbose=True)


class lighting_fast_querier():
    """Voxel-grid accelerated query of neural points along camera rays.

    Wraps the ``query_worldcoords_cuda`` extension: given ray origins and
    directions, it samples positions along each ray and returns, per sample,
    the indices of nearby points.
    """

    def __init__(self, device, opt):
        print("querier device", device, device.index)
        self.device = "cuda"
        self.gpu = device.index
        self.opt = opt
        self.inverse = self.opt.inverse
        self.count = 0
        # Neighborhood radius cap, derived from the (x, y) voxel size.
        self.radius_limit_np = np.asarray(
            self.opt.radius_limit_scale * max(self.opt.vsize[0], self.opt.vsize[1])).astype(np.float32)
        self.vscale_np = np.array(self.opt.vscale, dtype=np.int32)
        self.scaled_vsize_np = (self.opt.vsize * self.vscale_np).astype(np.float32)
        self.scaled_vsize_tensor = torch.as_tensor(self.scaled_vsize_np, device=device)
        self.kernel_size = np.asarray(self.opt.kernel_size, dtype=np.int32)
        self.kernel_size_tensor = torch.as_tensor(self.kernel_size, device=device)
        self.query_size = np.asarray(self.opt.query_size, dtype=np.int32)
        self.query_size_tensor = torch.as_tensor(self.query_size, device=device)

    def clean_up(self):
        # Nothing to release; kept for interface parity with other queriers.
        pass

    def get_hyperparameters(self, vsize_np, point_xyz_w_tensor, ranges=None):
        """Compute the voxel-grid bounds and dimensions for the point cloud.

        Args:
            vsize_np: per-axis voxel size (3,).
            point_xyz_w_tensor: world-space point positions, (B, N, 3).
            ranges: optional (6,) [min_xyz, max_xyz] clip box; None = use the
                point cloud's own bounding box.

        Returns:
            (ranges_tensor (6,), vsize_np, scaled_vdim_np (3,) int32).
        """
        min_xyz, max_xyz = torch.min(point_xyz_w_tensor, dim=-2)[0][0], \
            torch.max(point_xyz_w_tensor, dim=-2)[0][0]
        if ranges is not None:
            # FIX: ranges[:3] / ranges[3:] were originally indexed *before* this
            # None check, so the documented default ranges=None crashed with a
            # TypeError. Clip the bounding box to the user-provided ranges only
            # when they are given.
            ranges_min = torch.as_tensor(ranges[:3], dtype=torch.float32, device=min_xyz.device)
            ranges_max = torch.as_tensor(ranges[3:], dtype=torch.float32, device=min_xyz.device)
            min_xyz = torch.max(torch.stack([min_xyz, ranges_min], dim=0), dim=0)[0]
            max_xyz = torch.min(torch.stack([max_xyz, ranges_max], dim=0), dim=0)[0]

        # Pad by half a (scaled) query kernel so boundary points keep full neighborhoods.
        pad = torch.as_tensor(self.scaled_vsize_np * self.opt.kernel_size / 2,
                              device=min_xyz.device, dtype=torch.float32)
        min_xyz = min_xyz - pad
        max_xyz = max_xyz + pad

        ranges_tensor = torch.cat([min_xyz, max_xyz], dim=-1)
        vdim_np = (max_xyz - min_xyz).cpu().numpy() / vsize_np
        scaled_vdim_np = np.ceil(vdim_np / self.vscale_np).astype(np.int32)
        return ranges_tensor, vsize_np, scaled_vdim_np

    def query_points(self, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor,
                     actual_numpoints_tensor, h, w, intrinsic, near_depth, far_depth,
                     ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor):
        """Sample positions along rays and gather neighboring point indices.

        Returns (sample_pidx, sample_loc_pers, sample_loc_w, sample_ray_dirs,
        ray_mask, vsize_np, ranges_np).
        """
        near_depth, far_depth = np.asarray(near_depth).item(), np.asarray(far_depth).item()
        ranges_tensor, vsize_np, scaled_vdim_np = self.get_hyperparameters(
            self.opt.vsize, point_xyz_w_tensor, ranges=self.opt.ranges)

        # Sample ray positions either uniformly in disparity or in depth.
        if self.opt.inverse > 0:
            raypos_tensor, _, _, _ = near_far_disparity_linear_ray_generation(
                cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim,
                near=near_depth, far=far_depth,
                jitter=0.3 if self.opt.is_train > 0 else 0.)
        else:
            raypos_tensor, _, _, _ = near_far_linear_ray_generation(
                cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim,
                near=near_depth, far=far_depth,
                jitter=0.3 if self.opt.is_train > 0 else 0.)

        D = raypos_tensor.shape[2]
        R = pixel_idx_tensor.reshape(point_xyz_w_tensor.shape[0], -1, 2).shape[1]

        sample_pidx_tensor, sample_loc_w_tensor, ray_mask_tensor = \
            query_worldcoords_cuda.woord_query_grid_point_index(
                pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor,
                actual_numpoints_tensor, self.kernel_size_tensor,
                self.query_size_tensor, self.opt.SR, self.opt.K, R, D,
                torch.as_tensor(scaled_vdim_np, device=self.device),
                self.opt.max_o, self.opt.P, self.radius_limit_np,
                ranges_tensor, self.scaled_vsize_tensor,
                self.opt.gpu_maxthr, self.opt.NN)

        # Keep only rays that hit at least one point; broadcast dirs over SR samples.
        sample_ray_dirs_tensor = torch.masked_select(
            ray_dirs_tensor, ray_mask_tensor[..., None] > 0).reshape(
                ray_dirs_tensor.shape[0], -1, 3)[..., None, :].expand(
                    -1, -1, self.opt.SR, -1).contiguous()

        return sample_pidx_tensor, \
            self.w2pers(sample_loc_w_tensor, cam_rot_tensor, cam_pos_tensor), \
            sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, \
            vsize_np, ranges_tensor.cpu().numpy()

    def w2pers(self, point_xyz_w, camrotc2w, campos):
        """World coords (B, M, 3) -> perspective coords (x/z, y/z, z) in camera space."""
        xyz_w_shift = point_xyz_w - campos[:, None, :]
        xyz_c = torch.sum(xyz_w_shift[..., None, :] *
                          torch.transpose(camrotc2w, 1, 2)[:, None, None, ...], dim=-1)
        z_pers = xyz_c[..., 2]
        x_pers = xyz_c[..., 0] / xyz_c[..., 2]
        y_pers = xyz_c[..., 1] / xyz_c[..., 2]
        return torch.stack([x_pers, y_pers, z_pers], dim=-1)
5,914
53.768519
209
py
pointnerf
pointnerf-master/models/neural_points/query_point_indices_worldcoords.py
import os import numpy as np from numpy import dot from math import sqrt import pycuda from pycuda.compiler import SourceModule import pycuda.driver as drv import pycuda.gpuarray as gpuarray import matplotlib.pyplot as plt import torch import pickle import time from models.rendering.diff_ray_marching import near_far_linear_ray_generation, near_far_disparity_linear_ray_generation from data.load_blender import load_blender_data # X = torch.cuda.FloatTensor(8) class Holder(pycuda.driver.PointerHolderBase): def __init__(self, t): super(Holder, self).__init__() self.t = t self.gpudata = t.data_ptr() def get_pointer(self): return self.t.data_ptr() class lighting_fast_querier(): def __init__(self, device, opt): print("querier device", device, device.index) self.gpu = device.index self.opt = opt drv.init() # self.device = drv.Device(gpu) self.ctx = drv.Device(self.gpu).make_context() self.claim_occ, self.map_coor2occ, self.fill_occ2pnts, self.mask_raypos, self.get_shadingloc, self.query_along_ray = self.build_cuda() self.inverse = self.opt.inverse self.count=0 def clean_up(self): self.ctx.pop() def get_hyperparameters(self, vsize_np, point_xyz_w_tensor, ranges=None): ''' :param l: :param h: :param w: :param zdim: :param ydim: :param xdim: :return: ''' min_xyz, max_xyz = torch.min(point_xyz_w_tensor, dim=-2)[0][0], torch.max(point_xyz_w_tensor, dim=-2)[0][0] vscale_np = np.array(self.opt.vscale, dtype=np.int32) scaled_vsize_np = (vsize_np * vscale_np).astype(np.float32) if ranges is not None: # print("min_xyz", min_xyz.shape) # print("max_xyz", max_xyz.shape) # print("ranges", ranges) min_xyz, max_xyz = torch.max(torch.stack([min_xyz, torch.as_tensor(ranges[:3], dtype=torch.float32, device=min_xyz.device)], dim=0), dim=0)[0], torch.min(torch.stack([max_xyz, torch.as_tensor(ranges[3:], dtype=torch.float32, device=min_xyz.device)], dim=0), dim=0)[0] min_xyz = min_xyz - torch.as_tensor(scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32) 
max_xyz = max_xyz + torch.as_tensor(scaled_vsize_np * self.opt.kernel_size / 2, device=min_xyz.device, dtype=torch.float32) ranges_np = torch.cat([min_xyz, max_xyz], dim=-1).cpu().numpy().astype(np.float32) # print("ranges_np",ranges_np) vdim_np = (max_xyz - min_xyz).cpu().numpy() / vsize_np scaled_vdim_np = np.ceil(vdim_np / vscale_np).astype(np.int32) ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = np_to_gpuarray( ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, np.asarray(self.opt.kernel_size, dtype=np.int32), np.asarray(self.opt.query_size, dtype=np.int32)) radius_limit_np, depth_limit_np = self.opt.radius_limit_scale * max(vsize_np[0], vsize_np[1]), self.opt.depth_limit_scale * vsize_np[2] return np.asarray(radius_limit_np).astype(np.float32), np.asarray(depth_limit_np).astype(np.float32), ranges_np, vsize_np, vdim_np, scaled_vsize_np, scaled_vdim_np, vscale_np, ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu def query_points(self, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, h, w, intrinsic, near_depth, far_depth, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor): near_depth, far_depth = np.asarray(near_depth).item() , np.asarray(far_depth).item() radius_limit_np, depth_limit_np, ranges_np, vsize_np, vdim_np, scaled_vsize_np, scaled_vdim_np, vscale_np, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = self.get_hyperparameters(self.opt.vsize, point_xyz_w_tensor, ranges=self.opt.ranges) # print("self.opt.ranges", self.opt.ranges, range_gpu, ray_dirs_tensor) if self.opt.inverse > 0: raypos_tensor, _, _, _ = near_far_disparity_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.) 
else: raypos_tensor, _, _, _ = near_far_linear_ray_generation(cam_pos_tensor, ray_dirs_tensor, self.opt.z_depth_dim, near=near_depth, far=far_depth, jitter=0.3 if self.opt.is_train > 0 else 0.) sample_pidx_tensor, sample_loc_w_tensor, ray_mask_tensor = self.query_grid_point_index(h, w, pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, self.opt.SR, self.opt.K, ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, self.opt.max_o, self.opt.P, radius_limit_np, depth_limit_np, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, ray_dirs_tensor, cam_pos_tensor, kMaxThreadsPerBlock=self.opt.gpu_maxthr) sample_ray_dirs_tensor = torch.masked_select(ray_dirs_tensor, ray_mask_tensor[..., None]>0).reshape(ray_dirs_tensor.shape[0],-1,3)[...,None,:].expand(-1, -1, self.opt.SR, -1).contiguous() # print("sample_ray_dirs_tensor", sample_ray_dirs_tensor.shape) return sample_pidx_tensor, self.w2pers(sample_loc_w_tensor, cam_rot_tensor, cam_pos_tensor), sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize_np, ranges_np def w2pers(self, point_xyz_w, camrotc2w, campos): # point_xyz_pers B X M X 3 xyz_w_shift = point_xyz_w - campos[:, None, :] xyz_c = torch.sum(xyz_w_shift[..., None,:] * torch.transpose(camrotc2w, 1, 2)[:, None, None,...], dim=-1) z_pers = xyz_c[..., 2] x_pers = xyz_c[..., 0] / xyz_c[..., 2] y_pers = xyz_c[..., 1] / xyz_c[..., 2] return torch.stack([x_pers, y_pers, z_pers], dim=-1) def build_cuda(self): mod = SourceModule( """ #define KN """ + str(self.opt.K) + """ #include <cuda.h> #include <cuda_runtime.h> #include <algorithm> #include <vector> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <curand_kernel.h> namespace cuda { static __device__ inline uint8_t atomicAdd(uint8_t *address, uint8_t val) { size_t offset = (size_t)address & 3; uint32_t *address_as_ui = (uint32_t *)(address - offset); uint32_t old = *address_as_ui; uint32_t shift = offset * 8; uint32_t 
old_byte; uint32_t newval; uint32_t assumed; do { assumed = old; old_byte = (old >> shift) & 0xff; // preserve size in initial cast. Casting directly to uint32_t pads // negative signed values with 1's (e.g. signed -1 = unsigned ~0). newval = static_cast<uint8_t>(val + old_byte); newval = (old & ~(0x000000ff << shift)) | (newval << shift); old = atomicCAS(address_as_ui, assumed, newval); } while (assumed != old); return __byte_perm(old, 0, offset); // need validate } static __device__ inline char atomicAdd(char* address, char val) { // offset, in bytes, of the char* address within the 32-bit address of the space that overlaps it size_t long_address_modulo = (size_t) address & 3; // the 32-bit address that overlaps the same memory auto* base_address = (unsigned int*) ((char*) address - long_address_modulo); // A 0x3210 selector in __byte_perm will simply select all four bytes in the first argument in the same order. // The "4" signifies the position where the first byte of the second argument will end up in the output. 
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210}; // for selecting bytes within a 32-bit chunk that correspond to the char* address (relative to base_address) unsigned int selector = selectors[long_address_modulo]; unsigned int long_old, long_assumed, long_val, replacement; long_old = *base_address; do { long_assumed = long_old; // replace bits in long_old that pertain to the char address with those from val long_val = __byte_perm(long_old, 0, long_address_modulo) + val; replacement = __byte_perm(long_old, long_val, selector); long_old = atomicCAS(base_address, long_assumed, replacement); } while (long_old != long_assumed); return __byte_perm(long_old, 0, long_address_modulo); } static __device__ inline int8_t atomicAdd(int8_t *address, int8_t val) { return (int8_t)cuda::atomicAdd((char*)address, (char)val); } static __device__ inline short atomicAdd(short* address, short val) { unsigned int *base_address = (unsigned int *)((size_t)address & ~2); unsigned int long_val = ((size_t)address & 2) ? 
((unsigned int)val << 16) : (unsigned short)val; unsigned int long_old = ::atomicAdd(base_address, long_val); if((size_t)address & 2) { return (short)(long_old >> 16); } else { unsigned int overflow = ((long_old & 0xffff) + long_val) & 0xffff0000; if (overflow) atomicSub(base_address, overflow); return (short)(long_old & 0xffff); } } static __device__ float cas(double *addr, double compare, double val) { unsigned long long int *address_as_ull = (unsigned long long int *) addr; return __longlong_as_double(atomicCAS(address_as_ull, __double_as_longlong(compare), __double_as_longlong(val))); } static __device__ float cas(float *addr, float compare, float val) { unsigned int *address_as_uint = (unsigned int *) addr; return __uint_as_float(atomicCAS(address_as_uint, __float_as_uint(compare), __float_as_uint(val))); } static __device__ inline uint8_t atomicCAS(uint8_t * const address, uint8_t const compare, uint8_t const value) { uint8_t const longAddressModulo = reinterpret_cast< size_t >( address ) & 0x3; uint32_t *const baseAddress = reinterpret_cast< uint32_t * >( address - longAddressModulo ); uint32_t constexpr byteSelection[] = { 0x3214, 0x3240, 0x3410, 0x4210 }; // The byte position we work on is '4'. uint32_t const byteSelector = byteSelection[ longAddressModulo ]; uint32_t const longCompare = compare; uint32_t const longValue = value; uint32_t longOldValue = * baseAddress; uint32_t longAssumed; uint8_t oldValue; do { // Select bytes from the old value and new value to construct a 32-bit value to use. uint32_t const replacement = __byte_perm( longOldValue, longValue, byteSelector ); uint32_t const comparison = __byte_perm( longOldValue, longCompare, byteSelector ); longAssumed = longOldValue; // Use 32-bit atomicCAS() to try and set the 8-bits we care about. longOldValue = ::atomicCAS( baseAddress, comparison, replacement ); // Grab the 8-bit portion we care about from the old value at address. 
oldValue = ( longOldValue >> ( 8 * longAddressModulo )) & 0xFF; } while ( compare == oldValue and longAssumed != longOldValue ); // Repeat until other three 8-bit values stabilize. return oldValue; } } extern "C" { __global__ void claim_occ( const float* in_data, // B * N * 3 const int* in_actual_numpoints, // B const int B, const int N, const float *d_coord_shift, // 3 const float *d_voxel_size, // 3 const int *d_grid_size, // 3 const int grid_size_vol, const int max_o, int* occ_idx, // B, all 0 int *coor_2_occ, // B * 400 * 400 * 400, all -1 int *occ_2_coor, // B * max_o * 3, all -1 unsigned long seconds ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / N; // index of batch if (i_batch >= B) { return; } int i_pt = index - N * i_batch; if (i_pt < in_actual_numpoints[i_batch]) { int coor[3]; const float *p_pt = in_data + index * 3; coor[0] = (int) floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]); coor[1] = (int) floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]); coor[2] = (int) floor((p_pt[2] - d_coord_shift[2]) / d_voxel_size[2]); // printf("p_pt %f %f %f %f; ", p_pt[2], d_coord_shift[2], d_coord_shift[0], d_coord_shift[1]); if (coor[0] < 0 || coor[0] >= d_grid_size[0] || coor[1] < 0 || coor[1] >= d_grid_size[1] || coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; } int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2]; int voxel_idx = coor_2_occ[coor_indx_b]; if (voxel_idx == -1) { // found an empty voxel int old_voxel_num = atomicCAS( &coor_2_occ[coor_indx_b], -1, 0 ); if (old_voxel_num == -1) { // CAS -> old val, if old val is -1 // if we get -1, this thread is the one who obtain a new voxel // so only this thread should do the increase operator below int tmp = atomicAdd(occ_idx+i_batch, 1); // increase the counter, return old counter // increase the counter, return old counter if (tmp < max_o) { int coord_inds = (i_batch * max_o + 
tmp) * 3; occ_2_coor[coord_inds] = coor[0]; occ_2_coor[coord_inds + 1] = coor[1]; occ_2_coor[coord_inds + 2] = coor[2]; } else { curandState state; curand_init(index+2*seconds, 0, 0, &state); int insrtidx = ceilf(curand_uniform(&state) * (tmp+1)) - 1; if(insrtidx < max_o){ int coord_inds = (i_batch * max_o + insrtidx) * 3; occ_2_coor[coord_inds] = coor[0]; occ_2_coor[coord_inds + 1] = coor[1]; occ_2_coor[coord_inds + 2] = coor[2]; } } } } } } __global__ void map_coor2occ( const int B, const int *d_grid_size, // 3 const int *kernel_size, // 3 const int grid_size_vol, const int max_o, int* occ_idx, // B, all -1 int *coor_occ, // B * 400 * 400 * 400 int *coor_2_occ, // B * 400 * 400 * 400 int *occ_2_coor // B * max_o * 3 ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / max_o; // index of batch if (i_batch >= B) { return; } int i_pt = index - max_o * i_batch; if (i_pt < occ_idx[i_batch] && i_pt < max_o) { int coor[3]; coor[0] = occ_2_coor[index*3]; if (coor[0] < 0) { return; } coor[1] = occ_2_coor[index*3+1]; coor[2] = occ_2_coor[index*3+2]; int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2]; coor_2_occ[coor_indx_b] = i_pt; // printf("kernel_size[0] %d", kernel_size[0]); for (int coor_x = max(0, coor[0] - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], coor[0] + (kernel_size[0] + 1) / 2); coor_x++) { for (int coor_y = max(0, coor[1] - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], coor[1] + (kernel_size[1] + 1) / 2); coor_y++) { for (int coor_z = max(0, coor[2] - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], coor[2] + (kernel_size[2] + 1) / 2); coor_z++) { coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z; if (coor_occ[coor_indx_b] > 0) { continue; } atomicCAS(coor_occ + coor_indx_b, 0, 1); } } } } } __global__ void fill_occ2pnts( const float* in_data, // B * N * 3 
const int* in_actual_numpoints, // B const int B, const int N, const int P, const float *d_coord_shift, // 3 const float *d_voxel_size, // 3 const int *d_grid_size, // 3 const int grid_size_vol, const int max_o, int *coor_2_occ, // B * 400 * 400 * 400, all -1 int *occ_2_pnts, // B * max_o * P, all -1 int *occ_numpnts, // B * max_o, all 0 unsigned long seconds ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / N; // index of batch if (i_batch >= B) { return; } int i_pt = index - N * i_batch; if (i_pt < in_actual_numpoints[i_batch]) { int coor[3]; const float *p_pt = in_data + index * 3; coor[0] = (int) floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]); coor[1] = (int) floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]); coor[2] = (int) floor((p_pt[2] - d_coord_shift[2]) / d_voxel_size[2]); if (coor[0] < 0 || coor[0] >= d_grid_size[0] || coor[1] < 0 || coor[1] >= d_grid_size[1] || coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; } int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2]; int voxel_idx = coor_2_occ[coor_indx_b]; if (voxel_idx > 0) { // found an claimed coor2occ int occ_indx_b = i_batch * max_o + voxel_idx; int tmp = atomicAdd(occ_numpnts + occ_indx_b, 1); // increase the counter, return old counter if (tmp < P) { occ_2_pnts[occ_indx_b * P + tmp] = i_pt; } else { curandState state; curand_init(index+2*seconds, 0, 0, &state); int insrtidx = ceilf(curand_uniform(&state) * (tmp+1)) - 1; if(insrtidx < P){ occ_2_pnts[occ_indx_b * P + insrtidx] = i_pt; } } } } } __global__ void mask_raypos( float *raypos, // [B, 2048, 400, 3] int *coor_occ, // B * 400 * 400 * 400 const int B, // 3 const int R, // 3 const int D, // 3 const int grid_size_vol, const float *d_coord_shift, // 3 const int *d_grid_size, // 3 const float *d_voxel_size, // 3 int *raypos_mask // B, R, D ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu 
thread int i_batch = index / (R * D); // index of batch if (i_batch >= B) { return; } int coor[3]; coor[0] = (int) floor((raypos[index*3] - d_coord_shift[0]) / d_voxel_size[0]); coor[1] = (int) floor((raypos[index*3+1] - d_coord_shift[1]) / d_voxel_size[1]); coor[2] = (int) floor((raypos[index*3+2] - d_coord_shift[2]) / d_voxel_size[2]); // printf(" %f %f %f;", raypos[index*3], raypos[index*3+1], raypos[index*3+2]); if ((coor[0] >= 0) && (coor[0] < d_grid_size[0]) && (coor[1] >= 0) && (coor[1] < d_grid_size[1]) && (coor[2] >= 0) && (coor[2] < d_grid_size[2])) { int coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2]; raypos_mask[index] = coor_occ[coor_indx_b]; } } __global__ void get_shadingloc( const float *raypos, // [B, 2048, 400, 3] const int *raypos_mask, // B, R, D const int B, // 3 const int R, // 3 const int D, // 3 const int SR, // 3 float *sample_loc, // B * R * SR * 3 int *sample_loc_mask // B * R * SR ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / (R * D); // index of batch if (i_batch >= B) { return; } int temp = raypos_mask[index]; if (temp >= 0) { int r = (index - i_batch * R * D) / D; int loc_inds = i_batch * R * SR + r * SR + temp; sample_loc[loc_inds * 3] = raypos[index * 3]; sample_loc[loc_inds * 3 + 1] = raypos[index * 3 + 1]; sample_loc[loc_inds * 3 + 2] = raypos[index * 3 + 2]; sample_loc_mask[loc_inds] = 1; } } __global__ void query_neigh_along_ray_layered( const float* in_data, // B * N * 3 const int B, const int SR, // num. samples along each ray e.g., 128 const int R, // e.g., 1024 const int max_o, const int P, const int K, // num. 
neighbors const int grid_size_vol, const float radius_limit2, const float *d_coord_shift, // 3 const int *d_grid_size, const float *d_voxel_size, // 3 const int *kernel_size, const int *occ_numpnts, // B * max_o const int *occ_2_pnts, // B * max_o * P const int *coor_2_occ, // B * 400 * 400 * 400 const float *sample_loc, // B * R * SR * 3 const int *sample_loc_mask, // B * R * SR int *sample_pidx, // B * R * SR * K unsigned long seconds, const int NN ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / (R * SR); // index of batch if (i_batch >= B || sample_loc_mask[index] <= 0) { return; } float centerx = sample_loc[index * 3]; float centery = sample_loc[index * 3 + 1]; float centerz = sample_loc[index * 3 + 2]; int frustx = (int) floor((centerx - d_coord_shift[0]) / d_voxel_size[0]); int frusty = (int) floor((centery - d_coord_shift[1]) / d_voxel_size[1]); int frustz = (int) floor((centerz - d_coord_shift[2]) / d_voxel_size[2]); centerx = sample_loc[index * 3]; centery = sample_loc[index * 3 + 1]; centerz = sample_loc[index * 3 + 2]; int kid = 0, far_ind = 0, coor_z, coor_y, coor_x; float far2 = 0.0; float xyz2Buffer[KN]; for (int layer = 0; layer < (kernel_size[0]+1)/2; layer++){ for (int x = max(-frustx, -layer); x < min(d_grid_size[0] - frustx, layer + 1); x++) { coor_x = frustx + x; for (int y = max(-frusty, -layer); y < min(d_grid_size[1] - frusty, layer + 1); y++) { coor_y = frusty + y; for (int z = max(-frustz, -layer); z < min(d_grid_size[2] - frustz, layer + 1); z++) { coor_z = z + frustz; if (max(abs(z), max(abs(x), abs(y))) != layer) continue; int coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z; int occ_indx = coor_2_occ[coor_indx_b] + i_batch * max_o; if (occ_indx >= 0) { for (int g = 0; g < min(P, occ_numpnts[occ_indx]); g++) { int pidx = occ_2_pnts[occ_indx * P + g]; float x_v = (in_data[pidx*3]-centerx); float y_v = (in_data[pidx*3 + 
1]-centery); float z_v = (in_data[pidx*3 + 2]-centerz); float xyz2 = x_v * x_v + y_v * y_v + z_v * z_v; if ((radius_limit2 == 0 || xyz2 <= radius_limit2)){ if (kid++ < K) { sample_pidx[index * K + kid - 1] = pidx; xyz2Buffer[kid-1] = xyz2; if (xyz2 > far2){ far2 = xyz2; far_ind = kid - 1; } } else { if (xyz2 < far2) { sample_pidx[index * K + far_ind] = pidx; xyz2Buffer[far_ind] = xyz2; far2 = xyz2; for (int i = 0; i < K; i++) { if (xyz2Buffer[i] > far2) { far2 = xyz2Buffer[i]; far_ind = i; } } } } } } } } } } if (kid >= K) break; } } } """, no_extern_c=True) claim_occ = mod.get_function("claim_occ") map_coor2occ = mod.get_function("map_coor2occ") fill_occ2pnts = mod.get_function("fill_occ2pnts") mask_raypos = mod.get_function("mask_raypos") get_shadingloc = mod.get_function("get_shadingloc") query_along_ray = mod.get_function("query_neigh_along_ray_layered") if self.opt.NN > 0 else mod.get_function("query_rand_along_ray") return claim_occ, map_coor2occ, fill_occ2pnts, mask_raypos, get_shadingloc, query_along_ray def switch_pixel_id(self, pixel_idx_tensor, h): pixel_id = torch.cat([pixel_idx_tensor[..., 0:1], h - 1 - pixel_idx_tensor[..., 1:2]], dim=-1) # print("pixel_id", pixel_id.shape, torch.min(pixel_id, dim=-2)[0], torch.max(pixel_id, dim=-2)[0]) return pixel_id def build_occ_vox(self, point_xyz_w_tensor, actual_numpoints_tensor, B, N, P, max_o, scaled_vdim_np, kMaxThreadsPerBlock, gridSize, scaled_vsize_gpu, scaled_vdim_gpu, kernel_size_gpu, grid_size_vol, d_coord_shift): device = point_xyz_w_tensor.device coor_occ_tensor = torch.zeros([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], dtype=torch.int32, device=device) occ_2_pnts_tensor = torch.full([B, max_o, P], -1, dtype=torch.int32, device=device) occ_2_coor_tensor = torch.full([B, max_o, 3], -1, dtype=torch.int32, device=device) occ_numpnts_tensor = torch.zeros([B, max_o], dtype=torch.int32, device=device) coor_2_occ_tensor = torch.full([B, scaled_vdim_np[0], scaled_vdim_np[1], 
scaled_vdim_np[2]], -1, dtype=torch.int32, device=device) occ_idx_tensor = torch.zeros([B], dtype=torch.int32, device=device) seconds = time.time() self.claim_occ( Holder(point_xyz_w_tensor), Holder(actual_numpoints_tensor), np.int32(B), np.int32(N), d_coord_shift, scaled_vsize_gpu, scaled_vdim_gpu, np.int32(grid_size_vol), np.int32(max_o), Holder(occ_idx_tensor), Holder(coor_2_occ_tensor), Holder(occ_2_coor_tensor), np.uint64(seconds), block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)) # torch.cuda.synchronize() coor_2_occ_tensor = torch.full([B, scaled_vdim_np[0], scaled_vdim_np[1], scaled_vdim_np[2]], -1, dtype=torch.int32, device=device) gridSize = int((B * max_o + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock) self.map_coor2occ( np.int32(B), scaled_vdim_gpu, kernel_size_gpu, np.int32(grid_size_vol), np.int32(max_o), Holder(occ_idx_tensor), Holder(coor_occ_tensor), Holder(coor_2_occ_tensor), Holder(occ_2_coor_tensor), block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)) # torch.cuda.synchronize() seconds = time.time() gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock) self.fill_occ2pnts( Holder(point_xyz_w_tensor), Holder(actual_numpoints_tensor), np.int32(B), np.int32(N), np.int32(P), d_coord_shift, scaled_vsize_gpu, scaled_vdim_gpu, np.int32(grid_size_vol), np.int32(max_o), Holder(coor_2_occ_tensor), Holder(occ_2_pnts_tensor), Holder(occ_numpnts_tensor), np.uint64(seconds), block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)) # torch.cuda.synchronize() return coor_occ_tensor, occ_2_coor_tensor, coor_2_occ_tensor, occ_idx_tensor, occ_numpnts_tensor, occ_2_pnts_tensor def query_grid_point_index(self, h, w, pixel_idx_tensor, raypos_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, SR, K, ranges_np, scaled_vsize_np, scaled_vdim_np, vscale_np, max_o, P, radius_limit_np, depth_limit_np, ranges_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, ray_dirs_tensor, cam_pos_tensor, kMaxThreadsPerBlock = 
1024): device = point_xyz_w_tensor.device B, N = point_xyz_w_tensor.shape[0], point_xyz_w_tensor.shape[1] pixel_size = scaled_vdim_np[0] * scaled_vdim_np[1] grid_size_vol = pixel_size * scaled_vdim_np[2] d_coord_shift = ranges_gpu[:3] R, D = raypos_tensor.shape[1], raypos_tensor.shape[2] R = pixel_idx_tensor.reshape(B, -1, 2).shape[1] gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock) coor_occ_tensor, occ_2_coor_tensor, coor_2_occ_tensor, occ_idx_tensor, occ_numpnts_tensor, occ_2_pnts_tensor = self.build_occ_vox(point_xyz_w_tensor, actual_numpoints_tensor, B, N, P, max_o, scaled_vdim_np, kMaxThreadsPerBlock, gridSize, scaled_vsize_gpu, scaled_vdim_gpu, query_size_gpu, grid_size_vol, d_coord_shift) # torch.cuda.synchronize() # print("coor_occ_tensor", torch.min(coor_occ_tensor), torch.max(coor_occ_tensor), torch.min(occ_2_coor_tensor), torch.max(occ_2_coor_tensor), torch.min(coor_2_occ_tensor), torch.max(coor_2_occ_tensor), torch.min(occ_idx_tensor), torch.max(occ_idx_tensor), torch.min(occ_numpnts_tensor), torch.max(occ_numpnts_tensor), torch.min(occ_2_pnts_tensor), torch.max(occ_2_pnts_tensor), occ_2_pnts_tensor.shape) # print("occ_numpnts_tensor", torch.sum(occ_numpnts_tensor > 0), ranges_np) # vis_vox(ranges_np, scaled_vsize_np, coor_2_occ_tensor) raypos_mask_tensor = torch.zeros([B, R, D], dtype=torch.int32, device=device) gridSize = int((B * R * D + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock) self.mask_raypos( Holder(raypos_tensor), # [1, 2048, 400, 3] Holder(coor_occ_tensor), # [1, 2048, 400, 3] np.int32(B), np.int32(R), np.int32(D), np.int32(grid_size_vol), d_coord_shift, scaled_vdim_gpu, scaled_vsize_gpu, Holder(raypos_mask_tensor), block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1) ) # torch.cuda.synchronize() # print("raypos_mask_tensor", raypos_mask_tensor.shape, torch.sum(coor_occ_tensor), torch.sum(raypos_mask_tensor)) # save_points(raypos_tensor.reshape(-1, 3), "./", "rawraypos_pnts") # raypos_masked = 
torch.masked_select(raypos_tensor, raypos_mask_tensor[..., None] > 0) # save_points(raypos_masked.reshape(-1, 3), "./", "raypos_pnts") ray_mask_tensor = torch.max(raypos_mask_tensor, dim=-1)[0] > 0 # B, R R = torch.sum(ray_mask_tensor.to(torch.int32)).cpu().numpy() # print("R", torch.sum(ray_mask_tensor.to(torch.int32)), R) sample_loc_tensor = torch.zeros([B, R, SR, 3], dtype=torch.float32, device=device) sample_pidx_tensor = torch.full([B, R, SR, K], -1, dtype=torch.int32, device=device) if R > 0: raypos_tensor = torch.masked_select(raypos_tensor, ray_mask_tensor[..., None, None].expand(-1, -1, D, 3)).reshape(B, R, D, 3) raypos_mask_tensor = torch.masked_select(raypos_mask_tensor, ray_mask_tensor[..., None].expand(-1, -1, D)).reshape(B, R, D) # print("R", R, raypos_tensor.shape, raypos_mask_tensor.shape) raypos_maskcum = torch.cumsum(raypos_mask_tensor, dim=-1).to(torch.int32) raypos_mask_tensor = (raypos_mask_tensor * raypos_maskcum * (raypos_maskcum <= SR)) - 1 sample_loc_mask_tensor = torch.zeros([B, R, SR], dtype=torch.int32, device=device) self.get_shadingloc( Holder(raypos_tensor), # [1, 2048, 400, 3] Holder(raypos_mask_tensor), np.int32(B), np.int32(R), np.int32(D), np.int32(SR), Holder(sample_loc_tensor), Holder(sample_loc_mask_tensor), block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1) ) # torch.cuda.synchronize() # print("shadingloc_mask_tensor", torch.sum(sample_loc_mask_tensor, dim=-1), torch.sum(torch.sum(sample_loc_mask_tensor, dim=-1) > 0), torch.sum(sample_loc_mask_tensor > 0)) # shadingloc_masked = torch.masked_select(sample_loc_tensor, sample_loc_mask_tensor[..., None] > 0) # save_points(shadingloc_masked.reshape(-1, 3), "./", "shading_pnts{}".format(self.count)) seconds = time.time() gridSize = int((B * R * SR + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock) self.query_along_ray( Holder(point_xyz_w_tensor), np.int32(B), np.int32(SR), np.int32(R), np.int32(max_o), np.int32(P), np.int32(K), np.int32(grid_size_vol), np.float32(radius_limit_np 
** 2), d_coord_shift, scaled_vdim_gpu, scaled_vsize_gpu, kernel_size_gpu, Holder(occ_numpnts_tensor), Holder(occ_2_pnts_tensor), Holder(coor_2_occ_tensor), Holder(sample_loc_tensor), Holder(sample_loc_mask_tensor), Holder(sample_pidx_tensor), np.uint64(seconds), np.int32(self.opt.NN), block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1)) # torch.cuda.synchronize() # print("point_xyz_w_tensor",point_xyz_w_tensor.shape) # queried_masked = point_xyz_w_tensor[0][sample_pidx_tensor.reshape(-1).to(torch.int64), :] # save_points(queried_masked.reshape(-1, 3), "./", "queried_pnts{}".format(self.count)) # print("valid ray", torch.sum(torch.sum(sample_loc_mask_tensor, dim=-1) > 0)) # masked_valid_ray = torch.sum(sample_pidx_tensor.view(B, R, -1) >= 0, dim=-1) > 0 R = torch.max(torch.sum(masked_valid_ray.to(torch.int32), dim=-1)).cpu().numpy() ray_mask_tensor.masked_scatter_(ray_mask_tensor, masked_valid_ray) sample_pidx_tensor = torch.masked_select(sample_pidx_tensor, masked_valid_ray[..., None, None].expand(-1, -1, SR, K)).reshape(B, R, SR, K) sample_loc_tensor = torch.masked_select(sample_loc_tensor, masked_valid_ray[..., None, None].expand(-1, -1, SR, 3)).reshape(B, R, SR, 3) # self.count+=1 return sample_pidx_tensor, sample_loc_tensor, ray_mask_tensor.to(torch.int8) def load_pnts(point_path, point_num): with open(point_path, 'rb') as f: print("point_file_path################", point_path) all_infos = pickle.load(f) point_xyz = all_infos["point_xyz"] print(len(point_xyz), point_xyz.dtype, np.mean(point_xyz, axis=0), np.min(point_xyz, axis=0), np.max(point_xyz, axis=0)) np.random.shuffle(point_xyz) return point_xyz[:min(len(point_xyz), point_num), :] def np_to_gpuarray(*args): result = [] for x in args: if isinstance(x, np.ndarray): result.append(pycuda.gpuarray.to_gpu(x)) else: print("trans",x) return result def save_points(xyz, dir, filename): if xyz.ndim < 3: xyz = xyz[None, ...] 
filename = "{}.txt".format(filename) os.makedirs(dir, exist_ok=True) filepath = os.path.join(dir, filename) print("save at {}".format(filepath)) if torch.is_tensor(xyz): np.savetxt(filepath, xyz.cpu().reshape(-1, xyz.shape[-1]), delimiter=";") else: np.savetxt(filepath, xyz.reshape(-1, xyz.shape[-1]), delimiter=";") def try_build(ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj, radius_limit, depth_limit, near_depth, far_depth, shading_count, split=["train"], imgidx=0, gpu=0, NN=2): # point_path = os.path.join(point_dir, point_file) # point_xyz = load_pnts(point_path, 819200000) # 81920 233872 point_xyz = load_init_points(obj) imgs, poses, _, hwf, _, intrinsic = load_blender_data( os.path.expandvars("${nrDataRoot}") + "/nerf/nerf_synthetic/{}".format(obj), split, half_res=False, testskip=1) H, W, focal = hwf intrinsic = np.array([[focal, 0, W / 2], [0, focal, H / 2], [0, 0, 1]]) plt.figure() plt.imshow(imgs[imgidx]) point_xyz_w_tensor = torch.as_tensor(point_xyz, device="cuda:{}".format(gpu))[None,...] print("point_xyz_w_tensor", point_xyz_w_tensor[0].shape, torch.min(point_xyz_w_tensor[0], dim=0)[0], torch.max(point_xyz_w_tensor[0], dim=0)[0]) # plt.show() actual_numpoints_tensor = torch.ones([1], device=point_xyz_w_tensor.device, dtype=torch.int32) * len(point_xyz_w_tensor[0]) # range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, kernel_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, kernel_size) pixel_idx_tensor = torch.as_tensor(pixel_idx, device=point_xyz_w_tensor.device, dtype=torch.int32)[None, ...] 
c2w = poses[0] print("c2w", c2w.shape, pixel_idx.shape) from data.data_utils import get_dtu_raydir cam_pos, camrot = c2w[:3, 3], c2w[:3, :3] ray_dirs_tensor, cam_pos_tensor = torch.as_tensor(get_dtu_raydir(pixel_idx, intrinsic, camrot, True), device=pixel_idx_tensor.device, dtype=torch.float32), torch.as_tensor(cam_pos, device=pixel_idx_tensor.device, dtype=torch.float32) from collections import namedtuple opt_construct = namedtuple('opt', 'inverse vsize vscale kernel_size radius_limit_scale depth_limit_scale max_o P SR K gpu_maxthr NN ranges z_depth_dim') opt = opt_construct(inverse=0, vscale=vscale, vsize=vsize, kernel_size=kernel_size, radius_limit_scale=0, depth_limit_scale=0, max_o=max_o, P=P, SR=SR, K=K, gpu_maxthr=1024, NN=NN, ranges=ranges, z_depth_dim=400) querier = lighting_fast_querier(point_xyz_w_tensor.device, opt) print("actual_numpoints_tensor", actual_numpoints_tensor) querier.query_points(pixel_idx_tensor, None, point_xyz_w_tensor, actual_numpoints_tensor, H, W, intrinsic, near_depth, far_depth, ray_dirs_tensor[None, ...], cam_pos_tensor[None, ...]) def w2img(point_xyz, transform_matrix, focal): camrot = transform_matrix[:3, :3] # world 2 cam campos = transform_matrix[:3, 3] # point_xyz_shift = point_xyz - campos[None, :] # xyz = np.sum(point_xyz_shift[:,None,:] * camrot.T, axis=-1) xyz = np.sum(camrot[None, ...] * point_xyz_shift[:, :, None], axis=-2) # print(xyz.shape, np.sum(camrot[None, None, ...] 
* point_xyz_shift[:,:,None], axis=-2).shape) xper = xyz[:, 0] / -xyz[:, 2] yper = xyz[:, 1] / xyz[:, 2] x_pixel = np.round(xper * focal + 400).astype(np.int32) y_pixel = np.round(yper * focal + 400).astype(np.int32) print("focal", focal, np.tan(.5 * 0.6911112070083618)) print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel), np.min(y_pixel)) print("per xmax xmin:", np.max(xper), np.min(xper), "per ymax ymin:", np.max(yper), np.min(yper), "per zmax zmin:", np.max(xyz[:, 2]), np.min(xyz[:, 2])) print("min perx", -400 / focal, "max perx", 400 / focal) background = np.ones([800, 800, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .2 plt.figure() plt.imshow(background) return np.stack([xper, yper, -xyz[:, 2]], axis=-1) def render_mask_pers_points(queried_point_xyz, vsize, ranges, w, h): pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32) print(pixel_xy_inds.shape) y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0] background = np.ones([h, w, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .5 plt.figure() plt.imshow(background) def save_mask_pers_points(queried_point_xyz, vsize, ranges, w, h): pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32) print(pixel_xy_inds.shape) y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0] background = np.ones([h, w, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .5 image_dir = os.path.join(self.opt.checkpoints_dir, opt.name, 'images') image_file = os.path.join(image_dir) def render_pixel_mask(pixel_xy_inds, w, h): y_pixel, x_pixel = pixel_xy_inds[0, :, 1], pixel_xy_inds[0, :, 0] background = np.ones([h, w, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .0 plt.figure() plt.imshow(background) def vis_vox(ranges_np, scaled_vsize_np, coor_2_occ_tensor): print("ranges_np", ranges_np, scaled_vsize_np) mask = coor_2_occ_tensor.cpu().numpy() > 0 
xdim, ydim, zdim = coor_2_occ_tensor.shape[1:] x_ = np.arange(0, xdim) y_ = np.arange(0, ydim) z_ = np.arange(0, zdim) x, y, z = np.meshgrid(x_, y_, z_, indexing='ij') xyz = np.stack([x,y,z], axis=-1).reshape(-1,3).astype(np.float32) xyz = ranges_np[None, :3] + (xyz + 0.5) * scaled_vsize_np[None, :] xyz = xyz[mask.reshape(-1)] save_points(xyz, "./", "occ_xyz") print(xyz.shape) def save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor, pixel_idx_cur_tensor, vdim, vsize, ranges): B, R, SR, K = sample_pidx_tensor.shape # pixel_inds = torch.as_tensor([3210, 3217,3218,3219,3220, 3221,3222,3223,3224,3225,3226,3227,3228,3229,3230, 3231,3232,3233,3234,3235, 3236,3237,3238,3239,3240], device=sample_pidx_tensor.device, dtype=torch.int64) point_inds = sample_pidx_tensor[0, :, :, :] # point_inds = sample_pidx_tensor[0, pixel_inds, :, :] mask = point_inds > -1 point_inds = torch.masked_select(point_inds, mask).to(torch.int64) queried_point_xyz_tensor = point_xyz_tensor[0, point_inds, :] queried_point_xyz = queried_point_xyz_tensor.cpu().numpy() print("queried_point_xyz.shape", B, R, SR, K, point_inds.shape, queried_point_xyz_tensor.shape, queried_point_xyz.shape) print("pixel_idx_cur_tensor", pixel_idx_cur_tensor.shape) render_pixel_mask(pixel_idx_cur_tensor.cpu().numpy(), vdim[0], vdim[1]) render_mask_pers_points(point_xyz_pers_tensor[0, point_inds, :].cpu().numpy(), vsize, ranges, vdim[0], vdim[1]) plt.show() def load_init_points(scan, data_dir="/home/xharlie/user_space/data/nrData/nerf/nerf_synthetic_colmap"): points_path = os.path.join(data_dir, scan, "colmap_results/dense/fused.ply") # points_path = os.path.join(self.data_dir, self.scan, "exported/pcd_te_1_vs_0.01_jit.ply") assert os.path.exists(points_path) from plyfile import PlyData, PlyElement plydata = PlyData.read(points_path) # plydata (PlyProperty('x', 'double'), PlyProperty('y', 'double'), PlyProperty('z', 'double'), PlyProperty('nx', 'double'), PlyProperty('ny', 
'double'), PlyProperty('nz', 'double'), PlyProperty('red', 'uchar'), PlyProperty('green', 'uchar'), PlyProperty('blue', 'uchar')) print("plydata", plydata.elements[0]) x,y,z=torch.as_tensor(plydata.elements[0].data["x"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["y"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["z"].astype(np.float32), device="cuda", dtype=torch.float32) points_xyz = torch.stack([x,y,z], dim=-1).to(torch.float32) return points_xyz if __name__ == "__main__": obj = "lego" # point_file = "{}.pkl".format(obj) # point_dir = os.path.expandvars("${nrDataRoot}/nerf/nerf_synthetic_points/") r = 0.36000002589322094 ranges = np.array([-1., -1.3, -1.2, 1., 1.3, 1.2], dtype=np.float32) vdim = np.array([400, 400, 400], dtype=np.int32) # vsize = np.array([2 * r / vdim[0], 2 * r / vdim[1], 4. / vdim[2]], dtype=np.float32) vsize = np.array([0.005, 0.005, 0.005], dtype=np.float32) vscale = np.array([2, 2, 2], dtype=np.int32) SR = 24 P = 128 K = 8 NN = 2 ray_num = 2048 kernel_size = np.array([5, 5, 5], dtype=np.int32) radius_limit = 0 # r / 400 * 5 #r / 400 * 5 depth_limit = 0 # 4. / 400 * 1.5 # r / 400 * 2 max_o = 500000 near_depth, far_depth = 2., 6. shading_count = 400 xrange = np.arange(0, 800, 1, dtype=np.int32) yrange = np.arange(0, 800, 1, dtype=np.int32) xv, yv = np.meshgrid(xrange, yrange, sparse=False, indexing='ij') inds = np.arange(len(xv.reshape(-1)), dtype=np.int32) np.random.shuffle(inds) inds = inds[:ray_num, ...] pixel_idx = np.stack([xv, yv], axis=-1).reshape(-1, 2)[inds] # 20000 * 2 gpu = 0 imgidx = 3 split = ["train"] if gpu < 0: import pycuda.autoinit else: drv.init() dev1 = drv.Device(gpu) ctx1 = dev1.make_context() try_build(ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj, radius_limit, depth_limit, near_depth, far_depth, shading_count, split=split, imgidx=imgidx, gpu=0, NN=NN)
51,817
55.385201
484
py
pointnerf
pointnerf-master/models/neural_points/neural_points.py
import torch import torch.nn as nn from data.load_blender import load_blender_cloud import numpy as np from ..helpers.networks import init_seq, positional_encoding import matplotlib.pyplot as plt import torch.nn.utils.prune as prune_param class NeuralPoints(nn.Module): @staticmethod def modify_commandline_options(parser, is_train=True): parser.add_argument('--load_points', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--point_noise', type=str, default="", help='pointgaussian_0.1 | pointuniform_0.1') parser.add_argument('--num_point', type=int, default=8192, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--construct_res', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--grid_res', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--cloud_path', type=str, default="", help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--shpnt_jitter', type=str, default="passfunc", help='passfunc | uniform | gaussian') parser.add_argument('--point_features_dim', type=int, default=64, help='number of coarse samples') parser.add_argument('--gpu_maxthr', type=int, default=1024, help='number of coarse samples') parser.add_argument('--z_depth_dim', type=int, default=400, help='number of coarse samples') parser.add_argument('--SR', type=int, default=24, help='max shading points number each ray') parser.add_argument('--K', type=int, default=32, help='max neural points each group') parser.add_argument('--max_o', type=int, default=None, help='max nonempty voxels stored each frustum') parser.add_argument('--P', type=int, default=16, help='max neural points stored each block') parser.add_argument('--NN', type=int, default=0, help='0: radius search | 1: K-NN after radius search | 2: K-NN world coord after pers radius search') 
parser.add_argument('--radius_limit_scale', type=float, default=5.0, help='max neural points stored each block') parser.add_argument('--depth_limit_scale', type=float, default=1.3, help='max neural points stored each block') parser.add_argument('--default_conf', type=float, default=-1.0, help='max neural points stored each block') parser.add_argument( '--vscale', type=int, nargs='+', default=(2, 2, 1), help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--kernel_size', type=int, nargs='+', default=(7, 7, 1), help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--query_size', type=int, nargs='+', default=(0, 0, 0), help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--xyz_grad', type=int, default=0, help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--feat_grad', type=int, default=1, help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--conf_grad', type=int, default=1, help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--color_grad', type=int, default=1, help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--dir_grad', type=int, default=1, help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--feedforward', type=int, default=0, help= 'vscale is the block size that store several voxels' ) parser.add_argument( '--inverse', type=int, default=0, help= '1 for 1/n depth sweep' ) parser.add_argument( '--point_conf_mode', type=str, default="0", help= '0 for only at features, 1 for multi at weight' ) parser.add_argument( '--point_color_mode', type=str, default="0", help= '0 for only at features, 1 for multi at weight' ) parser.add_argument( '--point_dir_mode', type=str, default="0", help= '0 for only at features, 1 for multi at weight' ) parser.add_argument( '--vsize', type=float, nargs='+', default=(0.005, 0.005, 0.005), help= 
'vscale is the block size that store several voxels' ) parser.add_argument( '--wcoord_query', type=int, default="0", help= '0 for perspective voxels, and 1 for world coord, -1 for world coord and using pytorch cuda' ) parser.add_argument( '--ranges', type=float, nargs='+', default=(-100.0, -100.0, -100.0, 100.0, 100.0, 100.0), help='vscale is the block size that store several voxels' ) def __init__(self, num_channels, size, opt, device, checkpoint=None, feature_init_method='rand', reg_weight=0., feedforward=0): super().__init__() assert isinstance(size, int), 'size must be int' self.opt = opt self.grid_vox_sz = 0 self.points_conf, self.points_dir, self.points_color, self.eulers, self.Rw2c = None, None, None, None, None self.device=device if self.opt.load_points ==1: saved_features = None if checkpoint: saved_features = torch.load(checkpoint, map_location=device) if saved_features is not None and "neural_points.xyz" in saved_features: self.xyz = nn.Parameter(saved_features["neural_points.xyz"]) else: point_xyz, _ = load_blender_cloud(self.opt.cloud_path, self.opt.num_point) point_xyz = torch.as_tensor(point_xyz, device=device, dtype=torch.float32) if len(opt.point_noise) > 0: spl = opt.point_noise.split("_") if float(spl[1]) > 0.0: func = getattr(self, spl[0], None) point_xyz = func(point_xyz, float(spl[1])) print("point_xyz shape after jittering: ", point_xyz.shape) print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& Loaded blender cloud ', self.opt.cloud_path, self.opt.num_point, point_xyz.shape) # filepath = "./aaaaaaaaaaaaa_cloud.txt" # np.savetxt(filepath, self.xyz.reshape(-1, 3).detach().cpu().numpy(), delimiter=";") if self.opt.construct_res > 0: point_xyz, sparse_grid_idx, self.full_grid_idx = self.construct_grid_points(point_xyz) self.xyz = nn.Parameter(point_xyz) # filepath = "./grid_cloud.txt" # np.savetxt(filepath, point_xyz.reshape(-1, 3).detach().cpu().numpy(), delimiter=";") # print("max counts", torch.max(torch.unique(point_xyz, return_counts=True, dim=0)[1])) 
print("point_xyz", point_xyz.shape) self.xyz.requires_grad = opt.xyz_grad > 0 shape = 1, self.xyz.shape[0], num_channels # filepath = "./aaaaaaaaaaaaa_cloud.txt" # np.savetxt(filepath, self.xyz.reshape(-1, 3).detach().cpu().numpy(), delimiter=";") if checkpoint: self.points_embeding = nn.Parameter(saved_features["neural_points.points_embeding"]) if "neural_points.points_embeding" in saved_features else None print("self.points_embeding", self.points_embeding.shape) # points_conf = saved_features["neural_points.points_conf"] if "neural_points.points_conf" in saved_features else None # if self.opt.default_conf > 0.0 and points_conf is not None: # points_conf = torch.ones_like(points_conf) * self.opt.default_conf # self.points_conf = nn.Parameter(points_conf) if points_conf is not None else None self.points_conf = nn.Parameter(saved_features["neural_points.points_conf"]) if "neural_points.points_conf" in saved_features else None # print("self.points_conf",self.points_conf) self.points_dir = nn.Parameter(saved_features["neural_points.points_dir"]) if "neural_points.points_dir" in saved_features else None self.points_color = nn.Parameter(saved_features["neural_points.points_color"]) if "neural_points.points_color" in saved_features else None self.eulers = nn.Parameter(saved_features["neural_points.eulers"]) if "neural_points.eulers" in saved_features else None self.Rw2c = nn.Parameter(saved_features["neural_points.Rw2c"]) if "neural_points.Rw2c" in saved_features else torch.eye(3, device=self.xyz.device, dtype=self.xyz.dtype) else: if feature_init_method == 'rand': points_embeding = torch.rand(shape, device=device, dtype=torch.float32) - 0.5 elif feature_init_method == 'zeros': points_embeding = torch.zeros(shape, device=device, dtype=torch.float32) elif feature_init_method == 'ones': points_embeding = torch.ones(shape, device=device, dtype=torch.float32) elif feature_init_method == 'pos': if self.opt.point_features_dim > 3: points_embeding = 
positional_encoding(point_xyz.reshape(shape[0], shape[1], 3), int(self.opt.point_features_dim / 6)) if int(self.opt.point_features_dim / 6) * 6 < self.opt.point_features_dim: rand_embeding = torch.rand(shape[:-1] + (self.opt.point_features_dim - points_embeding.shape[-1],), device=device, dtype=torch.float32) - 0.5 print("points_embeding", points_embeding.shape, rand_embeding.shape) points_embeding = torch.cat([points_embeding, rand_embeding], dim=-1) else: points_embeding = point_xyz.reshape(shape[0], shape[1], 3) elif feature_init_method.startswith("gau"): std = float(feature_init_method.split("_")[1]) zeros = torch.zeros(shape, device=device, dtype=torch.float32) points_embeding = torch.normal(mean=zeros, std=std) else: raise ValueError(init_method) self.points_embeding = nn.Parameter(points_embeding) print("points_embeding init:", points_embeding.shape, torch.max(self.points_embeding), torch.min(self.points_embeding)) self.points_conf=torch.ones_like(self.points_embeding[...,0:1]) if self.points_embeding is not None: self.points_embeding.requires_grad = opt.feat_grad > 0 if self.points_conf is not None: self.points_conf.requires_grad = self.opt.conf_grad > 0 if self.points_dir is not None: self.points_dir.requires_grad = self.opt.dir_grad > 0 if self.points_color is not None: self.points_color.requires_grad = self.opt.color_grad > 0 if self.eulers is not None: self.eulers.requires_grad = False if self.Rw2c is not None: self.Rw2c.requires_grad = False self.reg_weight = reg_weight self.opt.query_size = self.opt.kernel_size if self.opt.query_size[0] == 0 else self.opt.query_size # self.lighting_fast_querier = lighting_fast_querier_w if self.opt.wcoord_query > 0 else lighting_fast_querier_p if self.opt.wcoord_query == 0: from .query_point_indices import lighting_fast_querier as lighting_fast_querier_p self.lighting_fast_querier = lighting_fast_querier_p elif self.opt.wcoord_query > 0: from .query_point_indices_worldcoords import lighting_fast_querier as 
lighting_fast_querier_w self.lighting_fast_querier = lighting_fast_querier_w else: from .point_query import lighting_fast_querier as lighting_fast_querier_cuda self.lighting_fast_querier = lighting_fast_querier_cuda self.querier = self.lighting_fast_querier(device, self.opt) def reset_querier(self): self.querier.clean_up() del self.querier self.querier = self.lighting_fast_querier(self.device, self.opt) def prune(self, thresh): mask = self.points_conf[0,...,0] >= thresh self.xyz = nn.Parameter(self.xyz[mask, :]) self.xyz.requires_grad = self.opt.xyz_grad > 0 if self.points_embeding is not None: self.points_embeding = nn.Parameter(self.points_embeding[:, mask, :]) self.points_embeding.requires_grad = self.opt.feat_grad > 0 if self.points_conf is not None: self.points_conf = nn.Parameter(self.points_conf[:, mask, :]) self.points_conf.requires_grad = self.opt.conf_grad > 0 if self.points_dir is not None: self.points_dir = nn.Parameter(self.points_dir[:, mask, :]) self.points_dir.requires_grad = self.opt.dir_grad > 0 if self.points_color is not None: self.points_color = nn.Parameter(self.points_color[:, mask, :]) self.points_color.requires_grad = self.opt.color_grad > 0 if self.eulers is not None and self.eulers.dim() > 1: self.eulers = nn.Parameter(self.eulers[mask, :]) self.eulers.requires_grad = False if self.Rw2c is not None and self.Rw2c.dim() > 2: self.Rw2c = nn.Parameter(self.Rw2c[mask, :]) self.Rw2c.requires_grad = False print("@@@@@@@@@ pruned {}/{}".format(torch.sum(mask==0), mask.shape[0])) def grow_points(self, add_xyz, add_embedding, add_color, add_dir, add_conf, add_eulers=None, add_Rw2c=None): # print(self.xyz.shape, self.points_conf.shape, self.points_embeding.shape, self.points_dir.shape, self.points_color.shape) self.xyz = nn.Parameter(torch.cat([self.xyz, add_xyz], dim=0)) self.xyz.requires_grad = self.opt.xyz_grad > 0 if self.points_embeding is not None: self.points_embeding = nn.Parameter(torch.cat([self.points_embeding, add_embedding[None, ...]], 
dim=1)) self.points_embeding.requires_grad = self.opt.feat_grad > 0 if self.points_conf is not None: self.points_conf = nn.Parameter(torch.cat([self.points_conf, add_conf[None, ...]], dim=1)) self.points_conf.requires_grad = self.opt.conf_grad > 0 if self.points_dir is not None: self.points_dir = nn.Parameter(torch.cat([self.points_dir, add_dir[None, ...]], dim=1)) self.points_dir.requires_grad = self.opt.dir_grad > 0 if self.points_color is not None: self.points_color = nn.Parameter(torch.cat([self.points_color, add_color[None, ...]], dim=1)) self.points_color.requires_grad = self.opt.color_grad > 0 if self.eulers is not None and self.eulers.dim() > 1: self.eulers = nn.Parameter(torch.cat([self.eulers, add_eulers[None,...]], dim=1)) self.eulers.requires_grad = False if self.Rw2c is not None and self.Rw2c.dim() > 2: self.Rw2c = nn.Parameter(torch.cat([self.Rw2c, add_Rw2c[None,...]], dim=1)) self.Rw2c.requires_grad = False def set_points(self, points_xyz, points_embeding, points_color=None, points_dir=None, points_conf=None, parameter=False, Rw2c=None, eulers=None): if points_embeding.shape[-1] > self.opt.point_features_dim: points_embeding = points_embeding[..., :self.opt.point_features_dim] if self.opt.default_conf > 0.0 and self.opt.default_conf <= 1.0 and points_conf is not None: points_conf = torch.ones_like(points_conf) * self.opt.default_conf if parameter: self.xyz = nn.Parameter(points_xyz) self.xyz.requires_grad = self.opt.xyz_grad > 0 if points_conf is not None: points_conf = nn.Parameter(points_conf) points_conf.requires_grad = self.opt.conf_grad > 0 if "0" in list(self.opt.point_conf_mode): points_embeding = torch.cat([points_conf, points_embeding], dim=-1) if "1" in list(self.opt.point_conf_mode): self.points_conf = points_conf if points_dir is not None: points_dir = nn.Parameter(points_dir) points_dir.requires_grad = self.opt.dir_grad > 0 if "0" in list(self.opt.point_dir_mode): points_embeding = torch.cat([points_dir, points_embeding], dim=-1) if "1" 
in list(self.opt.point_dir_mode): self.points_dir = points_dir if points_color is not None: points_color = nn.Parameter(points_color) points_color.requires_grad = self.opt.color_grad > 0 if "0" in list(self.opt.point_color_mode): points_embeding = torch.cat([points_color, points_embeding], dim=-1) if "1" in list(self.opt.point_color_mode): self.points_color = points_color points_embeding = nn.Parameter(points_embeding) points_embeding.requires_grad = self.opt.feat_grad > 0 self.points_embeding = points_embeding # print("self.points_embeding", self.points_embeding, self.points_color) # print("points_xyz", torch.min(points_xyz, dim=-2)[0], torch.max(points_xyz, dim=-2)[0]) else: self.xyz = points_xyz if points_conf is not None: if "0" in list(self.opt.point_conf_mode): points_embeding = torch.cat([points_conf, points_embeding], dim=-1) if "1" in list(self.opt.point_conf_mode): self.points_conf = points_conf if points_dir is not None: if "0" in list(self.opt.point_dir_mode): points_embeding = torch.cat([points_dir, points_embeding], dim=-1) if "1" in list(self.opt.point_dir_mode): self.points_dir = points_dir if points_color is not None: if "0" in list(self.opt.point_color_mode): points_embeding = torch.cat([points_color, points_embeding], dim=-1) if "1" in list(self.opt.point_color_mode): self.points_color = points_color self.points_embeding = points_embeding if Rw2c is None: self.Rw2c = torch.eye(3, device=points_xyz.device, dtype=points_xyz.dtype) else: self.Rw2c = nn.Parameter(Rw2c) self.Rw2c.requires_grad = False def editing_set_points(self, points_xyz, points_embeding, points_color=None, points_dir=None, points_conf=None, parameter=False, Rw2c=None, eulers=None): if self.opt.default_conf > 0.0 and self.opt.default_conf <= 1.0 and points_conf is not None: points_conf = torch.ones_like(points_conf) * self.opt.default_conf self.xyz = points_xyz self.points_embeding = points_embeding self.points_dir = points_dir self.points_conf = points_conf self.points_color = 
points_color if Rw2c is None: self.Rw2c = torch.eye(3, device=points_xyz.device, dtype=points_xyz.dtype) else: self.Rw2c = Rw2c def construct_grid_points(self, xyz): # --construct_res' '--grid_res', xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0] self.space_edge = torch.max(xyz_max - xyz_min) * 1.1 xyz_mid = (xyz_max + xyz_min) / 2 self.space_min = xyz_mid - self.space_edge / 2 self.space_max = xyz_mid + self.space_edge / 2 self.construct_vox_sz = self.space_edge / self.opt.construct_res self.grid_vox_sz = self.space_edge / self.opt.grid_res xyz_shift = xyz - self.space_min[None, ...] construct_vox_idx = torch.unique(torch.floor(xyz_shift / self.construct_vox_sz[None, ...]).to(torch.int16), dim=0) # print("construct_grid_idx", construct_grid_idx.shape) torch.Size([7529, 3]) cg_ratio = int(self.opt.grid_res / self.opt.construct_res) gx = torch.arange(0, cg_ratio+1, device=construct_vox_idx.device, dtype=construct_vox_idx.dtype) gy = torch.arange(0, cg_ratio+1, device=construct_vox_idx.device, dtype=construct_vox_idx.dtype) gz = torch.arange(0, cg_ratio+1, device=construct_vox_idx.device, dtype=construct_vox_idx.dtype) gx, gy, gz = torch.meshgrid(gx, gy, gz) gxyz = torch.stack([gx, gy, gz], dim=-1).view(1, -1, 3) sparse_grid_idx = construct_vox_idx[:, None, :] * cg_ratio + gxyz # sparse_grid_idx.shape: ([7529, 9*9*9, 3]) -> ([4376896, 3]) sparse_grid_idx = torch.unique(sparse_grid_idx.view(-1, 3), dim=0).to(torch.int64) full_grid_idx = torch.full([self.opt.grid_res+1,self.opt.grid_res+1,self.opt.grid_res+1], -1, device=xyz.device, dtype=torch.int32) # full_grid_idx.shape: ([401, 401, 401]) full_grid_idx[sparse_grid_idx[...,0], sparse_grid_idx[...,1], sparse_grid_idx[...,2]] = torch.arange(0, sparse_grid_idx.shape[0], device=full_grid_idx.device, dtype=full_grid_idx.dtype) xyz = self.space_min[None, ...] 
+ sparse_grid_idx * self.grid_vox_sz return xyz, sparse_grid_idx, full_grid_idx def null_grad(self): self.points_embeding.grad = None self.xyz.grad = None def reg_loss(self): return self.reg_weight * torch.mean(torch.pow(self.points_embeding, 2)) def pers2img(self, point_xyz_pers_tensor, pixel_id, pixel_idx_cur, ray_mask, sample_pidx, ranges, h, w, inputs): xper = point_xyz_pers_tensor[..., 0].cpu().numpy() yper = point_xyz_pers_tensor[..., 1].cpu().numpy() x_pixel = np.clip(np.round((xper-ranges[0]) * (w-1) / (ranges[3]-ranges[0])).astype(np.int32), 0, w-1)[0] y_pixel = np.clip(np.round((yper-ranges[1]) * (h-1) / (ranges[4]-ranges[1])).astype(np.int32), 0, h-1)[0] print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel), np.min(y_pixel), sample_pidx.shape,y_pixel.shape) background = np.zeros([h, w, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = self.points_embeding.cpu().numpy()[0,...] background[pixel_idx_cur[0,...,1],pixel_idx_cur[0,...,0],0] = 1.0 background[y_pixel[sample_pidx[-1]], x_pixel[sample_pidx[-1]], :] = self.points_embeding.cpu().numpy()[0,sample_pidx[-1]] gtbackground = np.ones([h, w, 3], dtype=np.float32) gtbackground[pixel_idx_cur[0 ,..., 1], pixel_idx_cur[0 , ..., 0],:] = inputs["gt_image"].cpu().numpy()[0][ray_mask[0]>0] print("diff sum",np.sum(inputs["gt_image"].cpu().numpy()[0][ray_mask[0]>0]-self.points_embeding.cpu().numpy()[0,sample_pidx[...,1,0][-1]])) plt.figure() plt.imshow(background) plt.figure() plt.imshow(gtbackground) plt.show() def get_point_indices(self, inputs, cam_rot_tensor, cam_pos_tensor, pixel_idx_tensor, near_plane, far_plane, h, w, intrinsic, vox_query=False): point_xyz_pers_tensor = self.w2pers(self.xyz, cam_rot_tensor, cam_pos_tensor) actual_numpoints_tensor = torch.ones([point_xyz_pers_tensor.shape[0]], device=point_xyz_pers_tensor.device, dtype=torch.int32) * point_xyz_pers_tensor.shape[1] # print("pixel_idx_tensor", pixel_idx_tensor) # print("point_xyz_pers_tensor", 
point_xyz_pers_tensor.shape) # print("actual_numpoints_tensor", actual_numpoints_tensor.shape) # sample_pidx_tensor: B, R, SR, K ray_dirs_tensor = inputs["raydir"] # print("ray_dirs_tensor", ray_dirs_tensor.shape, self.xyz.shape) sample_pidx_tensor, sample_loc_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize, ranges = self.querier.query_points(pixel_idx_tensor, point_xyz_pers_tensor, self.xyz[None,...], actual_numpoints_tensor, h, w, intrinsic, near_plane, far_plane, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor) # print("ray_mask_tensor",ray_mask_tensor.shape) # self.pers2img(point_xyz_pers_tensor, pixel_idx_tensor.cpu().numpy(), pixel_idx_cur_tensor.cpu().numpy(), ray_mask_tensor.cpu().numpy(), sample_pidx_tensor.cpu().numpy(), ranges, h, w, inputs) B, _, SR, K = sample_pidx_tensor.shape if vox_query: if sample_pidx_tensor.shape[1] > 0: sample_pidx_tensor = self.query_vox_grid(sample_loc_w_tensor, self.full_grid_idx, self.space_min, self.grid_vox_sz) else: sample_pidx_tensor = torch.zeros([B, 0, SR, 8], device=sample_pidx_tensor.device, dtype=sample_pidx_tensor.dtype) return sample_pidx_tensor, sample_loc_tensor, ray_mask_tensor, point_xyz_pers_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, vsize def query_vox_grid(self, sample_loc_w_tensor, full_grid_idx, space_min, grid_vox_sz): # sample_pidx_tensor = torch.full(sample_loc_w_tensor.shape[:-1]+(8,), -1, device=sample_loc_w_tensor.device, dtype=torch.int64) B, R, SR, _ = sample_loc_w_tensor.shape vox_ind = torch.floor((sample_loc_w_tensor - space_min[None, None, None, :]) / grid_vox_sz).to(torch.int64) # B, R, SR, 3 shift = torch.as_tensor([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.int64, device=full_grid_idx.device).reshape(1, 1, 1, 8, 3) vox_ind = vox_ind[..., None, :] + shift # B, R, SR, 8, 3 vox_mask = torch.any(torch.logical_or(vox_ind < 0, vox_ind > self.opt.grid_res).view(B, R, SR, -1), dim=3) vox_ind = 
torch.clamp(vox_ind, min=0, max=self.opt.grid_res).view(-1, 3) inds = full_grid_idx[vox_ind[..., 0], vox_ind[..., 1], vox_ind[..., 2]].view(B, R, SR, 8) inds[vox_mask, :] = -1 # -1 for all 8 corners inds[torch.any(inds < 0, dim=-1), :] = -1 return inds.to(torch.int64) # def w2pers(self, point_xyz, camrotc2w, campos): # point_xyz_shift = point_xyz[None, ...] - campos[:, None, :] # xyz = torch.sum(camrotc2w[:, None, :, :] * point_xyz_shift[:, :, :, None], dim=-2) # # print(xyz.shape, (point_xyz_shift[:, None, :] * camrot.T).shape) # xper = xyz[:, :, 0] / -xyz[:, :, 2] # yper = xyz[:, :, 1] / xyz[:, :, 2] # return torch.stack([xper, yper, -xyz[:, :, 2]], dim=-1) def w2pers(self, point_xyz, camrotc2w, campos): point_xyz_shift = point_xyz[None, ...] - campos[:, None, :] xyz = torch.sum(camrotc2w[:, None, :, :] * point_xyz_shift[:, :, :, None], dim=-2) # print(xyz.shape, (point_xyz_shift[:, None, :] * camrot.T).shape) xper = xyz[:, :, 0] / xyz[:, :, 2] yper = xyz[:, :, 1] / xyz[:, :, 2] return torch.stack([xper, yper, xyz[:, :, 2]], dim=-1) def vect2euler(self, xyz): yz_norm = torch.norm(xyz[...,1:3], dim=-1) e_x = torch.atan2(-xyz[...,1], xyz[...,2]) e_y = torch.atan2(xyz[...,0], yz_norm) e_z = torch.zeros_like(e_y) e_xyz = torch.stack([e_x, e_y, e_z], dim=-1) return e_xyz def euler2Rc2w(self, e_xyz): cosxyz = torch.cos(e_xyz) sinxyz = torch.sin(e_xyz) cxsz = cosxyz[...,0]*sinxyz[...,2] czsy = cosxyz[...,2]*sinxyz[...,1] sxsz = sinxyz[...,0]*sinxyz[...,2] r1 = torch.stack([cosxyz[...,1]*cosxyz[...,2], czsy*sinxyz[...,0] - cxsz, czsy*cosxyz[...,0] + sxsz], dim=-1) r2 = torch.stack([cosxyz[...,1]*sinxyz[...,2], cosxyz[...,0]*cosxyz[...,2] + sxsz*sinxyz[...,1], -cosxyz[...,2]*sinxyz[...,0] + cxsz * sinxyz[...,1]], dim=-1) r3 = torch.stack([-sinxyz[...,1], cosxyz[...,1]*sinxyz[...,0], cosxyz[...,0]*cosxyz[...,1]], dim=-1) Rzyx = torch.stack([r1, r2, r3], dim=-2) return Rzyx def euler2Rw2c(self, e_xyz): c = torch.cos(-e_xyz) s = torch.sin(-e_xyz) r1 = torch.stack([c[...,1] * 
c[...,2], -s[...,2], c[...,2]*s[...,1]], dim=-1) r2 = torch.stack([s[...,0]*s[...,1] + c[...,0]*c[...,1]*s[...,2], c[...,0]*c[...,2], -c[...,1]*s[...,0]+c[...,0]*s[...,1]*s[...,2]], dim=-1) r3 = torch.stack([-c[...,0]*s[...,1]+c[...,1]*s[...,0]*s[...,2], c[...,2]*s[...,0], c[...,0]*c[...,1]+s[...,0]*s[...,1]*s[...,2]], dim=-1) Rxyz = torch.stack([r1, r2, r3], dim=-2) return Rxyz def get_w2c(self, cam_xyz, Rw2c): t = -Rw2c @ cam_xyz[..., None] # N, 3 M = torch.cat([Rw2c, t], dim=-1) ones = torch.as_tensor([[[0, 0, 0, 1]]], device=M.device, dtype=M.dtype).expand(len(M),-1, -1) return torch.cat([M, ones], dim=-2) def get_c2w(self, cam_xyz, Rc2w): M = torch.cat([Rc2w, cam_xyz[..., None]], dim=-1) ones = torch.as_tensor([[[0, 0, 0, 1]]], device=M.device, dtype=M.dtype).expand(len(M),-1, -1) return torch.cat([M, ones], dim=-2) # def pers2w(self, point_xyz_pers, camrotc2w, campos): # # point_xyz_pers B X M X 3 # # x_pers = point_xyz_pers[..., 0] * point_xyz_pers[..., 2] # y_pers = - point_xyz_pers[..., 1] * point_xyz_pers[..., 2] # z_pers = - point_xyz_pers[..., 2] # xyz_c = torch.stack([x_pers, y_pers, z_pers], dim=-1) # xyz_w_shift = torch.sum(xyz_c[...,None,:] * camrotc2w, dim=-1) # # print("point_xyz_pers[..., 0, 0]", point_xyz_pers[..., 0, 0].shape, point_xyz_pers[..., 0, 0]) # ray_dirs = xyz_w_shift / (torch.linalg.norm(xyz_w_shift, dim=-1, keepdims=True) + 1e-7) # # xyz_w = xyz_w_shift + campos[:, None, :] # return xyz_w, ray_dirs def passfunc(self, input, vsize): return input def pointgaussian(self, input, std): M, C = input.shape input = torch.normal(mean=input, std=std) return input def pointuniform(self, input, std): M, C = input.shape jitters = torch.rand([M, C], dtype=torch.float32, device=input.device) - 0.5 input = input + jitters * std * 2 return input def pointuniformadd(self, input, std): addinput = self.pointuniform(input, std) return torch.cat([input,addinput], dim=0) def pointuniformdouble(self, input, std): input = 
self.pointuniform(torch.cat([input,input], dim=0), std) return input def forward(self, inputs): pixel_idx, camrotc2w, campos, near_plane, far_plane, h, w, intrinsic = inputs["pixel_idx"].to(torch.int32), inputs["camrotc2w"], inputs["campos"], inputs["near"], inputs["far"], inputs["h"], inputs["w"], inputs["intrinsic"] # 1, 294, 24, 32; 1, 294, 24; 1, 291, 2 sample_pidx, sample_loc, ray_mask_tensor, point_xyz_pers_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, vsize = self.get_point_indices(inputs, camrotc2w, campos, pixel_idx, torch.min(near_plane).cpu().numpy(), torch.max(far_plane).cpu().numpy(), torch.max(h).cpu().numpy(), torch.max(w).cpu().numpy(), intrinsic.cpu().numpy()[0], vox_query=self.opt.NN<0) sample_pnt_mask = sample_pidx >= 0 B, R, SR, K = sample_pidx.shape sample_pidx = torch.clamp(sample_pidx, min=0).view(-1).long() sampled_embedding = torch.index_select(torch.cat([self.xyz[None, ...], point_xyz_pers_tensor, self.points_embeding], dim=-1), 1, sample_pidx).view(B, R, SR, K, self.points_embeding.shape[2]+self.xyz.shape[1]*2) sampled_color = None if self.points_color is None else torch.index_select(self.points_color, 1, sample_pidx).view(B, R, SR, K, self.points_color.shape[2]) sampled_dir = None if self.points_dir is None else torch.index_select(self.points_dir, 1, sample_pidx).view(B, R, SR, K, self.points_dir.shape[2]) sampled_conf = None if self.points_conf is None else torch.index_select(self.points_conf, 1, sample_pidx).view(B, R, SR, K, self.points_conf.shape[2]) sampled_Rw2c = self.Rw2c if self.Rw2c.dim() == 2 else torch.index_select(self.Rw2c, 0, sample_pidx).view(B, R, SR, K, self.Rw2c.shape[1], self.Rw2c.shape[2]) # filepath = "./sampled_xyz_full.txt" # np.savetxt(filepath, self.xyz.reshape(-1, 3).detach().cpu().numpy(), delimiter=";") # # filepath = "./sampled_xyz_pers_full.txt" # np.savetxt(filepath, point_xyz_pers_tensor.reshape(-1, 3).detach().cpu().numpy(), delimiter=";") # if self.xyz.grad is not None: # print("xyz grad:", 
self.xyz.requires_grad, torch.max(self.xyz.grad), torch.min(self.xyz.grad)) # if self.points_embeding.grad is not None: # print("points_embeding grad:", self.points_embeding.requires_grad, torch.max(self.points_embeding.grad)) # print("points_embeding 3", torch.max(self.points_embeding), torch.min(self.points_embeding)) return sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding[..., 6:], sampled_embedding[..., 3:6], sampled_embedding[..., :3], sample_pnt_mask, sample_loc, sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize, self.grid_vox_sz
35,753
47.978082
368
py
pointnerf
pointnerf-master/models/neural_points/query_point_indices.py
import os
import numpy as np
from numpy import dot
from math import sqrt
import pycuda
from pycuda.compiler import SourceModule
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import torch
import pickle
import time
# import cupy
# import open3d.ml.tf as ml3d
# import frnn

from data.load_blender import load_blender_data

# X = torch.cuda.FloatTensor(8)


class Holder(pycuda.driver.PointerHolderBase):
    """Adapter exposing a torch CUDA tensor's device pointer to pycuda.

    Holding a reference to ``t`` keeps the tensor (and its memory) alive
    for the duration of the kernel launch.
    """

    def __init__(self, t):
        super(Holder, self).__init__()
        self.t = t
        # Raw device pointer pycuda passes to the kernel.
        self.gpudata = t.data_ptr()

    def get_pointer(self):
        return self.t.data_ptr()


class lighting_fast_querier():
    """Frustum voxel-grid neighbor query for neural point clouds.

    Compiles CUDA kernels (via pycuda) that rasterize points into a
    camera-frustum voxel grid and, for each ray sample location, gather
    up to K nearby point indices.
    """

    def __init__(self, device, opt):
        # `device` is expected to be a torch.device with a CUDA index.
        print("querier device", device, device.index)
        self.gpu = device.index
        self.opt = opt
        drv.init()
        # self.device = drv.Device(gpu)
        # A dedicated pycuda context is pushed here; callers must pair this
        # with clean_up() to pop it.
        self.ctx = drv.Device(self.gpu).make_context()
        self.get_occ_vox, self.near_vox_full, self.insert_vox_points, self.query_along_ray = self.build_cuda()
        self.inverse = self.opt.inverse

    def clean_up(self):
        # Pop the pycuda context created in __init__.
        self.ctx.pop()

    def get_hyperparameters(self, h, w, intrinsic, near_depth, far_depth):
        """Derive frustum voxel-grid geometry for the current view.

        When ``self.inverse != 0`` the z axis is parameterized in inverse
        depth (1/z). Returns the CPU arrays (radius_limit, depth_limit,
        ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale) followed by
        gpuarray copies of ranges / scaled_vsize / scaled_vdim / vscale /
        kernel_size / query_size.
        """
        # print("h,w,focal,near,far", h.shape, w.shape, focal.shape, near_depth.shape, far_depth.shape)
        # x_r = w / 2 / focal
        # y_r = h / 2 / focal
        # ranges = np.array([-x_r, -y_r, near_depth, x_r, y_r, far_depth], dtype=np.float32)
        # vdim = np.array([h, w, self.opt.z_depth_dim], dtype=np.int32)
        # vsize = np.array([2 * x_r / vdim[0], 2 * y_r / vdim[1], z_r / vdim[2]], dtype=np.float32)
        # Normalized image-plane extents computed from the intrinsics.
        x_rl, x_rh = -intrinsic[0, 2] / intrinsic[0, 0], (w - intrinsic[0, 2]) / intrinsic[0, 0]
        y_rl, y_rh = -intrinsic[1, 2] / intrinsic[1, 1], (h - intrinsic[1, 2]) / intrinsic[1, 1],
        z_r = (far_depth - near_depth) if self.inverse == 0 else (1.0 / near_depth - 1.0 / far_depth)
        # [-0.22929783 -0.1841962  2.125  0.21325193  0.17096843  4.525]
        ranges = np.array([x_rl, y_rl, near_depth, x_rh, y_rh, far_depth], dtype=np.float32) if self.inverse == 0 else np.array([x_rl, y_rl, 1.0 / far_depth, x_rh, y_rh, 1.0 / near_depth], dtype=np.float32)
        vdim = np.array([w, h, self.opt.z_depth_dim], dtype=np.int32)
        vsize = np.array([(x_rh - x_rl) / vdim[0], (y_rh - y_rl) / vdim[1], z_r / vdim[2]], dtype=np.float32)
        vscale = np.array(self.opt.vscale, dtype=np.int32)
        # Coarser (scaled) grid used for the occupancy bookkeeping kernels.
        scaled_vdim = np.ceil(vdim / vscale).astype(np.int32)
        scaled_vsize = (vsize * vscale).astype(np.float32)
        range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, np.asarray(self.opt.kernel_size, dtype=np.int32), np.asarray(self.opt.query_size, dtype=np.int32))
        # Neighbor-acceptance radii, scaled from the fine voxel size.
        radius_limit, depth_limit = self.opt.radius_limit_scale * max(vsize[0], vsize[1]), self.opt.depth_limit_scale * vsize[2]
        return radius_limit.astype(np.float32), depth_limit.astype(np.float32), ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu

    def query_points(self, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, h, w, intrinsic, near_depth, far_depth, ray_dirs_tensor, cam_pos_tensor, cam_rot_tensor):
        """Query per-ray neighbor indices and sample locations.

        Returns (sample_pidx, sample_loc (perspective), sample_loc_w
        (world), sample_ray_dirs, ray_mask, vsize, ranges).
        """
        # A disabled code path used to cache the hyperparameters across
        # calls when opt.feedforward == 0; they are now recomputed per view.
        radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu = self.get_hyperparameters(h, w, intrinsic, near_depth, far_depth)
        # NOTE(review): `self.c` below looks like a truncated
        # `self.kernel_size_gpu` (it receives kernel_size_gpu) — confirm
        # against upstream history before relying on the attribute name.
        self.radius_limit, self.depth_limit, self.ranges, self.vsize, self.vdim, self.scaled_vsize, self.scaled_vdim, self.vscale, self.range_gpu, self.scaled_vsize_gpu, self.scaled_vdim_gpu, self.vscale_gpu, self.c, self.query_size_gpu = radius_limit, depth_limit, ranges, vsize, vdim, scaled_vsize, scaled_vdim, vscale, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kernel_size_gpu, query_size_gpu
        sample_pidx_tensor, sample_loc_tensor, pixel_idx_cur_tensor, ray_mask_tensor = self.query_grid_point_index(h, w, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, self.opt.SR, self.opt.K, ranges, scaled_vsize, scaled_vdim, vscale, self.opt.max_o, self.opt.P, radius_limit, depth_limit, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kMaxThreadsPerBlock=self.opt.gpu_maxthr)
        self.inverse = self.opt.inverse
        if self.opt.is_train:
            # Depth jitter during training; opt.shpnt_jitter names either
            # self.gaussian or self.uniform.
            sample_loc_tensor = getattr(self, self.opt.shpnt_jitter, None)(sample_loc_tensor, vsize)
        sample_loc_w_tensor, sample_ray_dirs_tensor = self.pers2w(sample_loc_tensor, cam_rot_tensor, cam_pos_tensor)
        return sample_pidx_tensor, sample_loc_tensor, sample_loc_w_tensor, sample_ray_dirs_tensor, ray_mask_tensor, vsize, ranges

    def pers2w(self, point_xyz_pers,
camrotc2w, campos): # point_xyz_pers B X M X 3 x_pers = point_xyz_pers[..., 0] * point_xyz_pers[..., 2] y_pers = point_xyz_pers[..., 1] * point_xyz_pers[..., 2] z_pers = point_xyz_pers[..., 2] xyz_c = torch.stack([x_pers, y_pers, z_pers], dim=-1) xyz_w_shift = torch.sum(xyz_c[...,None,:] * camrotc2w, dim=-1) # print("point_xyz_pers[..., 0, 0]", point_xyz_pers[..., 0, 0].shape, point_xyz_pers[..., 0, 0]) ray_dirs = xyz_w_shift / (torch.linalg.norm(xyz_w_shift, dim=-1, keepdims=True) + 1e-7) xyz_w = xyz_w_shift + campos[:, None, :] return xyz_w, ray_dirs def gaussian(self, input, vsize): B, R, SR, _ = input.shape jitters = torch.normal(mean=torch.zeros([B, R, SR], dtype=torch.float32, device=input.device), std=torch.full([B, R, SR], vsize[2] / 4, dtype=torch.float32, device=input.device)) input[..., 2] = input[..., 2] + torch.clamp(jitters, min=-vsize[2]/2, max=vsize[2]/2) return input def uniform(self, input, vsize): B, R, SR, _ = input.shape jitters = torch.rand([B, R, SR], dtype=torch.float32, device=input.device) - 0.5 input[..., 2] = input[..., 2] + jitters * vsize[2] return input def build_cuda(self): mod = SourceModule( """ #define KN """ + str(self.opt.K) + """ #include <cuda.h> #include <cuda_runtime.h> #include <algorithm> #include <vector> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <curand_kernel.h> namespace cuda { static __device__ inline uint8_t atomicAdd(uint8_t *address, uint8_t val) { size_t offset = (size_t)address & 3; uint32_t *address_as_ui = (uint32_t *)(address - offset); uint32_t old = *address_as_ui; uint32_t shift = offset * 8; uint32_t old_byte; uint32_t newval; uint32_t assumed; do { assumed = old; old_byte = (old >> shift) & 0xff; // preserve size in initial cast. Casting directly to uint32_t pads // negative signed values with 1's (e.g. signed -1 = unsigned ~0). 
newval = static_cast<uint8_t>(val + old_byte); newval = (old & ~(0x000000ff << shift)) | (newval << shift); old = atomicCAS(address_as_ui, assumed, newval); } while (assumed != old); return __byte_perm(old, 0, offset); // need validate } static __device__ inline char atomicAdd(char* address, char val) { // offset, in bytes, of the char* address within the 32-bit address of the space that overlaps it size_t long_address_modulo = (size_t) address & 3; // the 32-bit address that overlaps the same memory auto* base_address = (unsigned int*) ((char*) address - long_address_modulo); // A 0x3210 selector in __byte_perm will simply select all four bytes in the first argument in the same order. // The "4" signifies the position where the first byte of the second argument will end up in the output. unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210}; // for selecting bytes within a 32-bit chunk that correspond to the char* address (relative to base_address) unsigned int selector = selectors[long_address_modulo]; unsigned int long_old, long_assumed, long_val, replacement; long_old = *base_address; do { long_assumed = long_old; // replace bits in long_old that pertain to the char address with those from val long_val = __byte_perm(long_old, 0, long_address_modulo) + val; replacement = __byte_perm(long_old, long_val, selector); long_old = atomicCAS(base_address, long_assumed, replacement); } while (long_old != long_assumed); return __byte_perm(long_old, 0, long_address_modulo); } static __device__ inline int8_t atomicAdd(int8_t *address, int8_t val) { return (int8_t)cuda::atomicAdd((char*)address, (char)val); } static __device__ inline short atomicAdd(short* address, short val) { unsigned int *base_address = (unsigned int *)((size_t)address & ~2); unsigned int long_val = ((size_t)address & 2) ? 
((unsigned int)val << 16) : (unsigned short)val; unsigned int long_old = ::atomicAdd(base_address, long_val); if((size_t)address & 2) { return (short)(long_old >> 16); } else { unsigned int overflow = ((long_old & 0xffff) + long_val) & 0xffff0000; if (overflow) atomicSub(base_address, overflow); return (short)(long_old & 0xffff); } } static __device__ float cas(double *addr, double compare, double val) { unsigned long long int *address_as_ull = (unsigned long long int *) addr; return __longlong_as_double(atomicCAS(address_as_ull, __double_as_longlong(compare), __double_as_longlong(val))); } static __device__ float cas(float *addr, float compare, float val) { unsigned int *address_as_uint = (unsigned int *) addr; return __uint_as_float(atomicCAS(address_as_uint, __float_as_uint(compare), __float_as_uint(val))); } static __device__ inline uint8_t atomicCAS(uint8_t * const address, uint8_t const compare, uint8_t const value) { uint8_t const longAddressModulo = reinterpret_cast< size_t >( address ) & 0x3; uint32_t *const baseAddress = reinterpret_cast< uint32_t * >( address - longAddressModulo ); uint32_t constexpr byteSelection[] = { 0x3214, 0x3240, 0x3410, 0x4210 }; // The byte position we work on is '4'. uint32_t const byteSelector = byteSelection[ longAddressModulo ]; uint32_t const longCompare = compare; uint32_t const longValue = value; uint32_t longOldValue = * baseAddress; uint32_t longAssumed; uint8_t oldValue; do { // Select bytes from the old value and new value to construct a 32-bit value to use. uint32_t const replacement = __byte_perm( longOldValue, longValue, byteSelector ); uint32_t const comparison = __byte_perm( longOldValue, longCompare, byteSelector ); longAssumed = longOldValue; // Use 32-bit atomicCAS() to try and set the 8-bits we care about. longOldValue = ::atomicCAS( baseAddress, comparison, replacement ); // Grab the 8-bit portion we care about from the old value at address. 
oldValue = ( longOldValue >> ( 8 * longAddressModulo )) & 0xFF; } while ( compare == oldValue and longAssumed != longOldValue ); // Repeat until other three 8-bit values stabilize. return oldValue; } } extern "C" { __global__ void get_occ_vox( const float* in_data, // B * N * 3 const int* in_actual_numpoints, // B const int B, const int N, const float *d_coord_shift, // 3 const float *d_voxel_size, // 3 const int *d_grid_size, // 3 const int *kernel_size, // 3 const int pixel_size, const int grid_size_vol, uint8_t *coor_occ, // B * 400 * 400 * 400 int8_t *loc_coor_counter, // B * 400 * 400 * 400 int *near_depth_id_tensor, // B * 400 * 400 int *far_depth_id_tensor, // B * 400 * 400 const int inverse ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / N; // index of batch if (i_batch >= B) { return; } int i_pt = index - N * i_batch; if (i_pt < in_actual_numpoints[i_batch]) { int coor[3]; const float *p_pt = in_data + index * 3; coor[0] = floor((p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]); if (coor[0] < 0 || coor[0] >= d_grid_size[0]) { return; } coor[1] = floor((p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]); if (coor[1] < 0 || coor[1] >= d_grid_size[1]) { return; } float z = p_pt[2]; if (inverse > 0){ z = 1.0 / z;} coor[2] = floor((z - d_coord_shift[2]) / d_voxel_size[2]); if (coor[2] < 0 || coor[2] >= d_grid_size[2]) { return; } int frust_id_b, coor_indx_b = i_batch * grid_size_vol + coor[0] * (d_grid_size[1] * d_grid_size[2]) + coor[1] * d_grid_size[2] + coor[2]; if (loc_coor_counter[coor_indx_b] < (int8_t)0 || cuda::atomicAdd(loc_coor_counter + coor_indx_b, (int8_t)-1) < (int8_t)0) { return; } for (int coor_x = max(0, coor[0] - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], coor[0] + (kernel_size[0] + 1) / 2); coor_x++) { for (int coor_y = max(0, coor[1] - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], coor[1] + (kernel_size[1] + 1) / 2); coor_y++) { for (int coor_z = max(0, coor[2] - kernel_size[2] / 2) 
; coor_z < min(d_grid_size[2], coor[2] + (kernel_size[2] + 1) / 2); coor_z++) { frust_id_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y; coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z; if (coor_occ[coor_indx_b] > (uint8_t)0) { continue; } cuda::atomicCAS(coor_occ + coor_indx_b, (uint8_t)0, (uint8_t)1); atomicMin(near_depth_id_tensor + frust_id_b, coor_z); atomicMax(far_depth_id_tensor + frust_id_b, coor_z); } } } } } __global__ void near_vox_full( const int B, const int SR, const int *pixel_idx, const int R, const int *vscale, const int *d_grid_size, const int pixel_size, const int grid_size_vol, const int *kernel_size, // 3 uint8_t *pixel_map, int8_t *ray_mask, // B * R const uint8_t *coor_occ, // B * 400 * 400 * 400 int8_t *loc_coor_counter, // B * 400 * 400 * 400 const int *near_depth_id_tensor, // B * 400 * 400 const int *far_depth_id_tensor, // B * 400 * 400 short *voxel_to_coorz_idx // B * 400 * 400 * SR ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / R; // index of batch if (i_batch >= B) { return; } int vx_id = pixel_idx[index*2] / vscale[0], vy_id = pixel_idx[index*2 + 1] / vscale[1]; int i_xyvox_id = i_batch * pixel_size + vx_id * d_grid_size[1] + vy_id; int near_id = near_depth_id_tensor[i_xyvox_id], far_id = far_depth_id_tensor[i_xyvox_id]; ray_mask[index] = far_id > 0 ? 
(int8_t)1 : (int8_t)0; if (pixel_map[i_xyvox_id] > (uint8_t)0 || cuda::atomicCAS(pixel_map + i_xyvox_id, (uint8_t)0, (uint8_t)1) > (uint8_t)0) { return; } int counter = 0; for (int depth_id = near_id; depth_id <= far_id; depth_id++) { if (coor_occ[i_xyvox_id * d_grid_size[2] + depth_id] > (uint8_t)0) { voxel_to_coorz_idx[i_xyvox_id * SR + counter] = (short)depth_id; // if (i_xyvox_id>81920){ // printf(" %d %d %d %d %d %d %d %d %d %d ", pixel_idx[index*2], vscale[0], i_batch, vx_id, vy_id, i_xyvox_id * SR + counter, i_xyvox_id, SR, counter, d_grid_size[1]); // } for (int coor_x = max(0, vx_id - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], vx_id + (kernel_size[0] + 1) / 2); coor_x++) { for (int coor_y = max(0, vy_id - kernel_size[1] / 2) ; coor_y < min(d_grid_size[1], vy_id + (kernel_size[1] + 1) / 2); coor_y++) { for (int coor_z = max(0, depth_id - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], depth_id + (kernel_size[2] + 1) / 2); coor_z++) { int coor_indx_b = i_batch * grid_size_vol + coor_x * (d_grid_size[1] * d_grid_size[2]) + coor_y * d_grid_size[2] + coor_z; // cuda::atomicCAS(loc_coor_counter + coor_indx_b, (int8_t)-1, (int8_t)1); int8_t loc = loc_coor_counter[coor_indx_b]; if (loc < (int8_t)0) { loc_coor_counter[coor_indx_b] = (int8_t)1; } } } } if (counter >= SR - 1) { return; } counter += 1; } } } __global__ void insert_vox_points( float* in_data, // B * N * 3 int* in_actual_numpoints, // B const int B, const int N, const int P, const int max_o, const int pixel_size, const int grid_size_vol, const float *d_coord_shift, // 3 const int *d_grid_size, const float *d_voxel_size, // 3 const int8_t *loc_coor_counter, // B * 400 * 400 * 400 short *voxel_pnt_counter, // B * 400 * 400 * max_o int *voxel_to_pntidx, // B * pixel_size * max_o * P unsigned long seconds, const int inverse ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / N; // index of batch if (i_batch >= B) { return; } if (index - i_batch * N 
< in_actual_numpoints[i_batch]) { const float *p_pt = in_data + index * 3; int coor_x = (p_pt[0] - d_coord_shift[0]) / d_voxel_size[0]; int coor_y = (p_pt[1] - d_coord_shift[1]) / d_voxel_size[1]; float z = p_pt[2]; if (inverse > 0){ z = 1.0 / z;} int coor_z = (z - d_coord_shift[2]) / d_voxel_size[2]; int pixel_indx_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y; int coor_indx_b = pixel_indx_b * d_grid_size[2] + coor_z; if (coor_x < 0 || coor_x >= d_grid_size[0] || coor_y < 0 || coor_y >= d_grid_size[1] || coor_z < 0 || coor_z >= d_grid_size[2] || loc_coor_counter[coor_indx_b] < (int8_t)0) { return; } int voxel_indx_b = pixel_indx_b * max_o + (int)loc_coor_counter[coor_indx_b]; //printf("voxel_indx_b, %d || ", voxel_indx_b); int voxel_pntid = (int) cuda::atomicAdd(voxel_pnt_counter + voxel_indx_b, (short)1); if (voxel_pntid < P) { voxel_to_pntidx[voxel_indx_b * P + voxel_pntid] = index; } else { curandState state; curand_init(index+seconds, 0, 0, &state); int insrtidx = ceilf(curand_uniform(&state) * (voxel_pntid+1)) - 1; if(insrtidx < P){ voxel_to_pntidx[voxel_indx_b * P + insrtidx] = index; } } } } __global__ void query_rand_along_ray( const float* in_data, // B * N * 3 const int B, const int SR, // num. samples along each ray e.g., 128 const int R, // e.g., 1024 const int max_o, const int P, const int K, // num. 
neighbors const int pixel_size, const int grid_size_vol, const float radius_limit2, const float depth_limit2, const float *d_coord_shift, // 3 const int *d_grid_size, const float *d_voxel_size, // 3 const float *d_ray_voxel_size, // 3 const int *vscale, // 3 const int *kernel_size, const int *pixel_idx, // B * R * 2 const int8_t *loc_coor_counter, // B * 400 * 400 * 400 const short *voxel_to_coorz_idx, // B * 400 * 400 * SR const short *voxel_pnt_counter, // B * 400 * 400 * max_o const int *voxel_to_pntidx, // B * pixel_size * max_o * P int *sample_pidx, // B * R * SR * K float *sample_loc, // B * R * SR * K unsigned long seconds, const int NN, const int inverse ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / (R * SR); // index of batch int ray_idx_b = index / SR; if (i_batch >= B || ray_idx_b >= B * R) { return; } int ray_sample_loc_idx = index - ray_idx_b * SR; int frustx = pixel_idx[ray_idx_b * 2] / vscale[0]; int frusty = pixel_idx[ray_idx_b * 2 + 1] / vscale[1]; int vxy_ind_b = i_batch * pixel_size + frustx * d_grid_size[1] + frusty; int frustz = (int) voxel_to_coorz_idx[vxy_ind_b * SR + ray_sample_loc_idx]; float centerx = d_coord_shift[0] + frustx * d_voxel_size[0] + (pixel_idx[ray_idx_b * 2] % vscale[0] + 0.5) * d_ray_voxel_size[0]; float centery = d_coord_shift[1] + frusty * d_voxel_size[1] + (pixel_idx[ray_idx_b * 2 + 1] % vscale[1] + 0.5) * d_ray_voxel_size[1]; float centerz = d_coord_shift[2] + (frustz + 0.5) * d_voxel_size[2]; if (inverse > 0){ centerz = 1.0 / centerz;} sample_loc[index * 3] = centerx; sample_loc[index * 3 + 1] = centery; sample_loc[index * 3 + 2] = centerz; if (frustz < 0) { return; } int coor_indx_b = vxy_ind_b * d_grid_size[2] + frustz; int raysample_startid = index * K; int kid = 0; curandState state; for (int coor_x = max(0, frustx - kernel_size[0] / 2) ; coor_x < min(d_grid_size[0], frustx + (kernel_size[0] + 1) / 2); coor_x++) { for (int coor_y = max(0, frusty - kernel_size[1] 
/ 2) ; coor_y < min(d_grid_size[1], frusty + (kernel_size[1] + 1) / 2); coor_y++) { int pixel_indx_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y; for (int coor_z = max(0, frustz - kernel_size[2] / 2) ; coor_z < min(d_grid_size[2], frustz + (kernel_size[2] + 1) / 2); coor_z++) { int shift_coor_indx_b = pixel_indx_b * d_grid_size[2] + coor_z; if(loc_coor_counter[shift_coor_indx_b] < (int8_t)0) {continue;} int voxel_indx_b = pixel_indx_b * max_o + (int)loc_coor_counter[shift_coor_indx_b]; for (int g = 0; g < min(P, (int) voxel_pnt_counter[voxel_indx_b]); g++) { int pidx = voxel_to_pntidx[voxel_indx_b * P + g]; if ((radius_limit2 == 0 || (in_data[pidx*3]-centerx) * (in_data[pidx*3]-centerx) + (in_data[pidx*3 + 1]-centery) * (in_data[pidx*3 + 1]-centery) <= radius_limit2) && (depth_limit2==0 || (in_data[pidx*3 + 2]-centerz) * (in_data[pidx*3 + 2]-centerz) <= depth_limit2)) { if (kid++ < K) { sample_pidx[raysample_startid + kid - 1] = pidx; } else { curand_init(index+seconds, 0, 0, &state); int insrtidx = ceilf(curand_uniform(&state) * (kid)) - 1; if (insrtidx < K) { sample_pidx[raysample_startid + insrtidx] = pidx; } } } } } } } } __global__ void query_neigh_along_ray_layered( const float* in_data, // B * N * 3 const int B, const int SR, // num. samples along each ray e.g., 128 const int R, // e.g., 1024 const int max_o, const int P, const int K, // num. 
neighbors const int pixel_size, const int grid_size_vol, const float radius_limit2, const float depth_limit2, const float *d_coord_shift, // 3 const int *d_grid_size, const float *d_voxel_size, // 3 const float *d_ray_voxel_size, // 3 const int *vscale, // 3 const int *kernel_size, const int *pixel_idx, // B * R * 2 const int8_t *loc_coor_counter, // B * 400 * 400 * 400 const short *voxel_to_coorz_idx, // B * 400 * 400 * SR const short *voxel_pnt_counter, // B * 400 * 400 * max_o const int *voxel_to_pntidx, // B * pixel_size * max_o * P int *sample_pidx, // B * R * SR * K float *sample_loc, // B * R * SR * K unsigned long seconds, const int NN, const int inverse ) { int index = blockIdx.x * blockDim.x + threadIdx.x; // index of gpu thread int i_batch = index / (R * SR); // index of batch int ray_idx_b = index / SR; if (i_batch >= B || ray_idx_b >= B * R) { return; } int ray_sample_loc_idx = index - ray_idx_b * SR; int frustx = pixel_idx[ray_idx_b * 2] / vscale[0]; int frusty = pixel_idx[ray_idx_b * 2 + 1] / vscale[1]; int vxy_ind_b = i_batch * pixel_size + frustx * d_grid_size[1] + frusty; int frustz = (int) voxel_to_coorz_idx[vxy_ind_b * SR + ray_sample_loc_idx]; float centerx = d_coord_shift[0] + frustx * d_voxel_size[0] + (pixel_idx[ray_idx_b * 2] % vscale[0] + 0.5) * d_ray_voxel_size[0]; float centery = d_coord_shift[1] + frusty * d_voxel_size[1] + (pixel_idx[ray_idx_b * 2 + 1] % vscale[1] + 0.5) * d_ray_voxel_size[1]; float centerz = d_coord_shift[2] + (frustz + 0.5) * d_voxel_size[2]; if (inverse > 0){ centerz = 1.0 / centerz;} sample_loc[index * 3] = centerx; sample_loc[index * 3 + 1] = centery; sample_loc[index * 3 + 2] = centerz; if (frustz < 0) { return; } // int coor_indx_b = vxy_ind_b * d_grid_size[2] + frustz; int raysample_startid = index * K; // curandState state; int kid = 0, far_ind = 0, coor_z, coor_y, coor_x; float far2 = 0.0; float xyz2Buffer[KN]; for (int layer = 0; layer < (kernel_size[0]+1)/2; layer++){ int zlayer = 
min((kernel_size[2]+1)/2-1, layer); for (int x = max(-frustx, -layer); x < min(d_grid_size[0] - frustx, layer+1); x++) { for (int y = max(-frusty, -layer); y < min(d_grid_size[1] - frusty, layer+1); y++) { coor_y = frusty + y; coor_x = frustx + x; int pixel_indx_b = i_batch * pixel_size + coor_x * d_grid_size[1] + coor_y; for (int z = max(-frustz, -zlayer); z < min(d_grid_size[2] - frustz, zlayer + 1); z++) { // if (max(abs(x),abs(y)) != layer || abs(z) != zlayer) continue; if (max(abs(x),abs(y)) != layer && ((zlayer == layer) ? (abs(z) != zlayer) : 1)) continue; // if (max(abs(x),abs(y)) != layer) continue; coor_z = z + frustz; int shift_coor_indx_b = pixel_indx_b * d_grid_size[2] + coor_z; if(loc_coor_counter[shift_coor_indx_b] < (int8_t)0) {continue;} int voxel_indx_b = pixel_indx_b * max_o + (int)loc_coor_counter[shift_coor_indx_b]; for (int g = 0; g < min(P, (int) voxel_pnt_counter[voxel_indx_b]); g++) { int pidx = voxel_to_pntidx[voxel_indx_b * P + g]; float x_v = (NN < 2) ? (in_data[pidx*3]-centerx) : (in_data[pidx*3] * in_data[pidx*3+2]-centerx*centerz) ; float y_v = (NN < 2) ? 
(in_data[pidx*3+1]-centery) : (in_data[pidx*3+1] * in_data[pidx*3+2]-centery*centerz) ; float xy2 = x_v * x_v + y_v * y_v; float z2 = (in_data[pidx*3 + 2]-centerz) * (in_data[pidx*3 + 2]-centerz); float xyz2 = xy2 + z2; if ((radius_limit2 == 0 || xy2 <= radius_limit2) && (depth_limit2==0 || z2 <= depth_limit2)){ if (kid++ < K) { sample_pidx[raysample_startid + kid - 1] = pidx; xyz2Buffer[kid-1] = xyz2; if (xyz2 > far2){ far2 = xyz2; far_ind = kid - 1; } } else { if (xyz2 < far2) { sample_pidx[raysample_startid + far_ind] = pidx; xyz2Buffer[far_ind] = xyz2; far2 = xyz2; for (int i = 0; i < K; i++) { if (xyz2Buffer[i] > far2) { far2 = xyz2Buffer[i]; far_ind = i; } } } } } } } } } } } } """, no_extern_c=True) get_occ_vox = mod.get_function("get_occ_vox") near_vox_full = mod.get_function("near_vox_full") insert_vox_points = mod.get_function("insert_vox_points") query_along_ray = mod.get_function("query_neigh_along_ray_layered") if self.opt.NN > 0 else mod.get_function("query_rand_along_ray") return get_occ_vox, near_vox_full, insert_vox_points, query_along_ray def switch_pixel_id(self, pixel_idx_tensor, h): pixel_id = torch.cat([pixel_idx_tensor[..., 0:1], h - 1 - pixel_idx_tensor[..., 1:2]], dim=-1) # print("pixel_id", pixel_id.shape, torch.min(pixel_id, dim=-2)[0], torch.max(pixel_id, dim=-2)[0]) return pixel_id def query_grid_point_index(self, h, w, pixel_idx_tensor, point_xyz_pers_tensor, point_xyz_w_tensor, actual_numpoints_tensor, kernel_size_gpu, query_size_gpu, SR, K, ranges, scaled_vsize, scaled_vdim, vscale, max_o, P, radius_limit, depth_limit, range_gpu, scaled_vsize_gpu, scaled_vdim_gpu, vscale_gpu, kMaxThreadsPerBlock = 1024): device = point_xyz_pers_tensor.device B, N = point_xyz_pers_tensor.shape[0], point_xyz_pers_tensor.shape[1] pixel_size = scaled_vdim[0] * scaled_vdim[1] grid_size_vol = pixel_size * scaled_vdim[2] d_coord_shift = range_gpu[:3] # ray_vsize_gpu = (vsize_gpu / vscale_gpu).astype(np.float32) pixel_idx_cur_tensor = 
pixel_idx_tensor.reshape(B, -1, 2).clone()
        R = pixel_idx_cur_tensor.shape[1]
        # (disabled) verbose dumps of all launch parameters and of the
        # perspective point-cloud extents, kept out of the hot path.
        # --- Pass 1: mark occupied voxels and per-frustum depth bounds. ---
        gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
        coor_occ_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], scaled_vdim[2]], dtype=torch.uint8, device=device)
        loc_coor_counter_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], scaled_vdim[2]], dtype=torch.int8, device=device)
        near_depth_id_tensor = torch.full([B, scaled_vdim[0], scaled_vdim[1]], scaled_vdim[2], dtype=torch.int32, device=device)
        far_depth_id_tensor = torch.full([B, scaled_vdim[0], scaled_vdim[1]], -1, dtype=torch.int32, device=device)
        self.get_occ_vox(
            Holder(point_xyz_pers_tensor),
            Holder(actual_numpoints_tensor),
            np.int32(B),
            np.int32(N),
            d_coord_shift,
            scaled_vsize_gpu,
            scaled_vdim_gpu,
            query_size_gpu,
            np.int32(pixel_size),
            np.int32(grid_size_vol),
            Holder(coor_occ_tensor),
            Holder(loc_coor_counter_tensor),
            Holder(near_depth_id_tensor),
            Holder(far_depth_id_tensor),
            np.int32(self.inverse),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
        # torch.cuda.synchronize()
        # print("near_depth_id_tensor", torch.min(near_depth_id_tensor), torch.max(far_depth_id_tensor), torch.max(loc_coor_counter_tensor), torch.max(torch.sum(coor_occ_tensor, dim=-1)), B*scaled_vdim[0]*scaled_vdim[1]*SR, pixel_size, scaled_vdim, vscale, scaled_vdim_gpu)
        # --- Pass 2: per ray, collect up to SR occupied depth voxels. ---
        gridSize = int((B * R + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
        voxel_to_coorz_idx_tensor = torch.full([B, scaled_vdim[0], scaled_vdim[1], SR], -1, dtype=torch.int16, device=device)
        pixel_map_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1]], dtype=torch.uint8, device=device)
        ray_mask_tensor = torch.zeros([B, R], dtype=torch.int8, device=device)
        self.near_vox_full(
            np.int32(B),
            np.int32(SR),
            # Holder(self.switch_pixel_id(pixel_idx_cur_tensor, h)),
            Holder(pixel_idx_cur_tensor),
            np.int32(R),
            vscale_gpu,
            scaled_vdim_gpu,
            np.int32(pixel_size),
            np.int32(grid_size_vol),
            query_size_gpu,
            Holder(pixel_map_tensor),
            Holder(ray_mask_tensor),
            Holder(coor_occ_tensor),
            Holder(loc_coor_counter_tensor),
            Holder(near_depth_id_tensor),
            Holder(far_depth_id_tensor),
            Holder(voxel_to_coorz_idx_tensor),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
        # torch.cuda.synchronize()
        # print("voxel_to_coorz_idx_tensor max", torch.max(torch.sum(voxel_to_coorz_idx_tensor > -1, dim=-1)))
        # print("scaled_vsize_gpu", scaled_vsize_gpu, scaled_vdim_gpu)
        # print("ray_mask_tensor", ray_mask_tensor.shape, torch.min(ray_mask_tensor), torch.max(ray_mask_tensor))
        # print("pixel_idx_cur_tensor", pixel_idx_cur_tensor.shape, torch.min(pixel_idx_cur_tensor), torch.max(pixel_idx_cur_tensor))
        # Keep only the rays that hit at least one occupied voxel.
        pixel_id_num_tensor = torch.sum(ray_mask_tensor, dim=-1)
        pixel_idx_cur_tensor = torch.masked_select(pixel_idx_cur_tensor, (ray_mask_tensor > 0)[..., None].expand(-1, -1, 2)).reshape(1, -1, 2)
        del coor_occ_tensor, near_depth_id_tensor, far_depth_id_tensor, pixel_map_tensor
        R = torch.max(pixel_id_num_tensor).cpu().numpy()
        # print("loc_coor_counter_tensor", loc_coor_counter_tensor.shape)
        # Turn the occupancy flags into running per-column voxel slot ids
        # (-1 for unused voxels) for the insertion pass.
        loc_coor_counter_tensor = (loc_coor_counter_tensor > 0).to(torch.int8)
        loc_coor_counter_tensor = loc_coor_counter_tensor * torch.cumsum(loc_coor_counter_tensor, dtype=torch.int8, dim=-1) - 1
        if max_o is None:
            max_o = torch.max(loc_coor_counter_tensor).cpu().numpy().astype(np.int32) + 1
        # print("max_o", max_o)
        # --- Pass 3: bucket points into their voxels (reservoir-capped at P). ---
        voxel_pnt_counter_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], max_o], dtype=torch.int16, device=device)
        voxel_to_pntidx_tensor = torch.zeros([B, scaled_vdim[0], scaled_vdim[1], max_o, P], dtype=torch.int32, device=device)
        gridSize = int((B * N + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
        ray_vsize_gpu = (scaled_vsize_gpu / vscale_gpu).astype(np.float32)
        seconds = time.time()  # RNG seed for the in-kernel reservoir sampling
        self.insert_vox_points(
            Holder(point_xyz_pers_tensor),
            Holder(actual_numpoints_tensor),
            np.int32(B),
            np.int32(N),
            np.int32(P),
            np.int32(max_o),
            np.int32(pixel_size),
            np.int32(grid_size_vol),
            d_coord_shift,
            scaled_vdim_gpu,
            scaled_vsize_gpu,
            Holder(loc_coor_counter_tensor),
            Holder(voxel_pnt_counter_tensor),
            Holder(voxel_to_pntidx_tensor),
            np.uint64(seconds),
            np.int32(self.inverse),
            block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
        # torch.cuda.synchronize()
        # print("loc_coor_counter_tensor", loc_coor_counter_tensor.shape, torch.min(loc_coor_counter_tensor), torch.max(loc_coor_counter_tensor))
        # print("voxel_pnt_counter_tensor", voxel_pnt_counter_tensor.shape, torch.min(voxel_pnt_counter_tensor), torch.max(voxel_pnt_counter_tensor))
        # print("voxel_to_pntidx_tensor", voxel_to_pntidx_tensor.shape, torch.min(voxel_to_pntidx_tensor), torch.max(voxel_to_pntidx_tensor))
        # --- Pass 4: per ray sample, gather up to K neighbor point indices. ---
        sample_pidx_tensor = torch.full([B, R, SR, K], -1, dtype=torch.int32, device=device)
        sample_loc_tensor = torch.full([B, R, SR, 3], 0.0, dtype=torch.float32, device=device)
        gridSize = int((R * SR + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock)
        seconds = time.time()
        # (disabled) shape dump of every launch argument.
        if R > 0:
            self.query_along_ray(
                Holder(point_xyz_pers_tensor),
                np.int32(B),
                np.int32(SR),
                np.int32(R),
                np.int32(max_o),
                np.int32(P),
                np.int32(K),
                np.int32(pixel_size),
                np.int32(grid_size_vol),
                np.float32(radius_limit ** 2),
                np.float32(depth_limit ** 2),
                d_coord_shift,
                scaled_vdim_gpu,
                scaled_vsize_gpu,
                ray_vsize_gpu,
                vscale_gpu,
                kernel_size_gpu,
                # Holder(self.switch_pixel_id(pixel_idx_cur_tensor, h)),
                Holder(pixel_idx_cur_tensor),
                Holder(loc_coor_counter_tensor),
                Holder(voxel_to_coorz_idx_tensor),
                Holder(voxel_pnt_counter_tensor),
                Holder(voxel_to_pntidx_tensor),
                Holder(sample_pidx_tensor),
                Holder(sample_loc_tensor),
                np.uint64(seconds),
                np.int32(self.opt.NN),
                np.int32(self.inverse),
                block=(kMaxThreadsPerBlock, 1, 1), grid=(gridSize, 1))
        # torch.cuda.synchronize()
        # print("max_o", max_o)
        # print("voxel_pnt_counter", torch.max(voxel_pnt_counter_tensor))
        # print("sample_pidx_tensor", torch.max(torch.sum(sample_pidx_tensor >= 0, dim=-1)))
        # print("sample_pidx_tensor min max", torch.min(sample_pidx_tensor), torch.max(sample_pidx_tensor))
        # print("sample_pidx_tensor", sample_pidx_tensor.shape, sample_pidx_tensor[0,80,3], sample_pidx_tensor[0,80,6], sample_pidx_tensor[0,80,9])
        # print("sample_pidx_tensor, sample_loc_tensor, pixel_idx_cur_tensor, ray_mask_tensor", sample_pidx_tensor.shape, sample_loc_tensor.shape, pixel_idx_cur_tensor.shape, ray_mask_tensor.shape)
        return sample_pidx_tensor, sample_loc_tensor, pixel_idx_cur_tensor, ray_mask_tensor


def load_pnts(point_path, point_num):
    """Load a pickled point cloud, shuffle it, and return at most point_num points."""
    with open(point_path, 'rb') as f:
        print("point_file_path################", point_path)
        all_infos = pickle.load(f)
        point_xyz = all_infos["point_xyz"]
    print(len(point_xyz), point_xyz.dtype, np.mean(point_xyz, axis=0), np.min(point_xyz, axis=0), np.max(point_xyz, axis=0))
    np.random.shuffle(point_xyz)
    return point_xyz[:min(len(point_xyz), point_num), :]


def np_to_gpuarray(*args):
    """Upload each numpy array argument to a pycuda gpuarray (others are skipped with a log line)."""
    result = []
    for x in args:
        if isinstance(x, np.ndarray):
            result.append(pycuda.gpuarray.to_gpu(x))
        else:
            print("trans", x)
    return result


def try_build(point_file, point_dir, ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj, radius_limit,
depth_limit, split=["train"], imgidx=0, gpu=0): point_path = os.path.join(point_dir, point_file) point_xyz = load_pnts(point_path, 819200000) # 81920 233872 imgs, poses, _, hwf, _ = load_blender_data( os.path.expandvars("${nrDataRoot}") + "/nerf/nerf_synthetic/{}".format(obj), split, half_res=False, testskip=1) H, W, focal = hwf plt.figure() plt.imshow(imgs[imgidx]) point_xyz_pers = w2img(point_xyz, poses[imgidx], focal) point_xyz_tensor = torch.as_tensor(point_xyz, device="cuda:{}".format(gpu))[None, ...] # plt.show() point_xyz_pers_tensor = torch.as_tensor(point_xyz_pers, device="cuda:{}".format(gpu))[None, ...] actual_numpoints_tensor = torch.ones([1], device=point_xyz_tensor.device, dtype=torch.int32) * len(point_xyz) scaled_vsize = (vsize * vscale).astype(np.float32) scaled_vdim = np.ceil(vdim / vscale).astype(np.int32) print("vsize", vsize, "vdim", vdim, "scaled_vdim", scaled_vdim) range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, kernel_size_gpu = np_to_gpuarray(ranges, scaled_vsize, scaled_vdim, vscale, kernel_size) pixel_idx_tensor = torch.as_tensor(pixel_idx, device="cuda:{}".format(gpu), dtype=torch.int32)[None, ...] sample_pidx_tensor, pixel_idx_cur_tensor = build_grid_point_index(pixel_idx_tensor, point_xyz_pers_tensor, actual_numpoints_tensor, kernel_size_gpu, SR, K, ranges, scaled_vsize, scaled_vdim, vscale, max_o, P, radius_limit, depth_limit, range_gpu, vsize_gpu, vdim_gpu, vscale_gpu, gpu=gpu) save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor, pixel_idx_cur_tensor, vdim, vsize, ranges) def w2img(point_xyz, transform_matrix, focal): camrot = transform_matrix[:3, :3] # world 2 cam campos = transform_matrix[:3, 3] # point_xyz_shift = point_xyz - campos[None, :] # xyz = np.sum(point_xyz_shift[:,None,:] * camrot.T, axis=-1) xyz = np.sum(camrot[None, ...] * point_xyz_shift[:, :, None], axis=-2) # print(xyz.shape, np.sum(camrot[None, None, ...] 
* point_xyz_shift[:,:,None], axis=-2).shape) xper = xyz[:, 0] / -xyz[:, 2] yper = xyz[:, 1] / xyz[:, 2] x_pixel = np.round(xper * focal + 400).astype(np.int32) y_pixel = np.round(yper * focal + 400).astype(np.int32) print("focal", focal, np.tan(.5 * 0.6911112070083618)) print("pixel xmax xmin:", np.max(x_pixel), np.min(x_pixel), "pixel ymax ymin:", np.max(y_pixel), np.min(y_pixel)) print("per xmax xmin:", np.max(xper), np.min(xper), "per ymax ymin:", np.max(yper), np.min(yper), "per zmax zmin:", np.max(xyz[:, 2]), np.min(xyz[:, 2])) print("min perx", -400 / focal, "max perx", 400 / focal) background = np.ones([800, 800, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .2 plt.figure() plt.imshow(background) return np.stack([xper, yper, -xyz[:, 2]], axis=-1) def render_mask_pers_points(queried_point_xyz, vsize, ranges, w, h): pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32) print(pixel_xy_inds.shape) y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0] background = np.ones([h, w, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .5 plt.figure() plt.imshow(background) def save_mask_pers_points(queried_point_xyz, vsize, ranges, w, h): pixel_xy_inds = np.floor((queried_point_xyz[:, :2] - ranges[None, :2]) / vsize[None, :2]).astype(np.int32) print(pixel_xy_inds.shape) y_pixel, x_pixel = pixel_xy_inds[:, 1], pixel_xy_inds[:, 0] background = np.ones([h, w, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .5 image_dir = os.path.join(self.opt.checkpoints_dir, opt.name, 'images') image_file = os.path.join(image_dir) def render_pixel_mask(pixel_xy_inds, w, h): y_pixel, x_pixel = pixel_xy_inds[0, :, 1], pixel_xy_inds[0, :, 0] background = np.ones([h, w, 3], dtype=np.float32) background[y_pixel, x_pixel, :] = .0 plt.figure() plt.imshow(background) def save_queried_points(point_xyz_tensor, point_xyz_pers_tensor, sample_pidx_tensor, pixel_idx_tensor, pixel_idx_cur_tensor, vdim, vsize, ranges): B, 
R, SR, K = sample_pidx_tensor.shape # pixel_inds = torch.as_tensor([3210, 3217,3218,3219,3220, 3221,3222,3223,3224,3225,3226,3227,3228,3229,3230, 3231,3232,3233,3234,3235, 3236,3237,3238,3239,3240], device=sample_pidx_tensor.device, dtype=torch.int64) point_inds = sample_pidx_tensor[0, :, :, :] # point_inds = sample_pidx_tensor[0, pixel_inds, :, :] mask = point_inds > -1 point_inds = torch.masked_select(point_inds, mask).to(torch.int64) queried_point_xyz_tensor = point_xyz_tensor[0, point_inds, :] queried_point_xyz = queried_point_xyz_tensor.cpu().numpy() print("queried_point_xyz.shape", B, R, SR, K, point_inds.shape, queried_point_xyz_tensor.shape, queried_point_xyz.shape) print("pixel_idx_cur_tensor", pixel_idx_cur_tensor.shape) render_pixel_mask(pixel_idx_cur_tensor.cpu().numpy(), vdim[0], vdim[1]) render_mask_pers_points(point_xyz_pers_tensor[0, point_inds, :].cpu().numpy(), vsize, ranges, vdim[0], vdim[1]) plt.show() if __name__ == "__main__": obj = "lego" point_file = "{}.pkl".format(obj) point_dir = os.path.expandvars("${nrDataRoot}/nerf/nerf_synthetic_points/") r = 0.36000002589322094 ranges = np.array([-r, -r, 2., r, r, 6.], dtype=np.float32) vdim = np.array([800, 800, 400], dtype=np.int32) vsize = np.array([2 * r / vdim[0], 2 * r / vdim[1], 4. / vdim[2]], dtype=np.float32) vscale = np.array([2, 2, 1], dtype=np.int32) SR = 24 P = 16 kernel_size = np.array([5, 5, 1], dtype=np.int32) radius_limit = 0 # r / 400 * 5 #r / 400 * 5 depth_limit = 0 # 4. 
/ 400 * 1.5 # r / 400 * 2 max_o = None K = 32 xrange = np.arange(0, 800, 1, dtype=np.int32) yrange = np.arange(0, 800, 1, dtype=np.int32) xv, yv = np.meshgrid(xrange, yrange, sparse=False, indexing='ij') pixel_idx = np.stack([xv, yv], axis=-1).reshape(-1, 2) # 20000 * 2 gpu = 0 imgidx = 3 split = ["train"] if gpu < 0: import pycuda.autoinit else: drv.init() dev1 = drv.Device(gpu) ctx1 = dev1.make_context() try_build(point_file, point_dir, ranges, vsize, vdim, vscale, max_o, P, kernel_size, SR, K, pixel_idx, obj, radius_limit, depth_limit, split=split, imgidx=imgidx, gpu=0)
55,075
57.467091
475
py
pointnerf
pointnerf-master/models/depth_estimators/mvsnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from .module import *


class FeatureNet(nn.Module):
    """2D feature extractor: RGB image -> 32-channel feature map at 1/4 resolution."""

    def __init__(self):
        super(FeatureNet, self).__init__()
        self.inplanes = 32
        # Two stride-2 convs (conv2, conv5) give the overall 4x downsampling.
        self.conv0 = ConvBnReLU(3, 8, 3, 1, 1)
        self.conv1 = ConvBnReLU(8, 8, 3, 1, 1)
        self.conv2 = ConvBnReLU(8, 16, 5, 2, 2)
        self.conv3 = ConvBnReLU(16, 16, 3, 1, 1)
        self.conv4 = ConvBnReLU(16, 16, 3, 1, 1)
        self.conv5 = ConvBnReLU(16, 32, 5, 2, 2)
        self.conv6 = ConvBnReLU(32, 32, 3, 1, 1)
        self.feature = nn.Conv2d(32, 32, 3, 1, 1)

    def forward(self, x):
        """x: [B, 3, H, W] -> features [B, 32, H/4, W/4]."""
        x = self.conv1(self.conv0(x))
        x = self.conv4(self.conv3(self.conv2(x)))
        x = self.feature(self.conv6(self.conv5(x)))
        return x


class CostRegNet(nn.Module):
    """3D U-Net that regularizes the cost volume into a 1-channel score volume."""

    def __init__(self):
        super(CostRegNet, self).__init__()
        self.conv0 = ConvBnReLU3D(32, 8)
        self.conv1 = ConvBnReLU3D(8, 16, stride=2)
        self.conv2 = ConvBnReLU3D(16, 16)
        self.conv3 = ConvBnReLU3D(16, 32, stride=2)
        self.conv4 = ConvBnReLU3D(32, 32)
        self.conv5 = ConvBnReLU3D(32, 64, stride=2)
        self.conv6 = ConvBnReLU3D(64, 64)
        self.conv7 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(32),
            nn.ReLU(inplace=True))
        self.conv9 = nn.Sequential(
            nn.ConvTranspose3d(32, 16, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(16),
            nn.ReLU(inplace=True))
        self.conv11 = nn.Sequential(
            nn.ConvTranspose3d(16, 8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(8),
            nn.ReLU(inplace=True))
        self.prob = nn.Conv3d(8, 1, 3, stride=1, padding=1)

    def forward(self, x):
        # Encoder path; intermediate activations feed the decoder skip-adds below.
        conv0 = self.conv0(x)
        conv2 = self.conv2(self.conv1(conv0))
        conv4 = self.conv4(self.conv3(conv2))
        x = self.conv6(self.conv5(conv4))
        # Decoder with additive skip connections.
        x = conv4 + self.conv7(x)
        x = conv2 + self.conv9(x)
        x = conv0 + self.conv11(x)
        x = self.prob(x)
        return x


class RefineNet(nn.Module):
    """Residual depth refinement from the reference image + initial depth (3+1 input channels)."""

    def __init__(self):
        super(RefineNet, self).__init__()
        self.conv1 = ConvBnReLU(4, 32)
        self.conv2 = ConvBnReLU(32, 32)
        self.conv3 = ConvBnReLU(32, 32)
        self.res = ConvBnReLU(32, 1)

    def forward(self, img, depth_init):
        """img: [B, 3, H, W]; depth_init: [B, 1, H, W] -> refined depth [B, 1, H, W]."""
        # BUG FIX: the original called F.cat, which does not exist in
        # torch.nn.functional; concatenation lives in torch.cat.
        concat = torch.cat((img, depth_init), dim=1)
        depth_residual = self.res(self.conv3(self.conv2(self.conv1(concat))))
        depth_refined = depth_init + depth_residual
        return depth_refined


class MVSNet(nn.Module):
    """MVSNet pipeline: per-view features, plane-sweep variance cost volume,
    3D regularization, soft-argmin depth regression and optional refinement."""

    def __init__(self, refine=False):
        super(MVSNet, self).__init__()
        self.refine = refine
        self.feature = FeatureNet()
        self.cost_regularization = CostRegNet()
        if self.refine:
            self.refine_network = RefineNet()

    def forward(self, imgs, proj_matrices, depth_values, features=None, prob_only=False):
        """imgs: [B, V, 3, H, W]; proj_matrices: [B, V, ...]; depth_values: [B, D].

        Returns (depth, photometric_confidence, features, prob_volume) when
        refine is off, or a dict including "refined_depth" when it is on.
        Pass precomputed `features` to skip extraction; `prob_only=True`
        short-circuits after the probability volume.
        """
        imgs = torch.unbind(imgs, 1)
        num_depth = depth_values.shape[1]
        num_views = len(imgs)

        # step 1. feature extraction
        # in: images; out: 32-channel feature maps
        if features is None:
            features = [self.feature(img) for img in imgs]

        # step 2. differentiable homography, build the variance cost volume
        volume_sum = 0
        volume_sq_sum = 0
        for vid in range(num_views):
            warped_volume = homo_warping(features[vid], proj_matrices[:, vid], depth_values)
            if self.training:
                # out-of-place ops keep the autograd graph intact
                volume_sum = volume_sum + warped_volume
                volume_sq_sum = volume_sq_sum + warped_volume ** 2
            else:
                # in-place ops save memory at eval time
                volume_sum += warped_volume
                volume_sq_sum += warped_volume.pow_(2)  # pow_ clobbers warped_volume
            del warped_volume
        # variance = E[x^2] - E[x]^2 across views
        volume_variance = volume_sq_sum.div_(num_views).sub_(volume_sum.div_(num_views).pow_(2))

        # step 3. cost volume regularization -> per-pixel depth probability
        cost_reg = self.cost_regularization(volume_variance)
        cost_reg = cost_reg.squeeze(1)
        prob_volume = F.softmax(cost_reg, dim=1)
        if prob_only:
            return features, prob_volume, cost_reg

        depth = depth_regression(prob_volume, depth_values=depth_values)

        with torch.no_grad():
            # photometric confidence: probability mass inside a 4-sample
            # depth window centered on the regressed depth index
            prob_volume_sum4 = 4 * F.avg_pool3d(F.pad(prob_volume.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)),
                                                (4, 1, 1), stride=1, padding=0).squeeze(1)
            depth_index = depth_regression(
                prob_volume,
                depth_values=torch.arange(num_depth, device=prob_volume.device, dtype=torch.float)).long()
            photometric_confidence = torch.gather(prob_volume_sum4, 1, depth_index.unsqueeze(1)).squeeze(1)

        # step 4. depth map refinement
        if not self.refine:
            return depth, photometric_confidence, features, prob_volume
        else:
            # BUG FIX: RefineNet.forward takes (img, depth_init) and does the
            # concatenation itself; the original passed one pre-concatenated
            # tensor (with a 3D depth), which raised at call time. The depth
            # map gains a channel dim to match RefineNet's 4-channel input.
            refined_depth = self.refine_network(imgs[0], depth.unsqueeze(1))
            return {"depth": depth, "refined_depth": refined_depth,
                    "photometric_confidence": photometric_confidence}


def mvsnet_loss(depth_est, depth_gt, mask):
    """Mean smooth-L1 loss over pixels where mask > 0.5.

    BUG FIX: `size_average=True` is deprecated (removed in recent torch);
    `reduction='mean'` is the equivalent modern spelling.
    """
    mask = mask > 0.5
    return F.smooth_l1_loss(depth_est[mask], depth_gt[mask], reduction='mean')
5,505
35.95302
147
py
pointnerf
pointnerf-master/models/depth_estimators/module.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvBnReLU(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU; the conv has no bias since BN supplies the shift."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBnReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)


class ConvBn(nn.Module):
    """Conv2d -> BatchNorm2d with no activation (for residual branches etc.)."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBn, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return self.bn(self.conv(x))


class ConvBnReLU3D(nn.Module):
    """3D variant of ConvBnReLU for cost-volume processing."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBnReLU3D, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm3d(out_channels)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)


def homo_warping(src_fea, proj, depth_values):
    """Warp source features onto the reference view for every depth hypothesis.

    src_fea: [B, C, H, W] source-view feature map
    proj: [B, 4, 4] combined projection taking reference pixel coordinates
          (scaled by depth) into the source view
    depth_values: [B, Ndepth]
    out: [B, C, Ndepth, H, W]
    """
    batch, channels = src_fea.shape[0], src_fea.shape[1]
    num_depth = depth_values.shape[1]
    height, width = src_fea.shape[2], src_fea.shape[3]
    with torch.no_grad():
        rot = proj[:, :3, :3]  # [B,3,3]
        trans = proj[:, :3, 3:4]  # [B,3,1]

        # Homogeneous pixel grid of the reference view, flattened to [3, H*W].
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=src_fea.device),
                               torch.arange(0, width, dtype=torch.float32, device=src_fea.device)])
        y, x = y.contiguous(), x.contiguous()
        y, x = y.view(height * width), x.view(height * width)
        xyz = torch.stack((x, y, torch.ones_like(x)))  # [3, H*W]
        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)  # [B, 3, H*W]
        rot_xyz = torch.matmul(rot, xyz)  # [B, 3, H*W]
        # Sweep every depth hypothesis: scale rotated rays by depth, then translate.
        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_values.view(batch, 1, num_depth, 1)  # [B, 3, Ndepth, H*W]
        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]
        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # perspective divide -> [B, 2, Ndepth, H*W]
        # Normalize to grid_sample's [-1, 1] coordinate convention.
        # NOTE(review): this (W-1)/2 normalization matches align_corners=True
        # sampling; grid_sample is called with the library default — confirm
        # which convention training used before changing either side.
        proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
        proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)  # [B, Ndepth, H*W, 2]
        grid = proj_xy

    warped_src_fea = F.grid_sample(src_fea, grid.view(batch, num_depth * height, width, 2), mode='bilinear',
                                   padding_mode='zeros')
    warped_src_fea = warped_src_fea.view(batch, channels, num_depth, height, width)
    return warped_src_fea


def depth_regression(p, depth_values):
    """Soft-argmin depth: expectation of depth_values under probability volume p.

    p: probability volume [B, D, H, W]; depth_values: discrete depth values [B, D]
    Returns [B, H, W].
    """
    depth_values = depth_values.view(*depth_values.shape, 1, 1)
    depth = torch.sum(p * depth_values, 1)
    return depth


if __name__ == "__main__":
    # some testing code, just IGNORE it
    from datasets import find_dataset_def
    from torch.utils.data import DataLoader
    import numpy as np
    import cv2

    MVSDataset = find_dataset_def("dtu_yao")
    dataset = MVSDataset("/home/xyguo/dataset/dtu_mvs/processed/mvs_training/dtu/", '../lists/dtu/train.txt', 'train',
                         3, 256)
    dataloader = DataLoader(dataset, batch_size=2)
    item = next(iter(dataloader))

    imgs = item["imgs"][:, :, :, ::4, ::4].cuda()
    proj_matrices = item["proj_matrices"].cuda()
    mask = item["mask"].cuda()
    depth = item["depth"].cuda()
    depth_values = item["depth_values"].cuda()

    imgs = torch.unbind(imgs, 1)
    proj_matrices = torch.unbind(proj_matrices, 1)
    ref_img, src_imgs = imgs[0], imgs[1:]
    ref_proj, src_projs = proj_matrices[0], proj_matrices[1:]

    # BUG FIX: homo_warping takes (src_fea, proj, depth_values) in this fork,
    # but the demo still used the stale 4-argument (src_proj, ref_proj) form.
    # The combined matrix src_proj @ ref_proj^-1 matches the ground-truth
    # computation further below (inv(ref_proj) followed by src_proj).
    warped_imgs = homo_warping(src_imgs[0], torch.matmul(src_projs[0], torch.inverse(ref_proj)), depth_values)

    cv2.imwrite('../tmp/ref.png', ref_img.permute([0, 2, 3, 1])[0].detach().cpu().numpy()[:, :, ::-1] * 255)
    cv2.imwrite('../tmp/src.png', src_imgs[0].permute([0, 2, 3, 1])[0].detach().cpu().numpy()[:, :, ::-1] * 255)

    for i in range(warped_imgs.shape[2]):
        warped_img = warped_imgs[:, :, i, :, :].permute([0, 2, 3, 1]).contiguous()
        img_np = warped_img[0].detach().cpu().numpy()
        cv2.imwrite('../tmp/tmp{}.png'.format(i), img_np[:, :, ::-1] * 255)

    # generate gt
    def tocpu(x):
        return x.detach().cpu().numpy().copy()

    ref_img = tocpu(ref_img)[0].transpose([1, 2, 0])
    src_imgs = [tocpu(x)[0].transpose([1, 2, 0]) for x in src_imgs]
    ref_proj_mat = tocpu(ref_proj)[0]
    src_proj_mats = [tocpu(x)[0] for x in src_projs]
    mask = tocpu(mask)[0]
    depth = tocpu(depth)[0]
    depth_values = tocpu(depth_values)[0]

    for i, D in enumerate(depth_values):
        height = ref_img.shape[0]
        width = ref_img.shape[1]
        xx, yy = np.meshgrid(np.arange(0, width), np.arange(0, height))
        print("yy", yy.max(), yy.min())
        yy = yy.reshape([-1])
        xx = xx.reshape([-1])
        X = np.vstack((xx, yy, np.ones_like(xx)))
        # D = depth.reshape([-1])
        # print("X", "D", X.shape, D.shape)
        X = np.vstack((X * D, np.ones_like(xx)))
        X = np.matmul(np.linalg.inv(ref_proj_mat), X)
        X = np.matmul(src_proj_mats[0], X)
        X /= X[2]
        X = X[:2]

        yy = X[0].reshape([height, width]).astype(np.float32)
        xx = X[1].reshape([height, width]).astype(np.float32)

        warped = cv2.remap(src_imgs[0], yy, xx, interpolation=cv2.INTER_LINEAR)
        # warped[mask[:, :] < 0.5] = 0

        cv2.imwrite('../tmp/tmp{}_gt.png'.format(i), warped[:, :, ::-1] * 255)
6,155
39.235294
118
py
pointnerf
pointnerf-master/models/helpers/geometrics.py
import torch


def homogenize(m):
    """Adds homogeneous coordinates to a [..., N, N] matrix, returning [..., N+1, N+1]."""
    assert m.shape[-1] == m.shape[-2]  # Must be square
    n = m.shape[-1]
    # BUG FIX: build the identity on the input's device/dtype instead of
    # hard-coding .cuda(), so the helper also works for CPU or half tensors.
    eye_n_plus_1 = torch.eye(n + 1, device=m.device, dtype=m.dtype).expand(list(m.shape[:-2]) + [-1, -1])
    extra_col = eye_n_plus_1[..., :-1, -1:]  # zero column appended on the right
    extra_row = eye_n_plus_1[..., -1:, :]    # bottom row (0, ..., 0, 1)
    including_col = torch.cat([m, extra_col], dim=-1)
    return torch.cat([including_col, extra_row], dim=-2)


def compute_world2local_dist(dists, radii, rotations):
    """Computes a transformation to the local element frames for encoding.

    dists: [..., 3] offsets from the element centers.
    radii: [..., 3] per-axis scales of the elements.
    rotations: [..., 3] roll/pitch/yaw angles in radians.
    Returns [..., 3, 1]: diag(1 / radii) @ R(rotations) @ dists.
    """
    # We assume the center is an XYZ position for this transformation:
    # TODO(kgenova) Update this transformation to account for rotation.
    dists = dists[..., None]

    rotation = roll_pitch_yaw_to_rotation_matrices(rotations)
    assert rotation.shape[-2:] == (3, 3)

    # Per-axis scaling; the epsilon guards against zero radii.
    diag = 1.0 / (radii + 1e-8)
    scale = torch.diag_embed(diag)

    # Apply both transformations and return the transformed points.
    tx3x3 = torch.matmul(scale, rotation)
    return torch.matmul(tx3x3, dists)


def roll_pitch_yaw_to_rotation_matrices(roll_pitch_yaw):
    """Converts roll-pitch-yaw angles to rotation matrices.

    Args:
      roll_pitch_yaw: Tensor with shape [..., 3]. The last dimension contains
        the roll, pitch, and yaw angles in radians. The resulting matrix
        rotates points by first applying roll around the x-axis, then pitch
        around the y-axis, then yaw around the z-axis.

    Returns:
      Tensor with shape [..., 3, 3]. The 3x3 rotation matrices corresponding
      to the input roll-pitch-yaw angles.
    """
    cosines = torch.cos(roll_pitch_yaw)
    sines = torch.sin(roll_pitch_yaw)
    cx, cy, cz = torch.unbind(cosines, dim=-1)
    sx, sy, sz = torch.unbind(sines, dim=-1)
    # pyformat: disable
    rotation = torch.stack(
        [cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx,
         sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx,
         -sy, cy * sx, cy * cx], dim=-1)
    # pyformat: enable
    shape = list(roll_pitch_yaw.shape[:-1]) + [3, 3]
    rotation = torch.reshape(rotation, shape)
    return rotation
2,890
39.71831
125
py
pointnerf
pointnerf-master/models/helpers/networks.py
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import torch.nn.functional as F
import numpy as np


def get_nonlinearity_layer(activation_type='PReLU'):
    """Return an activation module by name; raises NotImplementedError for unknown names."""
    if activation_type == 'ReLU':
        nonlinearity_layer = nn.ReLU(True)
    elif activation_type == 'SELU':
        nonlinearity_layer = nn.SELU(True)
    elif activation_type == 'LeakyReLU':
        nonlinearity_layer = nn.LeakyReLU(0.1, True)
    elif activation_type == 'PReLU':
        nonlinearity_layer = nn.PReLU()
    else:
        raise NotImplementedError('activation layer [{}] is not found'.format(activation_type))
    return nonlinearity_layer


def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer factory (callable on num_channels), or None for 'none'."""
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
        # norm_layer = functools.partial(nn.InstanceNorm2d, affine=True, track_running_stats=True)
    elif norm_type == 'group':
        # NOTE(review): nn.GroupNorm's first positional parameter is num_groups,
        # so calling this partial as norm_layer(num_channels) positionally would
        # collide with the num_groups keyword; callers presumably pass
        # num_channels by keyword — confirm before changing.
        norm_layer = functools.partial(nn.GroupNorm, num_groups=16, affine=True)
    elif norm_type == 'layer':
        norm_layer = nn.LayerNorm
    elif norm_type == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer


def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for `optimizer` according to `opt.lr_policy`.

    Supported policies: 'lambda' (linear decay after opt.niter), 'step',
    'plateau', 'iter_exponential_decay', 'cosine_annealing'.
    Raises NotImplementedError for unknown policies.
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(it):
            lr_l = 1.0 - max(0, it - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'iter_exponential_decay':
        def lambda_rule(it):
            lr_l = pow(opt.lr_decay_exp, it / opt.lr_decay_iters)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'cosine_annealing':
        # BUG FIX: the original referenced an undefined bare name
        # `CosineAnnealingLR` and `self.args` inside a free function, so this
        # branch always raised NameError. T_max now comes from opt.num_epochs,
        # mirroring the original intent (`self.args.num_epochs`) — confirm the
        # option name against the options parser.
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.num_epochs, eta_min=1e-7)
    else:
        # BUG FIX: the original *returned* a NotImplementedError instance
        # (with printf-style args that were never formatted) instead of raising.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler


def get_xavier_multiplier(m, gain):
    """Xavier std-dev for the supported conv/linear module types; None for others."""
    if isinstance(m, nn.Conv1d):
        ksize = m.kernel_size[0]
        n1 = m.in_channels
        n2 = m.out_channels
        std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
    elif isinstance(m, nn.ConvTranspose1d):
        # Transposed convs divide by stride: each output position sees
        # ksize/stride taps on average.
        ksize = m.kernel_size[0] // m.stride[0]
        n1 = m.in_channels
        n2 = m.out_channels
        std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
    elif isinstance(m, nn.Conv2d):
        ksize = m.kernel_size[0] * m.kernel_size[1]
        n1 = m.in_channels
        n2 = m.out_channels
        std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
    elif isinstance(m, nn.ConvTranspose2d):
        ksize = m.kernel_size[0] * m.kernel_size[1] // m.stride[0] // m.stride[1]
        n1 = m.in_channels
        n2 = m.out_channels
        std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
    elif isinstance(m, nn.Conv3d):
        ksize = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2]
        n1 = m.in_channels
        n2 = m.out_channels
        std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
    elif isinstance(m, nn.ConvTranspose3d):
        ksize = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] // m.stride[0] // m.stride[
            1] // m.stride[2]
        n1 = m.in_channels
        n2 = m.out_channels
        std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
    elif isinstance(m, nn.Linear):
        n1 = m.in_features
        n2 = m.out_features
        std = gain * np.sqrt(2.0 / (n1 + n2))
    else:
        return None
    return std


def xavier_uniform_(m, gain):
    """In-place Xavier-uniform init of m.weight using get_xavier_multiplier's std."""
    std = get_xavier_multiplier(m, gain)
    # uniform on [-std*sqrt(3), std*sqrt(3)] has standard deviation `std`
    m.weight.data.uniform_(-std * np.sqrt(3.0), std * np.sqrt(3.0))


def init_weights(net, init_type='xavier_uniform', gain=1):
    """Initialize every Conv*/Linear weight in `net` with the chosen scheme.

    Biases are zeroed; BatchNorm2d weights are drawn from N(1, gain).
    Raises NotImplementedError for an unknown init_type.
    """
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'xavier_uniform':
                xavier_uniform_(m, gain)
            elif init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [{}] is not implemented'.format(init_type))

            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)

    net.apply(init_func)


def init_seq(s, init_type='xavier_uniform'):
    """Initialize a sequential model, scaling each layer's gain by the activation that follows it."""
    for a, b in zip(s[:-1], s[1:]):
        if isinstance(b, nn.ReLU):
            init_weights(a, init_type, nn.init.calculate_gain('relu'))
        elif isinstance(b, nn.LeakyReLU):
            init_weights(a, init_type, nn.init.calculate_gain('leaky_relu', b.negative_slope))
        else:
            init_weights(a, init_type)
    # The final layer has no following activation, so it gets the default gain.
    init_weights(s[-1])


def positional_encoding(positions, freqs, ori=False):
    '''encode positions with positional encoding

        positions: :math:`(...,D)`
        freqs: int
        ori: if True, prepend the raw positions to the encoding
    Return:
        pts: :math:`(..., 2DF)` (or :math:`(..., 2DF+D)` when ori=True)
    '''
    freq_bands = (2**torch.arange(freqs).float()).to(positions.device)  # (F,)
    ori_c = positions.shape[-1]
    # Interleave per-frequency values: (..., D*F) ordered [d0*f0, d0*f1, ..., d1*f0, ...]
    pts = (positions[..., None] * freq_bands).reshape(positions.shape[:-1] + (freqs * positions.shape[-1], ))  # (..., DF)
    if ori:
        pts = torch.cat([positions, torch.sin(pts), torch.cos(pts)], dim=-1).reshape(
            pts.shape[:-1] + (pts.shape[-1] * 2 + ori_c, ))
    else:
        # stack + reshape interleaves sin/cos pairs: [sin, cos, sin, cos, ...]
        pts = torch.stack([torch.sin(pts), torch.cos(pts)], dim=-1).reshape(pts.shape[:-1] + (pts.shape[-1] * 2, ))
    return pts
7,692
39.489474
125
py
pointnerf
pointnerf-master/models/rendering/diff_ray_marching.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


def find_ray_generation_method(name):
    """Resolve a coarse ray-sampling strategy by its configuration name.

    Raises RuntimeError when the name is unknown.
    """
    assert isinstance(name, str), 'ray generation method name must be string'
    if name == 'cube':
        return cube_ray_generation
    if name == 'near_far_linear':
        return near_far_linear_ray_generation
    if name == 'near_far_disparity_linear':
        return near_far_disparity_linear_ray_generation
    if name == 'nerf_near_far_disparity_linear':
        return nerf_near_far_disparity_linear_ray_generation
    if name == 'nerf_near_far_linear':
        return nerf_near_far_linear_ray_generation
    if name == 'near_middle_far':
        return near_middle_far_ray_generation
    raise RuntimeError('No such ray generation method: ' + name)


def find_refined_ray_generation_method(name):
    """Resolve an importance (fine) ray-sampling strategy by name."""
    assert isinstance(name, str), 'ray generation method name must be string'
    if name == 'cube':
        return refine_cube_ray_generation
    if name.startswith('nerf'):
        return nerf_refine_ray_generation
    # default fallback (original marked this branch "#hack default")
    return refine_ray_generation


def sample_pdf(in_bins, in_weights, n_samples, det=False):
    """Inverse-CDF sample n_samples depths from the histogram in_weights.

    in_bins:    N x R x S x 1 depth bin edges
    in_weights: N x R x S x 1 per-bin weights
    det:        when True, use evenly spaced CDF positions instead of random ones
    Returns the sorted union of the new samples with the original bins,
    shape N x R x (n_samples + S) x 1, detached from the graph.
    """
    in_shape = in_bins.shape
    device = in_weights.device
    # Work in numpy on CPU; fold batch and ray dims into one leading axis.
    bins = in_bins.data.cpu().numpy().reshape([-1, in_shape[2]])
    bins = 0.5 * (bins[..., 1:] + bins[..., :-1])  # bin centers: NR x (S-1)
    weights = in_weights.data.cpu().numpy().reshape([-1, in_shape[2]])
    weights = weights[..., 1:-1]  # NR x (S-2)
    weights += 1e-5  # avoid an all-zero pdf
    pdf = weights / np.sum(weights, axis=-1, keepdims=True)
    cdf = np.cumsum(pdf, axis=-1)
    cdf = np.concatenate([np.zeros_like(cdf[..., :1]), cdf], -1)  # NR x (S-1)
    if det:
        ur = np.broadcast_to(np.linspace(0, 1, n_samples, dtype=np.float32),
                             (cdf.shape[0], n_samples))
    else:
        ur = np.random.rand(cdf.shape[0], n_samples).astype(np.float32)
    # Locate each u inside its per-ray CDF, then linearly interpolate.
    inds = np.stack(
        [np.searchsorted(a, i, side='right') for a, i in zip(cdf, ur)])
    below = np.maximum(0, inds - 1)
    above = np.minimum(cdf.shape[-1] - 1, inds)
    cdf_lo = np.take_along_axis(cdf, below, 1)
    cdf_hi = np.take_along_axis(cdf, above, 1)
    bins_lo = np.take_along_axis(bins, below, 1)
    bins_hi = np.take_along_axis(bins, above, 1)
    denom = cdf_hi - cdf_lo
    denom = np.where(denom < 1e-5, np.ones_like(denom), denom)
    frac = (ur - cdf_lo) / denom
    samples = bins_lo + frac * (bins_hi - bins_lo)
    samples = torch.from_numpy(samples).view(
        (in_shape[0], in_shape[1], n_samples, 1)).to(device)
    # Merge with the original bin edges and sort along the sample axis.
    samples = torch.cat([samples, in_bins.detach()], dim=-2)
    samples, _ = torch.sort(samples, dim=-2)
    return samples.detach()


def near_middle_far_ray_generation(campos,
                                   raydir,
                                   point_count,
                                   near=0.1,
                                   middle=2,
                                   far=10,
                                   middle_split=0.6,
                                   jitter=0.,
                                   **kargs):
    """Sample depths linearly from near to middle, then linearly in disparity
    from middle to far.

    campos: N x 3; raydir: N x Rays x 3 (normalized); near/middle/far: N x 1 x 1.
    jitter: fraction of a step length used to randomly perturb segment sizes.
    Returns (raypos N x Rays x Samples x 3, segment_length, valid, ts),
    the last three shaped N x Rays x Samples.
    """
    t = torch.linspace(0, 1, int(point_count * middle_split) + 1,
                       device=campos.device).view(1, -1)
    vals0 = near * (1 - t) + middle * t  # linear-in-depth part
    t = torch.linspace(0, 1, int(point_count * (1 - middle_split)) + 2,
                       device=campos.device).view(1, -1)
    vals1 = 1. / (1. / middle * (1 - t) + 1. / far * t)  # linear-in-disparity part
    edges = torch.cat([vals0, vals1], 2)
    noise = torch.rand((raydir.shape[0], raydir.shape[1], edges.shape[-1] - 1),
                       device=campos.device) - 0.5
    segment_length = (edges[..., 1:] - edges[..., :-1]) * (1 + jitter * noise)
    segment_length = segment_length[..., :point_count]
    end_point_ts = torch.cumsum(segment_length, dim=2)
    zero_pad = torch.zeros((end_point_ts.shape[0], end_point_ts.shape[1], 1),
                           device=end_point_ts.device)
    end_point_ts = near + torch.cat([zero_pad, end_point_ts], dim=2)
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None, None, :] + \
        raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts


def near_far_disparity_linear_ray_generation(campos,
                                             raydir,
                                             point_count,
                                             near=0.1,
                                             far=10,
                                             jitter=0.,
                                             **kargs):
    """Sample depths linearly in disparity between near and far.

    campos: N x 3; raydir: N x Rays x 3 (normalized); near/far: N x 1 x 1.
    Returns (raypos, segment_length, valid, ts); ray positions sit at segment
    midpoints.
    """
    t = torch.linspace(0, 1, point_count + 1, device=campos.device).view(1, -1)
    edges = 1. / (1. / near * (1 - t) + 1. / far * t)
    noise = torch.rand((raydir.shape[0], raydir.shape[1], point_count),
                       device=campos.device) - 0.5
    segment_length = (edges[..., 1:] - edges[..., :-1]) * (1 + jitter * noise)
    end_point_ts = torch.cumsum(segment_length, dim=2)
    zero_pad = torch.zeros((end_point_ts.shape[0], end_point_ts.shape[1], 1),
                           device=end_point_ts.device)
    end_point_ts = near + torch.cat([zero_pad, end_point_ts], dim=2)
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None, None, :] + \
        raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts


def nerf_near_far_disparity_linear_ray_generation(campos,
                                                  raydir,
                                                  point_count,
                                                  near=0.1,
                                                  far=10,
                                                  jitter=1.,
                                                  **kargs):
    """NeRF-style disparity-linear sampling: depths at the t-values themselves
    (not segment midpoints), jittered inside each interval.

    campos: N x 3; raydir: N x Rays x 3; near/far: N x 1 x 1.
    The last segment length is set to 1e10 (open interval), and all segment
    lengths are scaled by |raydir| as in the original NeRF code.
    """
    tvals = torch.linspace(0, 1, point_count, device=campos.device).view(1, -1)
    tvals = 1. / (1. / near * (1 - tvals) + 1. / far * tvals)
    if jitter > 0.0:
        # Stratified sampling: one uniform draw inside each interval.
        mids = .5 * (tvals[..., 1:] + tvals[..., :-1])
        upper = torch.cat([mids, tvals[..., -1:]], -1)
        lower = torch.cat([tvals[..., :1], mids], -1)
        t_rand = torch.rand([tvals.shape[0], raydir.shape[1], tvals.shape[2]],
                            device=campos.device)
        tvals = lower + (upper - lower) * t_rand
    segment_length = torch.cat(
        [tvals[..., 1:] - tvals[..., :-1],
         torch.full((tvals.shape[0], tvals.shape[1], 1), 1e10,
                    device=tvals.device)],
        axis=-1) * torch.linalg.norm(raydir[..., None, :], axis=-1)
    raypos = campos[:, None, None, :] + \
        raydir[:, :, None, :] * tvals[:, :, :, None]
    valid = torch.ones_like(tvals, dtype=raypos.dtype, device=raypos.device)
    return raypos, segment_length, valid, tvals


def nerf_near_far_linear_ray_generation(campos,
                                        raydir,
                                        point_count,
                                        near=0.1,
                                        far=10,
                                        jitter=1.,
                                        **kargs):
    """NeRF-style depth-linear sampling between near and far (see the
    disparity-linear variant above for conventions)."""
    tvals = torch.linspace(0, 1, point_count, device=campos.device).view(1, -1)
    tvals = near * (1. - tvals) + far * (tvals)
    if jitter > 0.0:
        mids = .5 * (tvals[..., 1:] + tvals[..., :-1])
        upper = torch.cat([mids, tvals[..., -1:]], -1)
        lower = torch.cat([tvals[..., :1], mids], -1)
        t_rand = torch.rand([tvals.shape[0], raydir.shape[1], tvals.shape[2]],
                            device=campos.device)
        tvals = lower + (upper - lower) * t_rand
    segment_length = torch.cat(
        [tvals[..., 1:] - tvals[..., :-1],
         torch.full((tvals.shape[0], tvals.shape[1], 1), 1e10,
                    device=tvals.device)],
        axis=-1) * torch.linalg.norm(raydir[..., None, :], axis=-1)
    raypos = campos[:, None, None, :] + \
        raydir[:, :, None, :] * tvals[:, :, :, None]
    valid = torch.ones_like(tvals, dtype=raypos.dtype, device=raypos.device)
    return raypos, segment_length, valid, tvals


def near_far_linear_ray_generation(campos,
                                   raydir,
                                   point_count,
                                   near=0.1,
                                   far=10,
                                   jitter=0.,
                                   **kargs):
    """Sample depths linearly between near and far at segment midpoints.

    campos: N x 3; raydir: N x Rays x 3 (normalized); near/far: N x 1 x 1.
    Segment lengths are scaled by |raydir| before returning.
    """
    t = torch.linspace(0, 1, point_count + 1, device=campos.device).view(1, -1)
    edges = near * (1 - t) + far * t
    noise = torch.rand((raydir.shape[0], raydir.shape[1], point_count),
                       device=campos.device) - 0.5
    segment_length = (edges[..., 1:] - edges[..., :-1]) * (1 + jitter * noise)
    end_point_ts = torch.cumsum(segment_length, dim=2)
    zero_pad = torch.zeros((end_point_ts.shape[0], end_point_ts.shape[1], 1),
                           device=end_point_ts.device)
    end_point_ts = near + torch.cat([zero_pad, end_point_ts], dim=2)
    middle_point_ts = (end_point_ts[:, :, :-1] + end_point_ts[:, :, 1:]) / 2
    raypos = campos[:, None, None, :] + \
        raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
    valid = torch.ones_like(middle_point_ts,
                            dtype=middle_point_ts.dtype,
                            device=middle_point_ts.device)
    segment_length *= torch.linalg.norm(raydir[..., None, :], axis=-1)
    return raypos, segment_length, valid, middle_point_ts


def refine_ray_generation(campos,
                          raydir,
                          point_count,
                          prev_ts,
                          prev_weights,
                          domain_size=1.,
                          jitter=0,
                          **kargs):
    """Importance-sample point_count new depths from the coarse pass weights.

    prev_ts / prev_weights: N x Rays x PrevSamples from the coarse pass.
    Sampling is deterministic when jitter <= 0. Runs under no_grad: the fine
    sample positions are treated as constants.
    """
    with torch.no_grad():
        edges = sample_pdf(prev_ts[..., None], prev_weights, point_count + 1,
                           jitter <= 0)
        edges = edges.view(edges.shape[:-1])
        segment_length = edges[:, :, 1:] - edges[:, :, :-1]
        middle_point_ts = (edges[:, :, :-1] + edges[:, :, 1:]) / 2
        raypos = campos[:, None, None, :] + \
            raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
        valid = torch.ones_like(middle_point_ts,
                                dtype=middle_point_ts.dtype,
                                device=middle_point_ts.device)
        segment_length *= torch.linalg.norm(raydir[..., None, :], axis=-1)
    return raypos, segment_length, valid, middle_point_ts


def nerf_refine_ray_generation(campos,
                               raydir,
                               point_count,
                               prev_ts,
                               prev_weights,
                               domain_size=1.,
                               jitter=0,
                               **kargs):
    """NeRF variant of refine_ray_generation (identical math, kept separate to
    mirror the coarse NeRF samplers)."""
    with torch.no_grad():
        edges = sample_pdf(prev_ts[..., None], prev_weights, point_count + 1,
                           jitter <= 0)
        edges = edges.view(edges.shape[:-1])
        segment_length = edges[:, :, 1:] - edges[:, :, :-1]
        segment_length *= torch.linalg.norm(raydir[..., None, :], axis=-1)
        middle_point_ts = (edges[:, :, :-1] + edges[:, :, 1:]) / 2
        raypos = campos[:, None, None, :] + \
            raydir[:, :, None, :] * middle_point_ts[:, :, :, None]
        valid = torch.ones_like(middle_point_ts,
                                dtype=middle_point_ts.dtype,
                                device=middle_point_ts.device)
    return raypos, segment_length, valid, middle_point_ts


def refine_cube_ray_generation(campos,
                               raydir,
                               point_count,
                               prev_ts,
                               prev_weights,
                               domain_size=1.,
                               jitter=0,
                               **kargs):
    """Importance sampling restricted to the [-domain_size, domain_size] cube:
    samples falling outside the cube are marked invalid."""
    with torch.no_grad():
        raypos, segment_length, _, middle_point_ts = refine_ray_generation(
            campos, raydir, point_count, prev_ts, prev_weights,
            domain_size=domain_size, jitter=jitter, **kargs)
        valid = torch.prod(torch.gt(raypos, -domain_size) *
                           torch.lt(raypos, domain_size),
                           dim=-1).byte()
    return raypos, segment_length, valid, middle_point_ts


def ray_march(ray_dist,
              ray_valid,
              ray_features,
              render_func,
              blend_func,
              bg_color=None):
    """Composite per-sample features along each ray (volume rendering).

    ray_dist:     N x Rays x Samples segment lengths
    ray_valid:    N x Rays x Samples mask (invalid samples contribute no density)
    ray_features: N x Rays x Samples x F; channel 0 is (pre-activation) density
    render_func:  maps ray_features -> per-sample color
    blend_func:   maps (opacity, acc_transmission) -> blending weight
    bg_color:     optional background color composited behind the ray
    Returns (ray_color, point_color, opacity, acc_transmission, blend_weight,
    background_transmission, background_blend_weight).
    """
    point_color = render_func(ray_features)
    # Density of masked-out samples is zeroed, so they are fully transparent.
    sigma = ray_features[..., 0] * ray_valid.float()
    opacity = 1 - torch.exp(-sigma * ray_dist)
    # Exclusive cumulative product of transparencies (1e-10 avoids log/0 issues).
    acc_transmission = torch.cumprod(1. - opacity + 1e-10, dim=-1)
    ones = torch.ones(opacity.shape[0:2] + (1, )).to(
        opacity.device).float()  # N x R x 1
    background_transmission = acc_transmission[:, :, [-1]]
    acc_transmission = torch.cat([ones, acc_transmission[:, :, :-1]], dim=-1)
    blend_weight = blend_func(opacity, acc_transmission)[..., None]
    ray_color = torch.sum(point_color * blend_weight, dim=-2, keepdim=False)
    if bg_color is not None:
        # Whatever light survives all samples is taken from the background.
        ray_color += bg_color.to(opacity.device).float().view(
            background_transmission.shape[0], 1, 3) * background_transmission
    background_blend_weight = blend_func(1, background_transmission)
    return ray_color, point_color, opacity, acc_transmission, blend_weight, \
        background_transmission, background_blend_weight


def alpha_ray_march(ray_dist, ray_valid, ray_features, blend_func):
    """Compute only the blending statistics of ray_march (no colors)."""
    sigma = ray_features[..., 0] * ray_valid.float()
    opacity = 1 - torch.exp(-sigma * ray_dist)
    acc_transmission = torch.cumprod(1. - opacity + 1e-10, dim=-1)
    ones = torch.ones(opacity.shape[0:2] + (1, )).to(
        opacity.device).float()  # N x R x 1
    background_transmission = acc_transmission[:, :, [-1]]
    acc_transmission = torch.cat([ones, acc_transmission[:, :, :-1]], dim=-1)
    blend_weight = blend_func(opacity, acc_transmission)[..., None]
    background_blend_weight = blend_func(1, background_transmission)
    return opacity, acc_transmission, blend_weight, \
        background_transmission, background_blend_weight
23,672
40.314136
290
py
pointnerf
pointnerf-master/models/rendering/diff_render_func.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils import format as fmt


def find_render_function(name):
    """Map a config name to a per-sample radiance decoding function."""
    fn = {'radiance': radiance_render, 'white': white_color}.get(name)
    if fn is None:
        raise RuntimeError('Unknown render function: ' + name)
    return fn


def find_blend_function(name):
    """Map a config name to an opacity/transmission blending function."""
    fn = {'alpha': alpha_blend, 'alpha2': alpha2_blend}.get(name)
    if fn is None:
        raise RuntimeError('Unknown blend function: ' + name)
    return fn


def find_tone_map(name):
    """Map a config name to a tone-mapping function."""
    fn = {'gamma': simple_tone_map,
          'normalize': normalize_tone_map,
          'off': no_tone_map}.get(name)
    if fn is None:
        # original error text reused verbatim (it also says "blend function")
        raise RuntimeError('Unknown blend function: ' + name)
    return fn


def alpha_blend(opacity, acc_transmission):
    """Standard alpha compositing weight: opacity times accumulated transmission."""
    return opacity * acc_transmission


def alpha2_blend(opacity, acc_transmission):
    """Round-trip blend: a light collocated with the camera traverses the medium
    twice, so the transmission is applied twice."""
    return opacity * acc_transmission * acc_transmission


def radiance_render(ray_feature):
    """Channels 1:4 of the per-sample feature vector are the RGB radiance."""
    return ray_feature[..., 1:4]


def white_color(ray_feature):
    """Ignore the stored albedo and shade every sample pure white."""
    albedo = ray_feature[..., 1:4].clamp(0., 1.)
    return torch.ones_like(albedo)


def simple_tone_map(color, gamma=2.2, exposure=1):
    """Gamma tone mapping; the 1e-5 epsilon keeps pow() well-behaved at zero."""
    return torch.pow(color * exposure + 1e-5, 1 / gamma).clamp_(0, 1)


def no_tone_map(color, gamma=2.2, exposure=1):
    """Identity tone map (keeps the same signature as simple_tone_map)."""
    return color


def normalize_tone_map(color):
    """L2-normalize the color and remap it from [-1, 1] to [0, 1]."""
    color = F.normalize(color, dim=-1)
    return color * 0.5 + 0.5
1,620
22.838235
73
py
pointnerf
pointnerf-master/models/aggregators/point_aggregators.py
import torch import torch.nn as nn import numpy as np import torch.nn.functional as F from ..helpers.networks import init_seq, positional_encoding from utils.spherical import SphericalHarm_table as SphericalHarm from ..helpers.geometrics import compute_world2local_dist class PointAggregator(torch.nn.Module): @staticmethod def modify_commandline_options(parser, is_train=True): parser.add_argument( '--feature_init_method', type=str, default="rand", help='which agg model to use [feature_interp | graphconv | affine_mix]') parser.add_argument( '--which_agg_model', type=str, default="viewmlp", help='which agg model to use [viewmlp | nsvfmlp]') parser.add_argument( '--agg_distance_kernel', type=str, default="quadric", help='which agg model to use [quadric | linear | feat_intrp | harmonic_intrp]') parser.add_argument( '--sh_degree', type=int, default=4, help='degree of harmonics') parser.add_argument( '--sh_dist_func', type=str, default="sh_quadric", help='sh_quadric | sh_linear | passfunc') parser.add_argument( '--sh_act', type=str, default="sigmoid", help='sigmoid | tanh | passfunc') parser.add_argument( '--agg_axis_weight', type=float, nargs='+', default=None, help= '(1., 1., 1.)' ) parser.add_argument( '--agg_dist_pers', type=int, default=1, help='use pers dist') parser.add_argument( '--apply_pnt_mask', type=int, default=1, help='use pers dist') parser.add_argument( '--modulator_concat', type=int, default=0, help='use pers dist') parser.add_argument( '--agg_intrp_order', type=int, default=0, help='interpolate first and feature mlp 0 | feature mlp then interpolate 1 | feature mlp color then interpolate 2') parser.add_argument( '--shading_feature_mlp_layer0', type=int, default=0, help='interp to agged features mlp num') parser.add_argument( '--shading_feature_mlp_layer1', type=int, default=2, help='interp to agged features mlp num') parser.add_argument( '--shading_feature_mlp_layer2', type=int, default=0, help='interp to agged features mlp num') parser.add_argument( 
'--shading_feature_mlp_layer3', type=int, default=2, help='interp to agged features mlp num') parser.add_argument( '--shading_feature_num', type=int, default=256, help='agged shading feature channel num') parser.add_argument( '--point_hyper_dim', type=int, default=256, help='agged shading feature channel num') parser.add_argument( '--shading_alpha_mlp_layer', type=int, default=1, help='agged features to alpha mlp num') parser.add_argument( '--shading_color_mlp_layer', type=int, default=1, help='agged features to alpha mlp num') parser.add_argument( '--shading_color_channel_num', type=int, default=3, help='color channel num') parser.add_argument( '--num_feat_freqs', type=int, default=0, help='color channel num') parser.add_argument( '--num_hyperfeat_freqs', type=int, default=0, help='color channel num') parser.add_argument( '--dist_xyz_freq', type=int, default=2, help='color channel num') parser.add_argument( '--dist_xyz_deno', type=float, default=0, help='color channel num') parser.add_argument( '--weight_xyz_freq', type=int, default=2, help='color channel num') parser.add_argument( '--weight_feat_dim', type=int, default=8, help='color channel num') parser.add_argument( '--agg_weight_norm', type=int, default=1, help='normalize weight, sum as 1') parser.add_argument( '--view_ori', type=int, default=0, help='0 for pe+3 orignal channels') parser.add_argument( '--agg_feat_xyz_mode', type=str, default="None", help='which agg xyz mode to use [None not to use | world world xyz | pers perspective xyz ]') parser.add_argument( '--agg_alpha_xyz_mode', type=str, default="None", help='which agg xyz mode to use [None not to use | world world xyz | pers perspective xyz ]') parser.add_argument( '--agg_color_xyz_mode', type=str, default="None", help='which agg xyz mode to use [None not to use | world world xyz | pers perspective xyz ]') parser.add_argument( '--act_type', type=str, default="ReLU", # default="LeakyReLU", help='which agg xyz mode to use [None not to use | world world 
xyz | pers perspective xyz ]') parser.add_argument( '--act_super', type=int, default=1, # default="LeakyReLU", help='1 to use softplus and widden sigmoid for last activation') def __init__(self, opt): super(PointAggregator, self).__init__() self.act = getattr(nn, opt.act_type, None) print("opt.act_type!!!!!!!!!", opt.act_type) self.point_hyper_dim=opt.point_hyper_dim if opt.point_hyper_dim < opt.point_features_dim else opt.point_features_dim block_init_lst = [] if opt.agg_distance_kernel == "feat_intrp": feat_weight_block = [] in_channels = 2 * opt.weight_xyz_freq * 3 + opt.weight_feat_dim out_channels = int(in_channels / 2) for i in range(2): feat_weight_block.append(nn.Linear(in_channels, out_channels)) feat_weight_block.append(self.act(inplace=True)) in_channels = out_channels feat_weight_block.append(nn.Linear(in_channels, 1)) feat_weight_block.append(nn.Sigmoid()) self.feat_weight_mlp = nn.Sequential(*feat_weight_block) block_init_lst.append(self.feat_weight_mlp) elif opt.agg_distance_kernel == "sh_intrp": self.shcomp = SphericalHarm(opt.sh_degree) self.opt = opt self.dist_dim = (4 if self.opt.agg_dist_pers == 30 else 6) if self.opt.agg_dist_pers > 9 else 3 self.dist_func = getattr(self, opt.agg_distance_kernel, None) assert self.dist_func is not None, "InterpAggregator doesn't have disance_kernel {} ".format(opt.agg_distance_kernel) self.axis_weight = None if opt.agg_axis_weight is None else torch.as_tensor(opt.agg_axis_weight, dtype=torch.float32, device="cuda")[None, None, None, None, :] self.num_freqs = opt.num_pos_freqs if opt.num_pos_freqs > 0 else 0 self.num_viewdir_freqs = opt.num_viewdir_freqs if opt.num_viewdir_freqs > 0 else 0 self.pnt_channels = (2 * self.num_freqs * 3) if self.num_freqs > 0 else 3 self.viewdir_channels = (2 * self.num_viewdir_freqs * 3 + self.opt.view_ori * 3) if self.num_viewdir_freqs > 0 else 3 self.which_agg_model = opt.which_agg_model.split("_")[0] if opt.which_agg_model.startswith("feathyper") else opt.which_agg_model 
getattr(self, self.which_agg_model+"_init", None)(opt, block_init_lst) self.density_super_act = torch.nn.Softplus() self.density_act = torch.nn.ReLU() self.color_act = torch.nn.Sigmoid() def raw2out_density(self, raw_density): if self.opt.act_super > 0: # return self.density_act(raw_density - 1) # according to mip nerf, to stablelize the training return self.density_super_act(raw_density - 1) # according to mip nerf, to stablelize the training else: return self.density_act(raw_density) def raw2out_color(self, raw_color): color = self.color_act(raw_color) if self.opt.act_super > 0: color = color * (1 + 2 * 0.001) - 0.001 # according to mip nerf, to stablelize the training return color def viewmlp_init(self, opt, block_init_lst): dist_xyz_dim = self.dist_dim if opt.dist_xyz_freq == 0 else 2 * abs(opt.dist_xyz_freq) * self.dist_dim in_channels = opt.point_features_dim + (0 if opt.agg_feat_xyz_mode == "None" else self.pnt_channels) - (opt.weight_feat_dim if opt.agg_distance_kernel in ["feat_intrp", "meta_intrp"] else 0) - (opt.sh_degree ** 2 if opt.agg_distance_kernel == "sh_intrp" else 0) - (7 if opt.agg_distance_kernel == "gau_intrp" else 0) in_channels += (2 * opt.num_feat_freqs * in_channels if opt.num_feat_freqs > 0 else 0) + (dist_xyz_dim if opt.agg_intrp_order > 0 else 0) if opt.shading_feature_mlp_layer1 > 0: out_channels = opt.shading_feature_num block1 = [] for i in range(opt.shading_feature_mlp_layer1): block1.append(nn.Linear(in_channels, out_channels)) block1.append(self.act(inplace=True)) in_channels = out_channels self.block1 = nn.Sequential(*block1) block_init_lst.append(self.block1) else: self.block1 = self.passfunc if opt.shading_feature_mlp_layer2 > 0: in_channels = in_channels + (0 if opt.agg_feat_xyz_mode == "None" else self.pnt_channels) + ( dist_xyz_dim if (opt.agg_intrp_order > 0 and opt.num_feat_freqs == 0) else 0) out_channels = opt.shading_feature_num block2 = [] for i in range(opt.shading_feature_mlp_layer2): 
block2.append(nn.Linear(in_channels, out_channels)) block2.append(self.act(inplace=True)) in_channels = out_channels self.block2 = nn.Sequential(*block2) block_init_lst.append(self.block2) else: self.block2 = self.passfunc if opt.shading_feature_mlp_layer3 > 0: in_channels = in_channels + (3 if "1" in list(opt.point_color_mode) else 0) + ( 4 if "1" in list(opt.point_dir_mode) else 0) out_channels = opt.shading_feature_num block3 = [] for i in range(opt.shading_feature_mlp_layer3): block3.append(nn.Linear(in_channels, out_channels)) block3.append(self.act(inplace=True)) in_channels = out_channels self.block3 = nn.Sequential(*block3) block_init_lst.append(self.block3) else: self.block3 = self.passfunc alpha_block = [] in_channels = opt.shading_feature_num + (0 if opt.agg_alpha_xyz_mode == "None" else self.pnt_channels) out_channels = int(opt.shading_feature_num / 2) for i in range(opt.shading_alpha_mlp_layer - 1): alpha_block.append(nn.Linear(in_channels, out_channels)) alpha_block.append(self.act(inplace=False)) in_channels = out_channels alpha_block.append(nn.Linear(in_channels, 1)) self.alpha_branch = nn.Sequential(*alpha_block) block_init_lst.append(self.alpha_branch) color_block = [] in_channels = opt.shading_feature_num + self.viewdir_channels + ( 0 if opt.agg_color_xyz_mode == "None" else self.pnt_channels) out_channels = int(opt.shading_feature_num / 2) for i in range(opt.shading_color_mlp_layer - 1): color_block.append(nn.Linear(in_channels, out_channels)) color_block.append(self.act(inplace=True)) in_channels = out_channels color_block.append(nn.Linear(in_channels, 3)) self.color_branch = nn.Sequential(*color_block) block_init_lst.append(self.color_branch) for m in block_init_lst: init_seq(m) def passfunc(self, input): return input def trilinear(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * R * SR * K * 3 # return B * R * SR * K dists = dists * pnt_mask[..., None] dists = dists / grid_vox_sz # dist: [1, 797, 40, 8, 
3]; pnt_mask: [1, 797, 40, 8] # dists = 1 + dists * torch.as_tensor([[1,1,1], [-1, 1, 1], [1, -1, 1], [1, 1, -1], [-1, 1, -1], [1, -1, -1], [-1, -1, 1], [-1, -1, -1]], dtype=torch.float32, device=dists.device).view(1, 1, 1, 8, 3) dists = 1 - torch.abs(dists) weights = pnt_mask * dists[..., 0] * dists[..., 1] * dists[..., 2] norm_weights = weights / torch.clamp(torch.sum(weights, dim=-1, keepdim=True), min=1e-8) # ijk = xyz.astype(np.int32) # i, j, k = ijk[:, 0], ijk[:, 1], ijk[:, 2] # V000 = data[i, j, k].astype(np.int32) # V100 = data[(i + 1), j, k].astype(np.int32) # V010 = data[i, (j + 1), k].astype(np.int32) # V001 = data[i, j, (k + 1)].astype(np.int32) # V101 = data[(i + 1), j, (k + 1)].astype(np.int32) # V011 = data[i, (j + 1), (k + 1)].astype(np.int32) # V110 = data[(i + 1), (j + 1), k].astype(np.int32) # V111 = data[(i + 1), (j + 1), (k + 1)].astype(np.int32) # xyz = xyz - ijk # x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2] # Vxyz = (V000 * (1 - x) * (1 - y) * (1 - z) # + V100 * x * (1 - y) * (1 - z) + # + V010 * (1 - x) * y * (1 - z) + # + V001 * (1 - x) * (1 - y) * z + # + V101 * x * (1 - y) * z + # + V011 * (1 - x) * y * z + # + V110 * x * y * (1 - z) + # + V111 * x * y * z) return norm_weights, embedding def avg(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * channel* R * SR * K # return B * R * SR * K weights = pnt_mask * 1.0 return weights, embedding def quadric(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * channel* R * SR * K # return B * R * SR * K if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 1] == 1 and axis_weight[..., 2] ==1): weights = 1./ torch.clamp(torch.sum(torch.square(dists[..., :3]), dim=-1), min= 1e-8) else: weights = 1. 
/ torch.clamp(torch.sum(torch.square(dists)* axis_weight, dim=-1), min=1e-8) weights = pnt_mask * weights return weights, embedding def numquadric(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * channel* R * SR * K # return B * R * SR * K if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 1] == 1 and axis_weight[..., 2] ==1): weights = 1./ torch.clamp(torch.sum(torch.square(dists), dim=-1), min= 1e-8) else: weights = 1. / torch.clamp(torch.sum(torch.square(dists)* axis_weight, dim=-1), min=1e-8) weights = pnt_mask * weights return weights, embedding def linear(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * R * SR * K * channel # return B * R * SR * K if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 2] ==1) : weights = 1. / torch.clamp(torch.norm(dists[..., :3], dim=-1), min= 1e-6) else: weights = 1. / torch.clamp(torch.sqrt(torch.sum(torch.square(dists[...,:2]), dim=-1)) * axis_weight[..., 0] + torch.abs(dists[...,2]) * axis_weight[..., 1], min= 1e-6) weights = pnt_mask * weights return weights, embedding def numlinear(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * R * SR * K * channel # return B * R * SR * K if axis_weight is None or (axis_weight[..., 0] == 1 and axis_weight[..., 2] ==1) : weights = 1. / torch.clamp(torch.norm(dists, dim=-1), min= 1e-6) else: weights = 1. 
/ torch.clamp(torch.sqrt(torch.sum(torch.square(dists[...,:2]), dim=-1)) * axis_weight[..., 0] + torch.abs(dists[...,2]) * axis_weight[..., 1], min= 1e-6) weights = pnt_mask * weights norm_weights = weights / torch.clamp(torch.sum(pnt_mask, dim=-1, keepdim=True), min=1) return norm_weights, embedding def sigmoid(self, input): return torch.sigmoid(input) def tanh(self, input): return torch.tanh(input) def sh_linear(self, dist_norm): return 1 / torch.clamp(dist_norm, min=1e-8) def sh_quadric(self, dist_norm): return 1 / torch.clamp(torch.square(dist_norm), min=1e-8) def sh_intrp(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * R * SR * K * channel dist_norm = torch.linalg.norm(dists, dim=-1) dist_dirs = dists / torch.clamp(dist_norm[...,None], min=1e-8) shall = self.shcomp.sh_all(dist_dirs, filp_dir=False).view(dists.shape[:-1]+(self.shcomp.total_deg ** 2,)) sh_coefs = embedding[..., :self.shcomp.total_deg ** 2] # shall: [1, 816, 24, 32, 16], sh_coefs: [1, 816, 24, 32, 16], pnt_mask: [1, 816, 24, 32] # debug: weights = pnt_mask * torch.sum(shall, dim=-1) # weights = pnt_mask * torch.sum(shall * getattr(self, self.opt.sh_act, None)(sh_coefs), dim=-1) * getattr(self, self.opt.sh_dist_func, None)(dist_norm) weights = pnt_mask * torch.sum(getattr(self, self.opt.sh_act, None)(shall * sh_coefs), dim=-1) * getattr(self, self.opt.sh_dist_func, None)(dist_norm) # changed return weights, embedding[..., self.shcomp.total_deg ** 2:] def gau_intrp(self, embedding, dists, pnt_mask, vsize, grid_vox_sz, axis_weight=None): # dists: B * R * SR * K * channel # dist: [1, 752, 40, 32, 3] B, R, SR, K, _ = dists.shape scale = torch.abs(embedding[..., 0]) # radii = vsize[2] * 20 * torch.sigmoid(embedding[..., 1:4]) rotations = torch.clamp(embedding[..., 4:7], max=np.pi / 4, min=-np.pi / 4) gau_dist = compute_world2local_dist(dists, radii, rotations)[..., 0] # print("gau_dist", gau_dist.shape) weights = pnt_mask * scale * torch.exp(-0.5 * 
torch.sum(torch.square(gau_dist), dim=-1)) # print("gau_dist", gau_dist.shape, gau_dist[0, 0]) # print("weights", weights.shape, weights[0, 0, 0]) return weights, embedding[..., 7:] def viewmlp(self, sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, vsize, weight, pnt_mask_flat, pts, viewdirs, total_len, ray_valid, in_shape, dists): # print("sampled_Rw2c", sampled_Rw2c.shape, sampled_xyz.shape) # assert sampled_Rw2c.dim() == 2 B, R, SR, K, _ = dists.shape sampled_Rw2c = sampled_Rw2c.transpose(-1, -2) uni_w2c = sampled_Rw2c.dim() == 2 if not uni_w2c: sampled_Rw2c_ray = sampled_Rw2c[:,:,:,0,:,:].view(-1, 3, 3) sampled_Rw2c = sampled_Rw2c.reshape(-1, 3, 3)[pnt_mask_flat, :, :] pts_ray, pts_pnt = None, None if self.opt.agg_feat_xyz_mode != "None" or self.opt.agg_alpha_xyz_mode != "None" or self.opt.agg_color_xyz_mode != "None": if self.num_freqs > 0: pts = positional_encoding(pts, self.num_freqs) pts_ray = pts[ray_valid, :] if self.opt.agg_feat_xyz_mode != "None" and self.opt.agg_intrp_order > 0: pts_pnt = pts[..., None, :].repeat(1, K, 1).view(-1, pts.shape[-1]) if self.opt.apply_pnt_mask > 0: pts_pnt=pts_pnt[pnt_mask_flat, :] viewdirs = viewdirs @ sampled_Rw2c if uni_w2c else (viewdirs[..., None, :] @ sampled_Rw2c_ray).squeeze(-2) if self.num_viewdir_freqs > 0: viewdirs = positional_encoding(viewdirs, self.num_viewdir_freqs, ori=True) ori_viewdirs, viewdirs = viewdirs[..., :3], viewdirs[..., 3:] viewdirs = viewdirs[ray_valid, :] if self.opt.agg_intrp_order == 0: feat = torch.sum(sampled_embedding * weight[..., None], dim=-2) feat = feat.view([-1, feat.shape[-1]])[ray_valid, :] if self.opt.num_feat_freqs > 0: feat = torch.cat([feat, positional_encoding(feat, self.opt.num_feat_freqs)], dim=-1) pts = pts_ray else: dists_flat = dists.view(-1, dists.shape[-1]) if self.opt.apply_pnt_mask > 0: dists_flat = dists_flat[pnt_mask_flat, :] dists_flat /= ( 1.0 if 
self.opt.dist_xyz_deno == 0. else float(self.opt.dist_xyz_deno * np.linalg.norm(vsize))) dists_flat[..., :3] = dists_flat[..., :3] @ sampled_Rw2c if uni_w2c else (dists_flat[..., None, :3] @ sampled_Rw2c).squeeze(-2) if self.opt.dist_xyz_freq != 0: # print(dists.dtype, (self.opt.dist_xyz_deno * np.linalg.norm(vsize)).dtype, dists_flat.dtype) dists_flat = positional_encoding(dists_flat, self.opt.dist_xyz_freq) feat= sampled_embedding.view(-1, sampled_embedding.shape[-1]) # print("feat", feat.shape) if self.opt.apply_pnt_mask > 0: feat = feat[pnt_mask_flat, :] if self.opt.num_feat_freqs > 0: feat = torch.cat([feat, positional_encoding(feat, self.opt.num_feat_freqs)], dim=-1) feat = torch.cat([feat, dists_flat], dim=-1) weight = weight.view(B * R * SR, K, 1) pts = pts_pnt # used_point_embedding = feat[..., : self.opt.point_features_dim] if self.opt.agg_feat_xyz_mode != "None": feat = torch.cat([feat, pts], dim=-1) # print("feat",feat.shape) # 501 feat = self.block1(feat) if self.opt.shading_feature_mlp_layer2>0: if self.opt.agg_feat_xyz_mode != "None": feat = torch.cat([feat, pts], dim=-1) if self.opt.agg_intrp_order > 0: feat = torch.cat([feat, dists_flat], dim=-1) feat = self.block2(feat) if self.opt.shading_feature_mlp_layer3>0: if sampled_color is not None: sampled_color = sampled_color.view(-1, sampled_color.shape[-1]) if self.opt.apply_pnt_mask > 0: sampled_color = sampled_color[pnt_mask_flat, :] feat = torch.cat([feat, sampled_color], dim=-1) if sampled_dir is not None: sampled_dir = sampled_dir.view(-1, sampled_dir.shape[-1]) if self.opt.apply_pnt_mask > 0: sampled_dir = sampled_dir[pnt_mask_flat, :] sampled_dir = sampled_dir @ sampled_Rw2c if uni_w2c else (sampled_dir[..., None, :] @ sampled_Rw2c).squeeze(-2) ori_viewdirs = ori_viewdirs[..., None, :].repeat(1, K, 1).view(-1, ori_viewdirs.shape[-1]) if self.opt.apply_pnt_mask > 0: ori_viewdirs = ori_viewdirs[pnt_mask_flat, :] feat = torch.cat([feat, sampled_dir - ori_viewdirs, 
torch.sum(sampled_dir*ori_viewdirs, dim=-1, keepdim=True)], dim=-1) feat = self.block3(feat) if self.opt.agg_intrp_order == 1: if self.opt.apply_pnt_mask > 0: feat_holder = torch.zeros([B * R * SR * K, feat.shape[-1]], dtype=torch.float32, device=feat.device) feat_holder[pnt_mask_flat, :] = feat else: feat_holder = feat feat = feat_holder.view(B * R * SR, K, feat_holder.shape[-1]) feat = torch.sum(feat * weight, dim=-2).view([-1, feat.shape[-1]])[ray_valid, :] alpha_in = feat if self.opt.agg_alpha_xyz_mode != "None": alpha_in = torch.cat([alpha_in, pts], dim=-1) alpha = self.raw2out_density(self.alpha_branch(alpha_in)) color_in = feat if self.opt.agg_color_xyz_mode != "None": color_in = torch.cat([color_in, pts], dim=-1) color_in = torch.cat([color_in, viewdirs], dim=-1) color_output = self.raw2out_color(self.color_branch(color_in)) # print("color_output", torch.sum(color_output), color_output.grad) output = torch.cat([alpha, color_output], dim=-1) elif self.opt.agg_intrp_order == 2: alpha_in = feat if self.opt.agg_alpha_xyz_mode != "None": alpha_in = torch.cat([alpha_in, pts], dim=-1) alpha = self.raw2out_density(self.alpha_branch(alpha_in)) # print(alpha_in.shape, alpha_in) if self.opt.apply_pnt_mask > 0: alpha_holder = torch.zeros([B * R * SR * K, alpha.shape[-1]], dtype=torch.float32, device=alpha.device) alpha_holder[pnt_mask_flat, :] = alpha else: alpha_holder = alpha alpha = alpha_holder.view(B * R * SR, K, alpha_holder.shape[-1]) alpha = torch.sum(alpha * weight, dim=-2).view([-1, alpha.shape[-1]])[ray_valid, :] # alpha: # print("alpha", alpha.shape) # alpha_placeholder = torch.zeros([total_len, 1], dtype=torch.float32, # device=alpha.device) # alpha_placeholder[ray_valid] = alpha if self.opt.apply_pnt_mask > 0: feat_holder = torch.zeros([B * R * SR * K, feat.shape[-1]], dtype=torch.float32, device=feat.device) feat_holder[pnt_mask_flat, :] = feat else: feat_holder = feat feat = feat_holder.view(B * R * SR, K, feat_holder.shape[-1]) feat = torch.sum(feat * 
weight, dim=-2).view([-1, feat.shape[-1]])[ray_valid, :] color_in = feat if self.opt.agg_color_xyz_mode != "None": color_in = torch.cat([color_in, pts], dim=-1) color_in = torch.cat([color_in, viewdirs], dim=-1) color_output = self.raw2out_color(self.color_branch(color_in)) # color_output = torch.sigmoid(color_output) # output_placeholder = torch.cat([alpha, color_output], dim=-1) output = torch.cat([alpha, color_output], dim=-1) # print("output_placeholder", output_placeholder.shape) output_placeholder = torch.zeros([total_len, self.opt.shading_color_channel_num + 1], dtype=torch.float32, device=output.device) output_placeholder[ray_valid] = output return output_placeholder, None def print_point(self, dists, sample_loc_w, sampled_xyz, sample_loc, sampled_xyz_pers, sample_pnt_mask): # for i in range(dists.shape[0]): # filepath = "./dists.txt" # filepath1 = "./dists10.txt" # filepath2 = "./dists20.txt" # filepath3 = "./dists30.txt" # filepath4 = "./dists40.txt" # dists_cpu = dists.detach().cpu().numpy() # np.savetxt(filepath1, dists_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";") # np.savetxt(filepath2, dists_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";") # np.savetxt(filepath3, dists_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";") # np.savetxt(filepath4, dists_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";") # dists_cpu = dists[i,...][torch.any(sample_pnt_mask, dim=-1)[i,...], :].detach().cpu().numpy() # np.savetxt(filepath, dists_cpu.reshape(-1, 3), delimiter=";") for i in range(sample_loc_w.shape[0]): filepath = "./sample_loc_w.txt" filepath1 = "./sample_loc_w10.txt" filepath2 = "./sample_loc_w20.txt" filepath3 = "./sample_loc_w30.txt" filepath4 = "./sample_loc_w40.txt" sample_loc_w_cpu = sample_loc_w.detach().cpu().numpy() np.savetxt(filepath1, sample_loc_w_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath2, sample_loc_w_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath3, sample_loc_w_cpu[i, 80, 6, ...].reshape(-1, 
3), delimiter=";") np.savetxt(filepath4, sample_loc_w_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";") sample_loc_w_cpu = sample_loc_w[i,...][torch.any(sample_pnt_mask, dim=-1)[i,...], :].detach().cpu().numpy() np.savetxt(filepath, sample_loc_w_cpu.reshape(-1, 3), delimiter=";") for i in range(sampled_xyz.shape[0]): sampled_xyz_cpu = sampled_xyz.detach().cpu().numpy() filepath = "./sampled_xyz.txt" filepath1 = "./sampled_xyz10.txt" filepath2 = "./sampled_xyz20.txt" filepath3 = "./sampled_xyz30.txt" filepath4 = "./sampled_xyz40.txt" np.savetxt(filepath1, sampled_xyz_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath2, sampled_xyz_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath3, sampled_xyz_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath4, sampled_xyz_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath, sampled_xyz_cpu[i, ...].reshape(-1, 3), delimiter=";") for i in range(sample_loc.shape[0]): filepath1 = "./sample_loc10.txt" filepath2 = "./sample_loc20.txt" filepath3 = "./sample_loc30.txt" filepath4 = "./sample_loc40.txt" filepath = "./sample_loc.txt" sample_loc_cpu =sample_loc.detach().cpu().numpy() np.savetxt(filepath1, sample_loc_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath2, sample_loc_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath3, sample_loc_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath4, sample_loc_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath, sample_loc[i, ...][torch.any(sample_pnt_mask, dim=-1)[i,...], :].reshape(-1, 3).detach().cpu().numpy(), delimiter=";") for i in range(sampled_xyz_pers.shape[0]): filepath1 = "./sampled_xyz_pers10.txt" filepath2 = "./sampled_xyz_pers20.txt" filepath3 = "./sampled_xyz_pers30.txt" filepath4 = "./sampled_xyz_pers40.txt" filepath = "./sampled_xyz_pers.txt" sampled_xyz_pers_cpu = sampled_xyz_pers.detach().cpu().numpy() np.savetxt(filepath1, 
sampled_xyz_pers_cpu[i, 80, 0, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath2, sampled_xyz_pers_cpu[i, 80, 3, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath3, sampled_xyz_pers_cpu[i, 80, 6, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath4, sampled_xyz_pers_cpu[i, 80, 9, ...].reshape(-1, 3), delimiter=";") np.savetxt(filepath, sampled_xyz_pers_cpu[i, ...].reshape(-1, 3), delimiter=";") print("saved sampled points and shading points") exit() def gradiant_clamp(self, sampled_conf, min=0.0001, max=1): diff = sampled_conf - torch.clamp(sampled_conf, min=min, max=max) return sampled_conf - diff.detach() def forward(self, sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, vsize, grid_vox_sz): # return B * R * SR * channel ''' :param sampled_conf: B x valid R x SR x K x 1 :param sampled_embedding: B x valid R x SR x K x F :param sampled_xyz_pers: B x valid R x SR x K x 3 :param sampled_xyz: B x valid R x SR x K x 3 :param sample_pnt_mask: B x valid R x SR x K :param sample_loc: B x valid R x SR x 3 :param sample_loc_w: B x valid R x SR x 3 :param sample_ray_dirs: B x valid R x SR x 3 :param vsize: :return: ''' ray_valid = torch.any(sample_pnt_mask, dim=-1).view(-1) total_len = len(ray_valid) in_shape = sample_loc_w.shape if total_len == 0 or torch.sum(ray_valid) == 0: # print("skip since no valid ray, total_len:", total_len, torch.sum(ray_valid)) return torch.zeros(in_shape[:-1] + (self.opt.shading_color_channel_num + 1,), device=ray_valid.device, dtype=torch.float32), ray_valid.view(in_shape[:-1]), None, None if self.opt.agg_dist_pers < 0: dists = sample_loc_w[..., None, :] elif self.opt.agg_dist_pers == 0: dists = sampled_xyz - sample_loc_w[..., None, :] elif self.opt.agg_dist_pers == 1: dists = sampled_xyz_pers - sample_loc[..., None, :] elif self.opt.agg_dist_pers == 2: if sampled_xyz_pers.shape[1] > 0: xdist = 
sampled_xyz_pers[..., 0] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 0] * sample_loc[:, :, :, None, 2] ydist = sampled_xyz_pers[..., 1] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 1] * sample_loc[:, :, :, None, 2] zdist = sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 2] dists = torch.stack([xdist, ydist, zdist], dim=-1) else: B, R, SR, K, _ = sampled_xyz_pers.shape dists = torch.zeros([B, R, SR, K, 3], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype) elif self.opt.agg_dist_pers == 10: if sampled_xyz_pers.shape[1] > 0: dists = sampled_xyz_pers - sample_loc[..., None, :] dists = torch.cat([sampled_xyz - sample_loc_w[..., None, :], dists], dim=-1) else: B, R, SR, K, _ = sampled_xyz_pers.shape dists = torch.zeros([B, R, SR, K, 6], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype) elif self.opt.agg_dist_pers == 20: if sampled_xyz_pers.shape[1] > 0: xdist = sampled_xyz_pers[..., 0] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 0] * sample_loc[:, :, :, None, 2] ydist = sampled_xyz_pers[..., 1] * sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 1] * sample_loc[:, :, :, None, 2] zdist = sampled_xyz_pers[..., 2] - sample_loc[:, :, :, None, 2] dists = torch.stack([xdist, ydist, zdist], dim=-1) # dists = torch.cat([sampled_xyz - sample_loc_w[..., None, :], dists], dim=-1) dists = torch.cat([sampled_xyz - sample_loc_w[..., None, :], dists], dim=-1) else: B, R, SR, K, _ = sampled_xyz_pers.shape dists = torch.zeros([B, R, SR, K, 6], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype) elif self.opt.agg_dist_pers == 30: if sampled_xyz_pers.shape[1] > 0: w_dists = sampled_xyz - sample_loc_w[..., None, :] dists = torch.cat([torch.sum(w_dists*sample_ray_dirs[..., None, :], dim=-1, keepdim=True), dists], dim=-1) else: B, R, SR, K, _ = sampled_xyz_pers.shape dists = torch.zeros([B, R, SR, K, 4], device=sampled_xyz_pers.device, dtype=sampled_xyz_pers.dtype) else: print("illegal agg_dist_pers code: ", 
agg_dist_pers) exit() # self.print_point(dists, sample_loc_w, sampled_xyz, sample_loc, sampled_xyz_pers, sample_pnt_mask) weight, sampled_embedding = self.dist_func(sampled_embedding, dists, sample_pnt_mask, vsize, grid_vox_sz, axis_weight=self.axis_weight) if self.opt.agg_weight_norm > 0 and self.opt.agg_distance_kernel != "trilinear" and not self.opt.agg_distance_kernel.startswith("num"): weight = weight / torch.clamp(torch.sum(weight, dim=-1, keepdim=True), min=1e-8) pnt_mask_flat = sample_pnt_mask.view(-1) pts = sample_loc_w.view(-1, sample_loc_w.shape[-1]) viewdirs = sample_ray_dirs.view(-1, sample_ray_dirs.shape[-1]) conf_coefficient = 1 if sampled_conf is not None: conf_coefficient = self.gradiant_clamp(sampled_conf[..., 0], min=0.0001, max=1) output, _ = getattr(self, self.which_agg_model, None)(sampled_color, sampled_Rw2c, sampled_dir, sampled_conf, sampled_embedding, sampled_xyz_pers, sampled_xyz, sample_pnt_mask, sample_loc, sample_loc_w, sample_ray_dirs, vsize, weight * conf_coefficient, pnt_mask_flat, pts, viewdirs, total_len, ray_valid, in_shape, dists) if (self.opt.sparse_loss_weight <=0) and ("conf_coefficient" not in self.opt.zero_one_loss_items) and self.opt.prob == 0: weight, conf_coefficient = None, None return output.view(in_shape[:-1] + (self.opt.shading_color_channel_num + 1,)), ray_valid.view(in_shape[:-1]), weight, conf_coefficient
37,876
45.417892
330
py
pointnerf
pointnerf-master/models/mvs/renderer.py
import torch import torch.nn.functional as F from .mvs_utils import normal_vect, index_point_feature, build_color_volume def depth2dist(z_vals, cos_angle): # z_vals: [N_ray N_sample] device = z_vals.device dists = z_vals[..., 1:] - z_vals[..., :-1] dists = torch.cat([dists, torch.Tensor([1e10]).to(device).expand(dists[..., :1].shape)], -1) # [N_rays, N_samples] dists = dists * cos_angle.unsqueeze(-1) return dists def ndc2dist(ndc_pts, cos_angle): dists = torch.norm(ndc_pts[:, 1:] - ndc_pts[:, :-1], dim=-1) dists = torch.cat([dists, 1e10*cos_angle.unsqueeze(-1)], -1) # [N_rays, N_samples] return dists def raw2alpha(sigma, dist, net_type): alpha_softmax = F.softmax(sigma, 1) alpha = 1. - torch.exp(-sigma) T = torch.cumprod(torch.cat([torch.ones(alpha.shape[0], 1).to(alpha.device), 1. - alpha + 1e-10], -1), -1)[:, :-1] weights = alpha * T # [N_rays, N_samples] return alpha, weights, alpha_softmax def batchify(fn, chunk): """Constructs a version of 'fn' that applies to smaller batches. """ if chunk is None: return fn def ret(inputs, alpha_only): if alpha_only: return torch.cat([fn.forward_alpha(inputs[i:i + chunk]) for i in range(0, inputs.shape[0], chunk)], 0) else: return torch.cat([fn(inputs[i:i + chunk]) for i in range(0, inputs.shape[0], chunk)], 0) return ret def run_network_mvs(pts, viewdirs, alpha_feat, fn, embed_fn, embeddirs_fn, netchunk=1024): """ Prepares inputs and applies network 'fn'. 
""" if embed_fn is not None: pts = embed_fn(pts) if alpha_feat is not None: pts = torch.cat((pts,alpha_feat), dim=-1) if viewdirs is not None: if viewdirs.dim()!=3: viewdirs = viewdirs[:, None].expand(-1,pts.shape[1],-1) if embeddirs_fn is not None: viewdirs = embeddirs_fn(viewdirs) pts = torch.cat([pts, viewdirs], -1) alpha_only = viewdirs is None outputs_flat = batchify(fn, netchunk)(pts, alpha_only) outputs = torch.reshape(outputs_flat, list(pts.shape[:-1]) + [outputs_flat.shape[-1]]) return outputs def raw2outputs(raw, z_vals, dists, white_bkgd=False, net_type='v2'): """Transforms model's predictions to semantically meaningful values. Args: raw: [num_rays, num_samples along ray, 4]. Prediction from model. z_vals: [num_rays, num_samples along ray]. Integration time. rays_d: [num_rays, 3]. Direction of each ray. Returns: rgb_map: [num_rays, 3]. Estimated RGB color of a ray. disp_map: [num_rays]. Disparity map. Inverse of depth map. acc_map: [num_rays]. Sum of weights along each ray. weights: [num_rays, num_samples]. Weights assigned to each sampled color. depth_map: [num_rays]. Estimated distance to object. """ device = z_vals.device rgb = raw[..., :3] # [N_rays, N_samples, 3] alpha, weights, alpha_softmax = raw2alpha(raw[..., 3], dists, net_type) # [N_rays, N_samples] rgb_map = torch.sum(weights[..., None] * rgb, -2) # [N_rays, 3] depth_map = torch.sum(weights * z_vals, -1) disp_map = 1. / torch.max(1e-10 * torch.ones_like(depth_map, device=device), depth_map / torch.sum(weights, -1)) acc_map = torch.sum(weights, -1) if white_bkgd: rgb_map = rgb_map + (1. 
- acc_map[..., None]) return rgb_map, disp_map, acc_map, weights, depth_map, alpha def gen_angle_feature(c2ws, rays_pts, rays_dir): """ Inputs: c2ws: [1,v,4,4] rays_pts: [N_rays, N_samples, 3] rays_dir: [N_rays, 3] Returns: """ N_rays, N_samples = rays_pts.shape[:2] dirs = normal_vect(rays_pts.unsqueeze(2) - c2ws[:3, :3, 3][None, None]) # [N_rays, N_samples, v, 3] angle = torch.sum(dirs[:, :, :3] * rays_dir.reshape(N_rays,1,1,3), dim=-1, keepdim=True).reshape(N_rays, N_samples, -1) return angle def gen_dir_feature(w2c_ref, rays_dir): """ Inputs: c2ws: [1,v,4,4] rays_pts: [N_rays, N_samples, 3] rays_dir: [N_rays, 3] Returns: """ dirs = rays_dir @ w2c_ref[:3,:3].t() # [N_rays, 3] return dirs def gen_pts_feats(imgs, volume_feature, rays_pts, pose_ref, rays_ndc, feat_dim, img_feat=None, img_downscale=1.0, use_color_volume=False, net_type='v0'): N_rays, N_samples = rays_pts.shape[:2] if img_feat is not None: feat_dim += img_feat.shape[1]*img_feat.shape[2] if not use_color_volume: input_feat = torch.empty((N_rays, N_samples, feat_dim), device=imgs.device, dtype=torch.float) ray_feats = index_point_feature(volume_feature, rays_ndc) if torch.is_tensor(volume_feature) else volume_feature(rays_ndc) input_feat[..., :8] = ray_feats input_feat[..., 8:] = build_color_volume(rays_pts, pose_ref, imgs, img_feat, with_mask=True, downscale=img_downscale) else: input_feat = index_point_feature(volume_feature, rays_ndc) if torch.is_tensor(volume_feature) else volume_feature(rays_ndc) return input_feat def rendering(args, pose_ref, rays_pts, rays_ndc, depth_candidates, rays_o, rays_dir, volume_feature=None, imgs=None, network_fn=None, img_feat=None, network_query_fn=None, white_bkgd=False, **kwargs): # rays angle cos_angle = torch.norm(rays_dir, dim=-1) # using direction if pose_ref is not None: angle = gen_dir_feature(pose_ref['w2cs'][0], rays_dir/cos_angle.unsqueeze(-1)) # view dir feature else: angle = rays_dir/cos_angle.unsqueeze(-1) # rays_pts input_feat = gen_pts_feats(imgs, 
volume_feature, rays_pts, pose_ref, rays_ndc, args.feat_dim, \ img_feat, args.img_downscale, args.use_color_volume, args.net_type) # rays_ndc = rays_ndc * 2 - 1.0 # network_query_fn = lambda pts, viewdirs, rays_feats, network_fn: run_network_mvs(pts, viewdirs, rays_feats, # network_fn, # embed_fn=embed_fn, # embeddirs_fn=embeddirs_fn, # netchunk=args.netchunk) # run_network_mvs raw = network_query_fn(rays_ndc, angle, input_feat, network_fn) if raw.shape[-1]>4: input_feat = torch.cat((input_feat[...,:8],raw[...,4:]), dim=-1) dists = depth2dist(depth_candidates, cos_angle) # dists = ndc2dist(rays_ndc) rgb_map, disp_map, acc_map, weights, depth_map, alpha = raw2outputs(raw, depth_candidates, dists, white_bkgd,args.net_type) ret = {} return rgb_map, input_feat, weights, depth_map, alpha, ret def render_density(network_fn, rays_pts, density_feature, network_query_fn, chunk=1024 * 5): densities = [] device = density_feature.device for i in range(0, rays_pts.shape[0], chunk): input_feat = rays_pts[i:i + chunk].to(device) density = network_query_fn(input_feat, None, density_feature[i:i + chunk], network_fn) densities.append(density) return torch.cat(densities)
7,260
38.461957
153
py
pointnerf
pointnerf-master/models/mvs/mvs_utils.py
import os, torch, cv2, re import numpy as np from torch_scatter import scatter_min, segment_coo, scatter_mean from PIL import Image import torch.nn.functional as F import torchvision.transforms as T from functools import partial import matplotlib.pyplot as plt from scipy.spatial.transform import Rotation as R # Misc img2mse = lambda x, y : torch.mean((x - y) ** 2) mse2psnr = lambda x : -10. * torch.log(x) / torch.log(torch.Tensor([10.])) to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8) mse2psnr2 = lambda x : -10. * np.log(x) / np.log(10.) def get_psnr(imgs_pred, imgs_gt): psnrs = [] for (img,tar) in zip(imgs_pred,imgs_gt): psnrs.append(mse2psnr2(np.mean((img - tar.cpu().numpy())**2))) return np.array(psnrs) def init_log(log, keys): for key in keys: log[key] = torch.tensor([0.0], dtype=float) return log def visualize_depth_numpy(depth, minmax=None, cmap=cv2.COLORMAP_JET): """ depth: (H, W) """ x = np.nan_to_num(depth) # change nan to 0 if minmax is None: mi = np.min(x[x>0]) # get minimum positive depth (ignore background) ma = np.max(x) else: mi,ma = minmax x = (x-mi)/(ma-mi+1e-8) # normalize to 0~1 x = (255*x).astype(np.uint8) x_ = cv2.applyColorMap(x, cmap) return x_, [mi,ma] def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET): """ depth: (H, W) """ if type(depth) is not np.ndarray: depth = depth.cpu().numpy() x = np.nan_to_num(depth) # change nan to 0 if minmax is None: mi = np.min(x[x>0]) # get minimum positive depth (ignore background) ma = np.max(x) else: mi,ma = minmax x = (x-mi)/(ma-mi+1e-8) # normalize to 0~1 x = (255*x).astype(np.uint8) x_ = Image.fromarray(cv2.applyColorMap(x, cmap)) x_ = T.ToTensor()(x_) # (3, H, W) return x_, [mi,ma] # Ray helpers def get_rays_mvs(H, W, intrinsic, c2w, N=1024, isRandom=True, is_precrop_iters=False, chunk=-1, idx=-1): device = c2w.device if isRandom: if is_precrop_iters and torch.rand((1,)) > 0.3: xs, ys = torch.randint(W//6, W-W//6, (N,)).float().to(device), torch.randint(H//6, H-H//6, 
(N,)).float().to(device) else: xs, ys = torch.randint(0,W,(N,)).float().to(device), torch.randint(0,H,(N,)).float().to(device) else: ys, xs = torch.meshgrid(torch.linspace(0, H - 1, H), torch.linspace(0, W - 1, W)) # pytorch's meshgrid has indexing='ij' ys, xs = ys.reshape(-1), xs.reshape(-1) if chunk>0: ys, xs = ys[idx*chunk:(idx+1)*chunk], xs[idx*chunk:(idx+1)*chunk] ys, xs = ys.to(device), xs.to(device) dirs = torch.stack([(xs-intrinsic[0,2])/intrinsic[0,0], (ys-intrinsic[1,2])/intrinsic[1,1], torch.ones_like(xs)], -1) # use 1 instead of -1 rays_d = dirs @ c2w[:3,:3].t() # dot product, equals to: [c2w.dot(dir) for dir in dirs] # Translate camera frame's origin to the world frame. It is the origin of all rays. rays_o = c2w[:3,-1].clone() pixel_coordinates = torch.stack((ys,xs)) # row col return rays_o, rays_d, pixel_coordinates def ndc_2_cam(ndc_xyz, near_far, intrinsic, W, H): inv_scale = torch.tensor([[W - 1, H - 1]], device=ndc_xyz.device) cam_z = ndc_xyz[..., 2:3] * (near_far[1] - near_far[0]) + near_far[0] cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z cam_xyz = torch.cat([cam_xy, cam_z], dim=-1) cam_xyz = cam_xyz @ torch.inverse(intrinsic[0, ...].t()) return cam_xyz def get_ndc_coordinate(w2c_ref, intrinsic_ref, point_samples, inv_scale, near=2, far=6, pad=0, lindisp=False): ''' point_samples [N_rays N_sample 3] ''' N_rays, N_samples = point_samples.shape[:2] point_samples = point_samples.reshape(-1, 3) # wrap to ref view if w2c_ref is not None: R = w2c_ref[:3, :3] # (3, 3) T = w2c_ref[:3, 3:] # (3, 1) point_samples = torch.matmul(point_samples, R.t()) + T.reshape(1,3) if intrinsic_ref is not None: # using projection point_samples_pixel = point_samples @ intrinsic_ref.t() point_samples_pixel[:,:2] = (point_samples_pixel[:,:2] / point_samples_pixel[:,-1:] + 0.0) / inv_scale.reshape(1,2) # normalize to 0~1 if not lindisp: point_samples_pixel[:,2] = (point_samples_pixel[:,2] - near) / (far - near) # normalize to 0~1 else: point_samples_pixel[:,2] = 
(1.0/point_samples_pixel[:,2]-1.0/near)/(1.0/far - 1.0/near) else: # using bounding box near, far = near.view(1,3), far.view(1,3) point_samples_pixel = (point_samples - near) / (far - near) # normalize to 0~1 del point_samples if pad>0: W_feat, H_feat = (inv_scale+1)/4.0 point_samples_pixel[:,1] = point_samples_pixel[:,1] * H_feat / (H_feat + pad * 2) + pad / (H_feat + pad * 2) point_samples_pixel[:,0] = point_samples_pixel[:,0] * W_feat / (W_feat + pad * 2) + pad / (W_feat + pad * 2) point_samples_pixel = point_samples_pixel.view(N_rays, N_samples, 3) return point_samples_pixel def build_color_volume(point_samples, pose_ref, imgs, img_feat=None, downscale=1.0, with_mask=False): ''' point_world: [N_ray N_sample 3] imgs: [N V 3 H W] ''' device = imgs.device N, V, C, H, W = imgs.shape inv_scale = torch.tensor([W - 1, H - 1]).to(device) C += with_mask C += 0 if img_feat is None else img_feat.shape[2] colors = torch.empty((*point_samples.shape[:2], V*C), device=imgs.device, dtype=torch.float) for i,idx in enumerate(range(V)): w2c_ref, intrinsic_ref = pose_ref['w2cs'][idx], pose_ref['intrinsics'][idx].clone() # assume camera 0 is reference point_samples_pixel = get_ndc_coordinate(w2c_ref, intrinsic_ref, point_samples, inv_scale)[None] grid = point_samples_pixel[...,:2]*2.0-1.0 grid = grid.to(imgs.dtype) data = F.grid_sample(imgs[:, idx], grid, align_corners=True, mode='bilinear', padding_mode='border') if img_feat is not None: data = torch.cat((data,F.grid_sample(img_feat[:,idx], grid, align_corners=True, mode='bilinear', padding_mode='zeros')),dim=1) if with_mask: in_mask = ((grid >-1.0)*(grid < 1.0)) in_mask = (in_mask[...,0]*in_mask[...,1]).float() data = torch.cat((data,in_mask.unsqueeze(1)), dim=1) colors[...,i*C:i*C+C] = data[0].permute(1, 2, 0) del grid, point_samples_pixel, data return colors def normal_vect(vect, dim=-1): return vect / (torch.sqrt(torch.sum(vect**2,dim=dim,keepdim=True))+1e-7) def index_point_feature(volume_feature, ray_coordinate_ref, 
chunk=-1): '''' Args: volume_color_feature: [B, G, D, h, w] volume_density_feature: [B C D H W] ray_dir_world:[3 ray_samples N_samples] ray_coordinate_ref: [3 N_rays N_samples] ray_dir_ref: [3 N_rays] depth_candidates: [N_rays, N_samples] Returns: [N_rays, N_samples] ''' device = volume_feature.device H, W = ray_coordinate_ref.shape[-3:-1] if chunk != -1: features = torch.zeros((volume_feature.shape[1],H,W), device=volume_feature.device, dtype=torch.float, requires_grad=volume_feature.requires_grad) grid = ray_coordinate_ref.view(1, 1, 1, H * W, 3) * 2 - 1.0 # [1 1 H W 3] (x,y,z) for i in range(0, H*W, chunk): features[:,i:i + chunk] = F.grid_sample(volume_feature, grid[:,:,:,i:i + chunk], align_corners=True, mode='bilinear')[0] features = features.permute(1,2,0) else: grid = ray_coordinate_ref.view(-1, 1, H, W, 3).to(device) * 2 - 1.0 # [1 1 H W 3] (x,y,z) features = F.grid_sample(volume_feature, grid, align_corners=True, mode='bilinear')[:,:,0].permute(2,3,0,1).squeeze()#, padding_mode="border" return features def filter_keys(dict): if 'N_samples' in dict.keys(): dict.pop('N_samples') if 'ndc' in dict.keys(): dict.pop('ndc') if 'lindisp' in dict.keys(): dict.pop('lindisp') return dict def sub_selete_data(data_batch, device, idx, filtKey=[], filtIndex=['view_ids_all','c2ws_all','scan','bbox','w2ref','ref2w','light_id','ckpt','idx']): data_sub_selete = {} for item in data_batch.keys(): data_sub_selete[item] = data_batch[item][:,idx].float() if (item not in filtIndex and torch.is_tensor(item) and item.dim()>2) else data_batch[item].float() if not data_sub_selete[item].is_cuda: data_sub_selete[item] = data_sub_selete[item].to(device) return data_sub_selete def detach_data(dictionary): dictionary_new = {} for key in dictionary.keys(): dictionary_new[key] = dictionary[key].detach().clone() return dictionary_new def read_pfm(filename): file = open(filename, 'rb') header = file.readline().decode('utf-8').rstrip() if header == 'PF': color = True elif header == 'Pf': color 
= False else: raise Exception('Not a PFM file.') dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8')) if dim_match: width, height = map(int, dim_match.groups()) else: raise Exception('Malformed PFM header.') scale = float(file.readline().rstrip()) if scale < 0: # little-endian endian = '<' scale = -scale else: endian = '>' # big-endian data = np.fromfile(file, endian + 'f') shape = (height, width, 3) if color else (height, width) data = np.reshape(data, shape) data = np.flipud(data) file.close() return data, scale def gen_render_path(c2ws, N_views=30): N = len(c2ws) rotvec, positions = [], [] rotvec_inteplat, positions_inteplat = [], [] weight = np.linspace(1.0, .0, N_views//3, endpoint=False).reshape(-1, 1) for i in range(N): r = R.from_matrix(c2ws[i, :3, :3]) euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3) if i: mask = np.abs(euler_ange - rotvec[0])>180 euler_ange[mask] += 360.0 rotvec.append(euler_ange) positions.append(c2ws[i, :3, 3:].reshape(1, 3)) if i: rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i]) positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i]) rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0]) positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0]) c2ws_render = [] angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat) for rotvec, position in zip(angles_inteplat, positions_inteplat): c2w = np.eye(4) c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix() c2w[:3, 3:] = position.reshape(3, 1) c2ws_render.append(c2w.copy()) c2ws_render = np.stack(c2ws_render) return c2ws_render from scipy.interpolate import CubicSpline ################################################# MVS helper functions ##################################### from kornia.utils import create_meshgrid def homo_warp_nongrid(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, filter=True, **kwargs): 
# src_grid: B, 3, D*H*W xyz B, M, _ = ref_cam_xyz.shape if w2c is not None: src_cam_xyz = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:,:,0:1])], dim=-1) @ c2w.transpose(1,2) @ w2c.transpose(1,2) else: src_cam_xyz = ref_cam_xyz src_grid = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1,2))[...,:2] mask = torch.prod(torch.cat([torch.ge(src_grid, torch.zeros([1,1,2], device=src_grid.device)), torch.le(src_grid, torch.tensor([[[WD-1,HD-1]]], device=src_grid.device))],dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0 src_grid = src_grid.to(torch.float32) # grid xy hard_id_xy = torch.ceil(src_grid[:,:,:]) src_grid = torch.masked_select(src_grid, mask).reshape(B, -1, 2) if filter else src_grid src_grid[..., 0] = src_grid[..., 0] / ((WD - 1.0) / 2.0) - 1.0 # scale to -1~1 src_grid[..., 1] = src_grid[..., 1] / ((HD - 1.0) / 2.0) - 1.0 # scale to -1~1 return src_grid, mask, hard_id_xy def homo_warp_fg_mask(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, **kwargs): # src_grid: B, 3, D*H*W xyz B, M, _ = ref_cam_xyz.shape if w2c is not None: src_cam_xyz = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:,:,0:1])], dim=-1) @ c2w.transpose(1,2) @ w2c.transpose(1,2) else: src_cam_xyz = ref_cam_xyz src_grid = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1,2))[...,:2] mask = torch.prod(torch.cat([torch.ge(src_grid, torch.zeros([1,1,2], device=src_grid.device)), torch.le(src_grid, torch.tensor([[[WD-1,HD-1]]], device=src_grid.device))],dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0 src_grid = src_grid.to(torch.float32) # grid xy hard_id_xy = torch.ceil(src_grid[:,:,:])[:,mask[0,...,0],:] return id2mask(hard_id_xy, HD, WD) def homo_warp_nongrid_occ(c2w, w2c, intrinsic, ref_cam_xyz, HD, WD, tolerate=0.1, scatter_cpu=True): # src_grid: B, 3, D*H*W xyz B, M, _ = ref_cam_xyz.shape if w2c is not None: src_cam_xyz = torch.cat([ref_cam_xyz, torch.ones_like(ref_cam_xyz[:,:,0:1])], dim=-1) @ c2w.transpose(1,2) @ 
w2c.transpose(1,2) else: src_cam_xyz = ref_cam_xyz # print("src_cam_xyz",src_cam_xyz.shape, intrinsic.shape) src_grid = ((src_cam_xyz[..., :3] / src_cam_xyz[..., 2:3]) @ intrinsic.transpose(1,2))[...,:2] # print("src_pix_xy1", src_grid.shape, torch.min(src_grid,dim=-2)[0], torch.max(src_grid,dim=-2)[0]) mask = torch.prod(torch.cat([torch.ge(src_grid, torch.zeros([1,1,2], device=src_grid.device)), torch.le(torch.ceil(src_grid), torch.tensor([[[WD-1,HD-1]]], device=src_grid.device))],dim=-1), dim=-1, keepdim=True, dtype=torch.int8) > 0 src_grid = torch.masked_select(src_grid, mask).reshape(B, -1, 2) cam_z = torch.masked_select(src_cam_xyz[:,:,2], mask[...,0]).reshape(B, -1) src_grid = src_grid.to(torch.float32) # grid xy # print("HD, WD", HD, WD) 512 640 src_grid_x = src_grid[..., 0:1] / ((WD - 1.0) / 2.0) - 1.0 # scale to -1~1 src_grid_y = src_grid[..., 1:2] / ((HD - 1.0) / 2.0) - 1.0 # scale to -1~1 # hard_id_xy: 1, 307405, 2 hard_id_xy = torch.ceil(src_grid[:,:,:]) # print("hard_id_xy", hard_id_xy.shape) index = (hard_id_xy[...,0] * HD + hard_id_xy[...,1]).long() # 1, 307405 # print("index", index.shape, torch.min(index), torch.max(index)) min_depth, argmin = scatter_min(cam_z[:,:].cpu() if scatter_cpu else cam_z[:,:], index[:,:].cpu() if scatter_cpu else index[:,:], dim=1) # print("argmin", min_depth.shape, min_depth, argmin.shape) queried_depth = min_depth.to(ref_cam_xyz.device)[:, index[0,...]] if scatter_cpu else min_depth[:, index[0,...]] block_mask = (cam_z <= (queried_depth + tolerate)) # print("mask", mask.shape, torch.sum(mask), block_mask.shape, torch.sum(block_mask)) mask[mask.clone()] = block_mask # print("mask", mask.shape, torch.sum(mask), block_mask.shape, torch.sum(block_mask)) # print("src_grid_x", src_grid_x.shape) src_grid_x = torch.masked_select(src_grid_x, block_mask[..., None]).reshape(B, -1, 1) src_grid_y = torch.masked_select(src_grid_y, block_mask[..., None]).reshape(B, -1, 1) # print("src_grid_x", src_grid_x.shape, src_grid_y.shape, 
mask.shape) return torch.cat([src_grid_x, src_grid_y], dim=-1), mask, hard_id_xy def id2mask(hard_id_xy, HD, WD): mask = torch.zeros([HD, WD], dtype=torch.int8, device=hard_id_xy.device) hard_id_xy = hard_id_xy.long() mask[hard_id_xy[0,...,1], hard_id_xy[0,...,0]] = 1 # torch.ones_like(hard_id_xy[0,...,0], dtype=mask.dtype) return mask def gen_bg_points(batch): plane_pnt, plane_normal = batch["plane_pnt"][0], batch["plane_normal"][0] plane_pnt, plane_normal = torch.as_tensor(plane_pnt, dtype=torch.float32, device=batch['campos'].device), torch.as_tensor(plane_normal, dtype=torch.float32, device=batch['campos'].device) cross_xyz_world = get_rayplane_cross(batch['campos'], batch['raydir'], plane_pnt[None, None, :], plane_normal[None, None, :]) return cross_xyz_world def get_rayplane_cross(cam_pos, raydir, p_co, p_no, epsilon=1e-3): """ cam_pos: 1, 3 ray_dir: Define the line. 1, 2304, 3 p_co, p_no: define the plane: p_co Is a point on the plane (plane coordinate). 1, 1, 3 p_no Is a normal vector defining the plane direction; 1, 1, 3 (does not need to be normalized). Return a Vector or None (when the intersection can't be found). """ dot = torch.sum(p_no * raydir, dim=-1) # 1, 2304 board_mask = dot >= epsilon dot_valid = dot[board_mask][None,:] # 1, 2304 w = cam_pos[None,:,:] - p_co # torch.Size([1, 1, 3]) fac = -torch.sum(p_no * w, dim=-1) / dot_valid # 1, 2304 ray_dir_valid = raydir[:,board_mask[0],:] ray_dir_valid = ray_dir_valid * fac[..., None] # 1, 2304, 3 intersect_world_valid = cam_pos[None,...] 
+ ray_dir_valid # 1, 2304, 3 intersect_world = torch.zeros_like(raydir) intersect_world[:,board_mask[0],:] = intersect_world_valid return intersect_world def extract_from_2d_grid(src_feat, src_grid, mask): B, M, _ = src_grid.shape warped_src_feat = F.grid_sample(src_feat, src_grid[:, None, ...], mode='bilinear', padding_mode='zeros', align_corners=True) # (B, C, D, H*W) warped_src_feat = warped_src_feat.permute(0,2,3,1).view(B, M, src_feat.shape[1]).cuda() # 1, 224874, 3 if mask is not None: B, N, _ = mask.shape full_src_feat = torch.zeros([B, N, src_feat.shape[1]], device=warped_src_feat.device, dtype=warped_src_feat.dtype) full_src_feat[0, mask[0,:,0], :] = warped_src_feat warped_src_feat = full_src_feat return warped_src_feat def homo_warp(src_feat, proj_mat, depth_values, src_grid=None, pad=0): """ src_feat: (B, C, H, W) proj_mat: (B, 3, 4) equal to "src_proj @ ref_proj_inv" depth_values: (B, D, H, W) out: (B, C, D, H, W) """ if src_grid==None: B, C, H, W = src_feat.shape device = src_feat.device if pad>0: H_pad, W_pad = H + pad*2, W + pad*2 else: H_pad, W_pad = H, W depth_values = depth_values[...,None,None].repeat(1, 1, H_pad, W_pad) D = depth_values.shape[1] R = proj_mat[:, :, :3] # (B, 3, 3) T = proj_mat[:, :, 3:] # (B, 3, 1) # create grid from the ref frame ref_grid = create_meshgrid(H_pad, W_pad, normalized_coordinates=False, device=device) # (1, H, W, 2) if pad>0: ref_grid -= pad ref_grid = ref_grid.permute(0, 3, 1, 2) # (1, 2, H, W) ref_grid = ref_grid.reshape(1, 2, W_pad * H_pad) # (1, 2, H*W) ref_grid = ref_grid.expand(B, -1, -1) # (B, 2, H*W) ref_grid = torch.cat((ref_grid, torch.ones_like(ref_grid[:, :1])), 1) # (B, 3, H*W) ref_grid_d = ref_grid.repeat(1, 1, D) # (B, 3, D*H*W), X, Y, Z src_grid_d = R @ ref_grid_d + T / depth_values.view(B, 1, D * W_pad * H_pad) del ref_grid_d, ref_grid, proj_mat, R, T, depth_values # release (GPU) memory src_grid = src_grid_d[:, :2] / src_grid_d[:, 2:] # divide by depth (B, 2, D*H*W) del src_grid_d src_grid[:, 0] = 
src_grid[:, 0] / ((W - 1) / 2) - 1 # scale to -1~1 src_grid[:, 1] = src_grid[:, 1] / ((H - 1) / 2) - 1 # scale to -1~1 src_grid = src_grid.permute(0, 2, 1) # (B, D*H*W, 2) src_grid = src_grid.view(B, D, H_pad, W_pad, 2) B, D, H_pad, W_pad = src_grid.shape[:4] src_grid = src_grid.to(src_feat.dtype) # 1, 32, 128, 160 warped_src_feat = F.grid_sample(src_feat, src_grid.view(B, D, H_pad * W_pad, 2), mode='bilinear', padding_mode='zeros', align_corners=True) # (B, C, D, H*W) warped_src_feat = warped_src_feat.view(B, -1, D, H_pad, W_pad) return warped_src_feat, src_grid ############################### render path #################################### def normalize(v): """Normalize a vector.""" return v/np.linalg.norm(v) from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR from warmup_scheduler import GradualWarmupScheduler def construct_vox_points(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None): # xyz, N, 3 xyz = xyz_val if partition_xyz is None else partition_xyz if space_min is None: xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0] space_edge = torch.max(xyz_max - xyz_min) * 1.05 xyz_mid = (xyz_max + xyz_min) / 2 space_min = xyz_mid - space_edge / 2 space_max = xyz_mid + space_edge / 2 else: space_edge = space_max - space_min construct_vox_sz = space_edge / vox_res xyz_shift = xyz - space_min[None, ...] 
sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True) xyz_centroid = scatter_mean(xyz_val, inv_idx, dim=0) min_idx, _ = scatter_min(torch.arange(len(xyz), device=xyz.device), inv_idx, dim=0) return xyz_centroid, sparse_grid_idx, min_idx def construct_vox_points_xyz(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None): # xyz, N, 3 xyz = xyz_val if partition_xyz is None else partition_xyz if space_min is None: xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0] space_edge = torch.max(xyz_max - xyz_min) * 1.05 xyz_mid = (xyz_max + xyz_min) / 2 space_min = xyz_mid - space_edge / 2 else: space_edge = space_max - space_min construct_vox_sz = space_edge / vox_res xyz_shift = xyz - space_min[None, ...] sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True) xyz_centroid = scatter_mean(xyz_val, inv_idx, dim=0) return xyz_centroid def construct_vox_points_ind(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None): # xyz, N, 3 xyz = xyz_val if partition_xyz is None else partition_xyz if space_min is None: xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0] space_edge = torch.max(xyz_max - xyz_min) * 1.05 xyz_mid = (xyz_max + xyz_min) / 2 space_min = xyz_mid - space_edge / 2 space_max = xyz_mid + space_edge / 2 else: space_edge = space_max - space_min construct_vox_sz = space_edge / vox_res xyz_shift = xyz - space_min[None, ...] 
sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True) return sparse_grid_idx, inv_idx, space_min, space_max def construct_vox_points_closest(xyz_val, vox_res, partition_xyz=None, space_min=None, space_max=None): # xyz, N, 3 xyz = xyz_val if partition_xyz is None else partition_xyz if space_min is None: xyz_min, xyz_max = torch.min(xyz, dim=-2)[0], torch.max(xyz, dim=-2)[0] space_edge = torch.max(xyz_max - xyz_min) * 1.05 xyz_mid = (xyz_max + xyz_min) / 2 space_min = xyz_mid - space_edge / 2 else: space_edge = space_max - space_min mask = (xyz_val - space_min[None,...]) mask *= (space_max[None,...] - xyz_val) mask = torch.prod(mask, dim=-1) > 0 xyz_val = xyz_val[mask, :] construct_vox_sz = space_edge / vox_res xyz_shift = xyz - space_min[None, ...] sparse_grid_idx, inv_idx = torch.unique(torch.floor(xyz_shift / construct_vox_sz[None, ...]).to(torch.int32), dim=0, return_inverse=True) xyz_centroid = scatter_mean(xyz_val, inv_idx, dim=0) xyz_centroid_prop = xyz_centroid[inv_idx,:] xyz_residual = torch.norm(xyz_val - xyz_centroid_prop, dim=-1) print("xyz_residual", xyz_residual.shape) _, min_idx = scatter_min(xyz_residual, inv_idx, dim=0) print("min_idx", min_idx.shape) return xyz_centroid, sparse_grid_idx, min_idx def transform_points_to_voxels(points, point_cloud_range, voxel_sizes, max_pnts_per_vox, max_voxels, voxel_generator=None): voxel_output = voxel_generator.generate(points) if isinstance(voxel_output, dict): voxels, coordinates, num_points = \ voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel'] else: voxels, coordinates, num_points = voxel_output return voxels, coordinates, num_points def alpha_masking(points, alphas, intrinsics, c2ws, w2cs, near_far, opt=None): w_xyz1 = torch.cat([points[..., :3], torch.ones_like(points[..., :1])], dim=-1) H, W = alphas[0][0].shape vishull_mask = None range_mask = None for i in range(len(alphas)): alpha, 
intrinsic, c2w, w2c = torch.as_tensor(alphas[i][0], dtype=points.dtype, device=points.device), torch.as_tensor(intrinsics[i], dtype=points.dtype, device=points.device), torch.as_tensor(c2ws[i], dtype=points.dtype, device=points.device), torch.as_tensor(w2cs[i], dtype=points.dtype, device=points.device) # print("w_xyz1",w_xyz1.shape, w2c.shape, intrinsic.shape, alpha.shape) cam_xyz = w_xyz1 @ w2c.t() torch.cuda.empty_cache() if near_far is not None: near_far_mask = torch.logical_and(cam_xyz[...,2]>=(near_far[0]-1.0), cam_xyz[...,2]<=near_far[1]) cam_xyz = cam_xyz[...,:3] @ intrinsic.t() img_xy = torch.floor(cam_xyz[:, :2] / cam_xyz[:, -1:] + 0.0).long() del cam_xyz torch.cuda.empty_cache() if opt is not None and (opt.alpha_range > 0 or opt.inall_img == 0): range_mask = torch.logical_and(img_xy >= torch.zeros((1,2), dtype=img_xy.dtype, device=img_xy.device), img_xy < torch.as_tensor([[W,H]], dtype=img_xy.dtype, device=img_xy.device)) range_mask = torch.prod(range_mask, dim=-1) > 0 img_xy[..., 0] = torch.clamp(img_xy[..., 0], min=0, max=W-1) img_xy[..., 1] = torch.clamp(img_xy[..., 1], min=0, max=H-1) mask = alpha[img_xy[..., 1], img_xy[..., 0]] if range_mask is not None: mask = mask + (~range_mask).to(torch.float32) mask = mask > 0.1 if near_far is not None: vishull_mask = (mask*near_far_mask) if vishull_mask is None else vishull_mask*(mask*near_far_mask) else: vishull_mask=mask if vishull_mask is None else vishull_mask*mask del img_xy torch.cuda.empty_cache() del range_mask print("vishull_mask", vishull_mask.shape) return vishull_mask > 0
26,948
43.397035
318
py
pointnerf
pointnerf-master/models/mvs/mvs_points_model.py
import torch import os from torch.utils.data import DataLoader import imageio # models from .models import * from .renderer import * from .mvs_utils import * from . import filter_utils from ..helpers.networks import init_seq from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet from torch.optim.lr_scheduler import CosineAnnealingLR from inplace_abn import InPlaceABN from collections import OrderedDict device = torch.device("cuda" if torch.cuda.is_available() else "cpu") from torchvision import transforms as T feature_str_lst=['appr_feature_str0', 'appr_feature_str1', 'appr_feature_str2', 'appr_feature_str3'] def premlp_init(opt): in_channels = 63 out_channels = opt.point_features_dim blocks = [] act = getattr(nn, opt.act_type, None) for i in range(opt.shading_feature_mlp_layer1): blocks.append(nn.Linear(in_channels, out_channels)) blocks.append(act(inplace=True)) in_channels = out_channels blocks = nn.Sequential(*blocks) init_seq(blocks) return blocks class MvsPointsModel(nn.Module): def __init__(self, args): super(MvsPointsModel, self).__init__() self.args = args self.args.feat_dim = 8+3*4 self.idx = 0 # Create nerf model self.render_kwargs_train, self.render_kwargs_test, start = create_mvs(args, mvs_mode=self.args.manual_depth_view, depth=args.depth_grid) filter_keys(self.render_kwargs_train) # Create mvs model self.MVSNet = self.render_kwargs_train['network_featmvs'] if args.pre_d_est is not None and self.args.manual_depth_view > 0 : self.load_pretrained_d_est(self.MVSNet, args.pre_d_est) self.FeatureNet = self.render_kwargs_train['network_2d'] self.render_kwargs_train.pop('network_featmvs') self.render_kwargs_train.pop('network_2d') self.render_kwargs_train['NDC_local'] = False if self.args.manual_depth_view == -1: self.ProbNet = ProbNet(8).to(device) if self.args.shading_feature_mlp_layer0 > 0: self.premlp = premlp_init(args) # self.eval_metric = [0.01, 0.05, 0.1] self.sample_func = getattr(self, args.mvs_point_sampler, None) self.cnt = 0 def 
load_pretrained_d_est(self, model, pre_d_est): # load checkpoint file specified by args.loadckpt print("loading model {}".format(pre_d_est)) state_dict = torch.load(pre_d_est, map_location=lambda storage, loc: storage) new_state_dict = OrderedDict() for k, v in state_dict['model'].items(): name = k[7:] # remove module. new_state_dict[name] = v model.load_state_dict(new_state_dict) @staticmethod def modify_commandline_options(parser, is_train=True): parser.add_argument("--mvs_lr", type=float, default=5e-4, help='learning rate') parser.add_argument('--pad', type=int, default=24) parser.add_argument('--depth_grid', type=int, default=128) parser.add_argument('--prob_thresh', type=float, default=0.8) parser.add_argument('--dprob_thresh', type=float, default=0.8) parser.add_argument('--num_neighbor', type=int, default=1) parser.add_argument('--depth_vid', type=str, default="0", help="0123") parser.add_argument('--ref_vid', type=int, default=0, help="0, 1, 2, or 3") parser.add_argument('--num_each_depth', type=int, default=1) parser.add_argument('--depth_conf_thresh', type=float, default=None) parser.add_argument('--depth_occ', type=int, default=0) parser.add_argument('--manual_depth_view', type=int, default=0, help="-1 for learning probability, 0 for gt, 1 for pretrained MVSNet") parser.add_argument('--pre_d_est', type=str, default=None, help="loading pretrained depth estimator") parser.add_argument('--manual_std_depth', type=float, default=0) parser.add_argument('--far_plane_shift', type=float, default=None) parser.add_argument('--mvs_point_sampler', type=str, default="gau_single_sampler") parser.add_argument('--appr_feature_str0', type=str, nargs='+', # default=["imgfeat_0_0123", "vol"], default=["imgfeat_0_0", "vol"], help= "which feature_map") parser.add_argument('--appr_feature_str1', type=str, nargs='+', # default=["imgfeat_0_0123", "vol"], default=["imgfeat_0_0", "vol"], help= "which feature_map") parser.add_argument('--appr_feature_str2', type=str, nargs='+', # 
default=["imgfeat_0_0123", "vol"], default=["imgfeat_0_0", "vol"], help= "which feature_map") parser.add_argument('--appr_feature_str3', type=str, nargs='+', # default=["imgfeat_0_0123", "vol"], default=["imgfeat_0_0", "vol"], help= "which feature_map") parser.add_argument('--vox_res', type=int, default=0, help='vox_resolution if > 0') def decode_batch(self, batch, idx=list(torch.arange(4))): data_mvs = sub_selete_data(batch, device, idx, filtKey=[]) pose_ref = {'w2cs': data_mvs['w2cs'].squeeze(), 'intrinsics': data_mvs['intrinsics'].squeeze(), 'c2ws': data_mvs['c2ws'].squeeze(),'near_fars':data_mvs['near_fars'].squeeze()} return data_mvs, pose_ref def normalize_rgb(self, data, shape=(1,1,3,1,1)): # to unnormalize image for visualization # data N V C H W device = data.device mean = torch.tensor([0.485, 0.456, 0.406]).view(*shape).to(device) std = torch.tensor([0.229, 0.224, 0.225]).view(*shape).to(device) return (data - mean) / std def gau_single_sampler(self, volume_prob, args, ref_intrinsic, near_far, cam_expected_depth=None, ndc_std_depth=None): # volume_prob # ([1, 1, 128, 176, 208]) if cam_expected_depth is None: B, C, D, H, W = volume_prob.shape v = 1.0 / D ndc_depths = torch.linspace(0.5 * v, 1.0 - 0.5 * v, steps=D, device=volume_prob.device)[None, None, :, None, None].expand(1, 1, -1, H, W) # B, C, H, W ndc_expected_depth = torch.sum(volume_prob * ndc_depths, dim=2) # ([1, 1, 1, 176, 208]) ndc_std_depth = torch.sqrt(torch.sum(volume_prob * torch.square(ndc_depths-ndc_expected_depth), dim=2)) #([1, 1, 176, 208]) mask = self.prob_filter(args.dprob_thresh, args.num_neighbor, volume_prob, ndc_expected_depth, ndc_std_depth) else: # [1, 1, 512, 640] mask = torch.logical_and(cam_expected_depth >= near_far[0], cam_expected_depth <= near_far[1]) ndc_expected_depth = (cam_expected_depth - near_far[0]) / (near_far[1] - near_far[0]) sampled_depth = self.sample_by_gau(ndc_expected_depth, ndc_std_depth, args) #([1, 1, 5, 512, 640]) ndc_xyz, cam_xyz = 
self.depth2point(sampled_depth, ref_intrinsic, near_far) # 1, 1, 512, 640, 3 return ndc_xyz, cam_xyz, ndc_expected_depth.shape[-2:], mask def sample_by_gau(self, ndc_expected_depth, ndc_std_depth, args): B, C, H, W = ndc_expected_depth.shape N = args.num_each_depth # [1, 5, 1, 176, 208] sampled_depth = ndc_std_depth[:,None,...] * torch.normal(mean=torch.zeros((B, N, C, H, W), device="cuda"), std=torch.ones((B, N, C, H, W), device=ndc_expected_depth.device)) + ndc_expected_depth[:,None,...] return torch.clamp(sampled_depth, min=0.0, max=1.0) def depth2point(self, sampled_depth, ref_intrinsic, near_far): B, N, C, H, W = sampled_depth.shape valid_z = sampled_depth valid_x = torch.arange(W, dtype=torch.float32, device=sampled_depth.device) / (W - 1) valid_y = torch.arange(H, dtype=torch.float32, device=sampled_depth.device) / (H - 1) valid_y, valid_x = torch.meshgrid(valid_y, valid_x) # B,N,H,W valid_x = valid_x[None, None, None, ...].expand(B, N, C, -1, -1) valid_y = valid_y[None, None, None, ...].expand(B, N, C, -1, -1) ndc_xyz = torch.stack([valid_x, valid_y, valid_z], dim=-1).view(B, N, C, H, W, 3) # 1, 1, 5, 512, 640, 3 cam_xyz = ndc_2_cam(ndc_xyz, near_far, ref_intrinsic, W, H) # 1, 1, 5, 512, 640, 3 return ndc_xyz, cam_xyz def prob_filter(self, thresh, num_neighbor, volume_prob, ndc_expected_depth, ndc_std_depth): B, C, D, H, W = volume_prob.shape ceil_idx = torch.ceil(ndc_expected_depth) lower_idx = ceil_idx - num_neighbor // 2 + 1 # B, C, 1, H, W # upper_idx = ceil_idx + num_neighbor // 2 shifts = torch.arange(0, num_neighbor, device=volume_prob.device, dtype=torch.int64)[None, :, None, None] idx = torch.clamp(lower_idx.to(torch.int64) + shifts, min=0, max=D-1) # B, num_neighbor, H, W select_probs = torch.gather(torch.squeeze(volume_prob, dim=1), 1, idx) # B, num_neighbor, H, W sumprobs = torch.sum(select_probs, dim=1, keepdim=True) #([1, 1, 176, 208]) mask = sumprobs > thresh return mask def extract_2d(self, img_feats, view_ids, layer_ids, intrinsics, c2ws, 
w2cs, cam_xyz, HD, WD, cam_vid=0): out_feats = [] colors = [] for vid in view_ids: w2c = w2cs[:,vid,...] if vid != cam_vid else None warp = homo_warp_nongrid_occ if self.args.depth_occ > 0 else homo_warp_nongrid src_grid, mask, hard_id_xy = warp(c2ws[:,cam_vid,...], w2c, intrinsics[:,vid,...], cam_xyz, HD, WD, tolerate=0.1) warped_feats = [] for lid in layer_ids: img_feat = img_feats[lid] # 3, 32, 128, 160 warped_src_feat = extract_from_2d_grid(img_feat[vid:vid+1, ...], src_grid, mask) if lid == 0: colors.append(warped_src_feat) else: warped_feats.append(warped_src_feat) warped_feats = torch.cat(warped_feats, dim=-1) out_feats.append(warped_feats) out_feats = torch.cat(out_feats, dim=-1) colors = torch.cat(colors, dim=-1) if len(colors) > 0 else None return out_feats, colors def get_image_features(self, imgs): return self.FeatureNet(imgs[:, :self.args.init_view_num]) def query_embedding(self, HDWD, cam_xyz, photometric_confidence, img_feats, c2ws, w2cs, intrinsics, cam_vid, pointdir_w=False): HD, WD = HDWD points_embedding = [] points_dirs = None points_conf = None points_colors = None for feat_str in getattr(self.args, feature_str_lst[cam_vid]): if feat_str.startswith("imgfeat"): _, view_ids, layer_ids = feat_str.split("_") view_ids = [int(a) for a in list(view_ids)] layer_ids = [int(a) for a in list(layer_ids)] twoD_feats, points_colors = self.extract_2d(img_feats, view_ids, layer_ids, intrinsics, c2ws, w2cs, cam_xyz, HD, WD, cam_vid=cam_vid) points_embedding.append(twoD_feats) elif feat_str.startswith("dir"): _, view_ids = feat_str.split("_") view_ids = torch.as_tensor([int(a) for a in list(view_ids)], dtype=torch.int64, device=cam_xyz.device) cam_pos_world = c2ws[:, view_ids, :, 3] # B, V, 4 cam_trans = w2cs[:, cam_vid, ...] 
# B, 4, 4 cam_pos_cam = (cam_pos_world @ cam_trans.transpose(1, 2))[...,:3] # B, V, 4 points_dirs = cam_xyz[:,:, None, :] - cam_pos_cam[:, None, :, :] # B, N, V, 3 in current cam coord points_dirs = points_dirs / (torch.linalg.norm(points_dirs, dim=-1, keepdims=True) + 1e-6) # B, N, V, 3 points_dirs = points_dirs.view(cam_xyz.shape[0], -1, 3) @ c2ws[:, cam_vid, :3, :3].transpose(1, 2) if not pointdir_w: points_dirs = points_dirs @ c2ws[:, self.args.ref_vid, :3, :3].transpose(1, 2) # in ref cam coord # print("points_dirs", points_dirs.shape) points_dirs = points_dirs.view(cam_xyz.shape[0], cam_xyz.shape[1], -1) elif feat_str.startswith("point_conf"): if photometric_confidence is None: photometric_confidence = torch.ones_like(points_embedding[0][...,0:1]) points_conf = photometric_confidence points_embedding = torch.cat(points_embedding, dim=-1) if self.args.shading_feature_mlp_layer0 > 0: points_embedding = self.premlp(torch.cat([points_embedding, points_colors, points_dirs, points_conf], dim=-1)) return points_embedding, points_colors, points_dirs, points_conf def gen_points(self, batch): if 'scan' in batch.keys(): batch.pop('scan') log, loss = {},0 data_mvs, pose_ref = self.decode_batch(batch) imgs, proj_mats = data_mvs['images'], data_mvs['proj_mats'] near_fars, depths_h = data_mvs['near_fars'], data_mvs['depths_h'] if 'depths_h' in data_mvs else None # print("depths_h", batch["near_fars"], depths_h.shape, depths_h[0,0,:,:]) # volume_feature:(1, 8, D, 176, 208) img_feat:(B, V, C, h, w) cam_expected_depth = None ndc_std_depth = None # volume_feature: 1, 8, 128, 176, 208; # img_feat: 1, 3, 32, 128, 160; # depth_values: 1, 128 photometric_confidence_lst=[] cam_xyz_lst = [] nearfar_mask_lst = [] volume_prob = None # w2c_ref = batch["w2cs"][:, self.args.ref_vid, ...].transpose(1, 2) depth_vid = [int(self.args.depth_vid[i]) for i in range(len(self.args.depth_vid))] if self.args.manual_depth_view < 1: if self.args.manual_depth_view == -1: img_feats = 
self.FeatureNet(imgs[:, :self.args.init_view_num]) for i in range(len(depth_vid)): vid = depth_vid[i] if self.args.manual_depth_view == -1: volume_feature, img_feats, depth_values = self.MVSNet(imgs[:, :self.args.init_view_num], img_feats, proj_mats[:, vid, :3], near_fars[0, vid], pad=self.args.pad, vid=vid) volume_prob = self.ProbNet(volume_feature) # ([1, 1, 128, 176, 208]) # print("volume_prob", volume_prob.shape) elif self.args.manual_depth_view == 0: cam_expected_depth = depths_h[:,vid:vid+1,...] ndc_std_depth = torch.ones_like(cam_expected_depth) * self.args.manual_std_depth ndc_xyz, cam_xyz, HDWD, nearfar_mask = self.sample_func(volume_prob, self.args, batch["intrinsics"][:, vid, ...], near_fars[0, vid], cam_expected_depth=cam_expected_depth, ndc_std_depth=ndc_std_depth) if cam_xyz.shape[1] > 0: cam_xyz_lst.append(cam_xyz) nearfar_mask_lst.append(nearfar_mask) else: near_far_depth = batch["near_fars_depth"][0] depth_interval, depth_min = (near_far_depth[1] - near_far_depth[0]) / 192., near_far_depth[0] depth_values = (depth_min + torch.arange(0, 192, device="cuda", dtype=torch.float32) * depth_interval)[None, :] dimgs = batch["mvs_images"] if "mvs_images" in batch else imgs bmvs_2d_features=None # print("dimgs",dimgs.shape) bimgs = dimgs[:, :self.args.init_view_num].expand(len(self.args.depth_vid), -1, -1, -1, -1) bvid = torch.as_tensor(depth_vid, dtype=torch.long, device="cuda") bproj_mats = proj_mats[0, bvid, ...] bdepth_values = depth_values.expand(len(self.args.depth_vid), -1) if self.args.manual_depth_view == 1: with torch.no_grad(): # 1, 128, 160; 1, 128, 160; prob_volume: 1, 192, 128, 160 depths_h, photometric_confidence, _, _ = self.MVSNet(bimgs, bproj_mats, bdepth_values, features=bmvs_2d_features) depths_h, photometric_confidence = depths_h[:,None,...], photometric_confidence[:,None,...] 
# B,N,H,W,3, B,N,H,W,3, 1, 1,1,H,W else: dnum = self.args.manual_depth_view with torch.no_grad(): # prob_volume: 1, 192, 128, 160 _, prob_sm_volume, prob_raw_volume = self.MVSNet( bimgs, bproj_mats, bdepth_values, features=bmvs_2d_features, prob_only=True) # prob_volume = torch.sigmoid(prob_raw_volume) prob_volume = prob_sm_volume photometric_confidence, topk_idx = torch.topk(prob_volume, dnum, dim=1) # 1, 5, 128, 160; 1, 5, 128, 160 depths_h = torch.cat([depth_values[0,topk_idx[i].view(-1)].view(1, dnum, prob_volume.shape[-2], prob_volume.shape[-1]) for i in range(len(depth_vid))], dim=0) bcam_expected_depth = torch.nn.functional.interpolate(depths_h, size=list(dimgs.shape)[-2:], mode='nearest') photometric_confidence = torch.nn.functional.interpolate(photometric_confidence, size=list(dimgs.shape)[-2:], mode='nearest') # 1, 1, H, W photometric_confidence_lst = torch.unbind(photometric_confidence[:,None,...], dim=0) bndc_std_depth = torch.ones_like(bcam_expected_depth) * self.args.manual_std_depth for i in range(len(depth_vid)): vid = depth_vid[i] cam_expected_depth, ndc_std_depth = bcam_expected_depth[i:i+1], bndc_std_depth[i:i+1] ndc_xyz, cam_xyz, HDWD, nearfar_mask = self.sample_func(volume_prob, self.args, batch["intrinsics"][:, vid,...], near_fars[0, vid], cam_expected_depth=cam_expected_depth, ndc_std_depth=ndc_std_depth) if cam_xyz.shape[1] > 0: cam_xyz_lst.append(cam_xyz) nearfar_mask_lst.append(nearfar_mask) return cam_xyz_lst, photometric_confidence_lst, nearfar_mask_lst, HDWD, data_mvs, [batch["intrinsics"][:,int(vid),...] for vid in self.args.depth_vid], [batch["w2cs"][:,int(vid),...] 
for vid in self.args.depth_vid] def forward(self, batch): # 3 , 3, 3, 2, 4, dict, 3, 3 cam_xyz_lst, photometric_confidence_lst, nearfar_mask_lst, HDWD, data_mvs, intrinsics_lst, extrinsics_lst = self.gen_points(batch) # #################### FILTER by Masks ################## gpu_filter = True if self.args.manual_depth_view != 0: # cuda filter if gpu_filter: cam_xyz_lst, _, photometric_confidence_lst = filter_utils.filter_by_masks_gpu(cam_xyz_lst, intrinsics_lst, extrinsics_lst, photometric_confidence_lst, nearfar_mask_lst, self.args) else: cam_xyz_lst, _, photometric_confidence_lst = filter_utils.filter_by_masks([cam_xyz.cpu().numpy() for cam_xyz in cam_xyz_lst], [intrinsics.cpu().numpy() for intrinsics in intrinsics_lst], [extrinsics.cpu().numpy() for extrinsics in extrinsics_lst], [confidence.cpu().numpy() for confidence in photometric_confidence_lst], [nearfar_mask.cpu().numpy() for nearfar_mask in nearfar_mask_lst], self.args) cam_xyz_lst = [torch.as_tensor(cam_xyz, device="cuda", dtype=torch.float32) for cam_xyz in cam_xyz_lst] photometric_confidence_lst = [torch.as_tensor(confidence, device="cuda", dtype=torch.float32) for confidence in photometric_confidence_lst] else: B, N, C, H, W, _ = cam_xyz_lst[0].shape cam_xyz_lst = [cam_xyz.view(C, H, W, 3) for cam_xyz in cam_xyz_lst] cam_xyz_lst = [cam_xyz[nearfar_mask[0,...], :] for cam_xyz, nearfar_mask in zip(cam_xyz_lst, nearfar_mask_lst)] # print("after filterd", cam_xyz_lst[0].shape) photometric_confidence_lst = [torch.ones_like(cam_xyz[...,0]) for cam_xyz in cam_xyz_lst] # img_feats = self.get_image_features(batch['images']) img_feats = self.get_image_features(batch['mvs_images']) points_features_lst = [self.query_embedding(HDWD, torch.as_tensor(cam_xyz_lst[i][None, ...], device="cuda", dtype=torch.float32), photometric_confidence_lst[i][None, ..., None], img_feats, data_mvs['c2ws'], data_mvs['w2cs'], batch["intrinsics"], int(self.args.depth_vid[i]), pointdir_w=False) for i in range(len(cam_xyz_lst))] # 
#################### start query embedding ################## xyz_ref_lst = [(torch.cat([xyz_cam, torch.ones_like(xyz_cam[..., 0:1])], dim=-1) @ torch.linalg.inv( cam_extrinsics[0]).transpose(0, 1) @ batch["w2cs"][0, self.args.ref_vid, ...].transpose(0, 1))[..., :3] for xyz_cam, cam_extrinsics in zip(cam_xyz_lst, extrinsics_lst)] ref_xyz = torch.cat(xyz_ref_lst, dim=0) points_embedding = torch.cat([points_features[0] for points_features in points_features_lst], dim=1) points_colors = torch.cat([points_features[1] for points_features in points_features_lst], dim=1) if points_features_lst[0][1] is not None else None points_ref_dirs = torch.cat([points_features[2] for points_features in points_features_lst], dim=1) if points_features_lst[0][2] is not None else None points_conf = torch.cat([points_features[3] for points_features in points_features_lst], dim=1) if points_features_lst[0][3] is not None else None return ref_xyz, points_embedding, points_colors, points_ref_dirs, points_conf def save_points(self, xyz, dir, total_steps): if xyz.ndim < 3: xyz = xyz[None, ...] os.makedirs(dir, exist_ok=True) for i in range(xyz.shape[0]): if isinstance(total_steps, str): filename = 'step-{}-{}.txt'.format(total_steps, i) else: filename = 'step-{:04d}-{}.txt'.format(total_steps, i) filepath = os.path.join(dir, filename) np.savetxt(filepath, xyz[i, ...].reshape(-1, xyz.shape[-1]), delimiter=";") def save_image(self, img_array, filepath): assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]) if img_array.dtype != np.uint8: img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8) os.makedirs(os.path.dirname(filepath), exist_ok=True) Image.fromarray(img_array).save(filepath)
22,547
54.674074
414
py
pointnerf
pointnerf-master/models/mvs/filter_utils.py
import sys import os import pathlib # sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import torch.nn.functional as F import copy import torch import numpy as np import time from models.mvs import mvs_utils from tqdm import tqdm import cv2 from PIL import Image def reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src): width, height = depth_ref.shape[1], depth_ref.shape[0] ## step1. project reference pixels to the source view # reference view x, y x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height)) x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1]) # reference 3D space xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref), np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1])) # source 3D space xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)), np.vstack((xyz_ref, np.ones_like(x_ref))))[:3] # source view x, y K_xyz_src = np.matmul(intrinsics_src, xyz_src) xy_src = K_xyz_src[:2] / K_xyz_src[2:3] ## step2. 
reproject the source view points with source view depth estimation # find the depth estimation of the source view x_src = xy_src[0].reshape([height, width]).astype(np.float32) y_src = xy_src[1].reshape([height, width]).astype(np.float32) oor_mask = np.logical_or(np.logical_or(x_src >= width, x_src < 0),np.logical_or(y_src >= height, y_src < 0)) sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR) # print("depth_src",depth_src.shape, x_src.shape, y_src.shape) # sampled_depth_src=depth_src # mask = sampled_depth_src > 0 # source 3D space # NOTE that we should use sampled source-view depth_here to project back xyz_src = np.matmul(np.linalg.inv(intrinsics_src), np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1])) # reference 3D space xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)), np.vstack((xyz_src, np.ones_like(x_ref))))[:3] # source view x, y, depth depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32) K_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected) xy_reprojected = K_xyz_reprojected[:2] / K_xyz_reprojected[2:3] x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32) y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32) return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src, oor_mask def check_geometric_consistency(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src): width, height = depth_ref.shape[1], depth_ref.shape[0] x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height)) depth_reprojected, x2d_reprojected, y2d_reprojected, x2d_src, y2d_src, oor_mask = reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src) # check |p_reproj-p_1| < 1 dist = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2) # check |d_reproj-d_1| / d_1 < 0.01 depth_diff = 
np.abs(depth_reprojected - depth_ref) relative_depth_diff = depth_diff / depth_ref # H, W mask = np.logical_and(dist < 1, relative_depth_diff < 0.01) depth_reprojected[~mask] = 0 return mask, ~oor_mask, depth_reprojected, x2d_src, y2d_src def filter_by_masks(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt): xyz_world_lst=[] xyz_ref_lst=[] confidence_filtered_lst = [] B, N, H, W, _ = cam_xyz_all[0].shape cam_xyz_all = [cam_xyz.reshape(H, W, 3) for cam_xyz in cam_xyz_all] count = 0 for ref_view in tqdm(range(len(cam_xyz_all))): ref_intrinsics, ref_extrinsics, ref_cam_xy, ref_depth_est, confidence, points_mask = intrinsics_all[ref_view][0], extrinsics_all[ref_view][0], cam_xyz_all[ref_view][...,:-1], cam_xyz_all[ref_view][...,-1], confidence_all[ref_view][0,0,...], points_mask_all[ref_view][0,0,...] photo_mask = confidence > opt.depth_conf_thresh sum_srcview_depth_ests = 0 geo_mask_sum = 0 visible_and_match_sum = 0 visible_sum = 0 # compute the geometric mask for src_view in range(len(cam_xyz_all)): if ref_view == src_view: continue src_intrinsics, src_extrinsics, src_depth_est = intrinsics_all[src_view][0], extrinsics_all[src_view][0], cam_xyz_all[src_view][...,-1] geo_mask, vis_mask, depth_reprojected, x2d_src, y2d_src = check_geometric_consistency(ref_depth_est, ref_intrinsics, ref_extrinsics, src_depth_est, src_intrinsics, src_extrinsics) visible_sum += vis_mask.astype(np.float32) visible_and_match_sum += np.logical_and(vis_mask, geo_mask).astype(np.float32) geo_mask_sum += geo_mask.astype(np.int32) sum_srcview_depth_ests += depth_reprojected depth_est_averaged = (sum_srcview_depth_ests + ref_depth_est) / (geo_mask_sum + 1) # at least 3 source views matched geo_mask = geo_mask_sum >= opt.geo_cnsst_num final_mask = np.logical_and(np.logical_and(photo_mask, geo_mask), points_mask) # vis_geo_mask = np.divide(visible_and_match_sum, visible_sum, out=np.ones_like(visible_and_match_sum), where=visible_sum!=0) > 0.05 # final_mask = 
np.logical_and(np.logical_and(photo_mask, vis_geo_mask), points_mask) xy, depth = ref_cam_xy[final_mask,:], depth_est_averaged[final_mask][...,None] xyz_ref = np.concatenate([xy, depth], axis=-1) xyz_world = np.matmul(np.concatenate([xyz_ref, np.ones_like(xyz_ref[...,0:1])], axis=-1), np.transpose(np.linalg.inv(ref_extrinsics)))[:,:3] confidence_filtered = confidence[final_mask] xyz_world, xyz_ref, confidence_filtered = range_mask_np(xyz_world, xyz_ref, confidence_filtered, opt) xyz_world_lst.append(xyz_world) xyz_ref_lst.append(xyz_ref) confidence_filtered_lst.append(confidence_filtered) return xyz_ref_lst, xyz_world_lst, confidence_filtered_lst def range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_lst, opt): if opt.ranges[0] > -99.0: for i in range(len(xyz_world_all)): xyz_world, cam_xyz, confidence_filtered = range_mask_np(xyz_world_all[i], cam_xyz_all[i], confidence_filtered_lst[i], opt) xyz_world_all[i], cam_xyz_all[i], confidence_filtered_lst[i] = xyz_world, cam_xyz, confidence_filtered return xyz_world_all, cam_xyz_all, confidence_filtered_lst def range_mask_np(xyz_world, xyz_ref, confidence_filtered, opt): # print("range_mask_np") if opt.ranges[0] > -99.0: ranges = np.asarray(opt.ranges) mask = np.prod(np.logical_and(xyz_world >= ranges[None, :3], xyz_world <= ranges[None, 3:]), axis=-1) > 0 xyz_world = xyz_world[mask] xyz_ref = xyz_ref[mask] confidence_filtered = confidence_filtered[mask] return xyz_world, xyz_ref, confidence_filtered def range_mask_torch(xyz_world, xyz_ref, confidence_filtered, opt): # print("range_mask_torch") if opt.ranges[0] > -99.0: ranges = torch.as_tensor(opt.ranges, device=xyz_world.device, dtype=torch.float32) mask = torch.prod(torch.logical_and(xyz_world[..., :3] >= ranges[None, :3], xyz_world[..., :3] <= ranges[None, 3:]), dim=-1) > 0 xyz_world = xyz_world[mask] xyz_ref = xyz_ref[mask] confidence_filtered = confidence_filtered[mask] return xyz_world, xyz_ref, confidence_filtered def 
reproject_with_depth_gpu(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src): width, height = depth_ref.shape[1], depth_ref.shape[0] ## step1. project reference pixels to the source view # reference view x, y y_ref, x_ref = torch.meshgrid(torch.arange(0, height, device=depth_ref.device), torch.arange(0, width, device=depth_ref.device)) x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1]) # reference 3D space xyz_ref = torch.matmul(torch.linalg.inv(intrinsics_ref), torch.stack([x_ref, y_ref, torch.ones_like(x_ref, device=x_ref.device)], dim=0) * depth_ref.reshape([-1])) # source 3D space xyz_src = torch.matmul(torch.matmul(extrinsics_src, torch.linalg.inv(extrinsics_ref)), torch.cat([xyz_ref, torch.ones_like(x_ref)[None,:]], dim=0))[:3] # source view x, y K_xyz_src = torch.matmul(intrinsics_src, xyz_src) # 3, 6400000 xy_src = K_xyz_src[:2] / K_xyz_src[2:3] ## step2. reproject the source view points with source view depth estimation # find the depth estimation of the source view x_src = xy_src[0].reshape([height, width]).to(torch.float32) y_src = xy_src[1].reshape([height, width]).to(torch.float32) oor_mask = torch.logical_or(torch.logical_or(x_src >= width, x_src < 0), torch.logical_or(y_src >= height, y_src < 0)) # sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR) sampled_depth_src = F.grid_sample(depth_src[None, None, ...], torch.stack([x_src * 2 / (width-1) - 1, y_src * 2 / (height-1) - 1], dim=-1)[None,...], align_corners=True, mode='bilinear', padding_mode='border') # mask = sampled_depth_src > 0 # source 3D space # NOTE that we should use sampled source-view depth_here to project back xyz_src = torch.matmul(torch.linalg.inv(intrinsics_src), torch.cat([xy_src, torch.ones_like(x_ref)[None,:]], dim=0) * sampled_depth_src.reshape([-1])) # reference 3D space xyz_reprojected = torch.matmul(torch.matmul(extrinsics_ref, torch.linalg.inv(extrinsics_src)), torch.cat([xyz_src, 
torch.ones_like(x_ref)[None,:]], dim=0))[:3] # source view x, y, depth depth_reprojected = xyz_reprojected[2].reshape([height, width]).to(torch.float32) K_xyz_reprojected = torch.matmul(intrinsics_ref, xyz_reprojected) xy_reprojected = K_xyz_reprojected[:2] / K_xyz_reprojected[2:3] x_reprojected = xy_reprojected[0].reshape([height, width]).to(torch.float32) y_reprojected = xy_reprojected[1].reshape([height, width]).to(torch.float32) return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src, oor_mask def check_geometric_consistency_gpu(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src): width, height = depth_ref.shape[1], depth_ref.shape[0] y_ref, x_ref = torch.meshgrid(torch.arange(0, height, device=depth_ref.device), torch.arange(0, width, device=depth_ref.device)) depth_reprojected, x2d_reprojected, y2d_reprojected, x2d_src, y2d_src, oor_mask = reproject_with_depth_gpu(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src) # check |p_reproj-p_1| < 1 dist = torch.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2) # check |d_reproj-d_1| / d_1 < 0.01 depth_diff = torch.abs(depth_reprojected - depth_ref) relative_depth_diff = depth_diff / depth_ref # H, W mask = torch.logical_and(dist < 1, relative_depth_diff < 0.01) depth_reprojected[~mask] = 0 return mask, ~oor_mask, depth_reprojected, x2d_src, y2d_src def filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=False, return_w=False, cpu2gpu=False, near_fars_all=None): xyz_cam_lst=[] xyz_world_lst=[] confidence_filtered_lst = [] B, N, C, H, W, _ = cam_xyz_all[0].shape cam_xyz_all = [cam_xyz.view(C,H,W,3) for cam_xyz in cam_xyz_all] for cam_view in tqdm(range(len(cam_xyz_all))) if vis else range(len(cam_xyz_all)): near_fars = near_fars_all[cam_view] if near_fars_all is not None else None if opt.manual_depth_view > 1: xyz_cam, cam_xy, confidence, points_mask, 
cam_extrinsics = cam_xyz_all[cam_view], cam_xyz_all[cam_view][0, :, :, :-1], confidence_all[cam_view][0,...], points_mask_all[cam_view][0,...], extrinsics_all[cam_view][0] final_mask = torch.logical_and(confidence > opt.depth_conf_thresh, points_mask) xyz_cam = xyz_cam[final_mask] confidence *= 0.3 else: cam_intrinsics, cam_extrinsics, cam_xy, cam_depth_est, confidence, points_mask = intrinsics_all[cam_view][0], extrinsics_all[cam_view][0], cam_xyz_all[cam_view][0,...,:-1], cam_xyz_all[cam_view][0,...,-1], confidence_all[cam_view][0,0,...], points_mask_all[cam_view][0,0,...] if cpu2gpu: cam_xy, cam_depth_est, confidence, points_mask = cam_xy.cuda(), cam_depth_est.cuda(), confidence.cuda(), points_mask.cuda() sum_srcview_depth_ests = 0 geo_mask_sum = 0 visible_and_match_sum = 0 visible_and_not_match_sum = 0 visible_sum = 0 # compute the geometric mask for src_view in range(len(cam_xyz_all)): if cam_view == src_view: continue src_intrinsics, src_extrinsics, src_depth_est = intrinsics_all[src_view][0], extrinsics_all[src_view][0], cam_xyz_all[src_view][0,...,-1] if cpu2gpu: src_depth_est = src_depth_est.cuda() geo_mask, vis_mask, depth_reprojected, x2d_src, y2d_src = check_geometric_consistency_gpu(cam_depth_est, cam_intrinsics, cam_extrinsics, src_depth_est, src_intrinsics, src_extrinsics) visible_sum += vis_mask.to(torch.float32) visible_and_match_sum += torch.logical_and(vis_mask, geo_mask).to(torch.float32) visible_and_not_match_sum += torch.logical_and(vis_mask, ~geo_mask).to(torch.float32) geo_mask_sum += geo_mask.to(torch.int32) sum_srcview_depth_ests += depth_reprojected depth_est_averaged = (sum_srcview_depth_ests + cam_depth_est) / (geo_mask_sum + 1) # at least 3 source views matched geo_mask = geo_mask_sum >= opt.geo_cnsst_num # visible_and_not_match_sum < 3 # final_mask = torch.logical_and(confidence > opt.depth_conf_thresh, points_mask) final_mask = torch.logical_and(final_mask, geo_mask) if len(cam_xyz_all)>1 else final_mask xy, depth = 
cam_xy[final_mask,:], depth_est_averaged[final_mask][...,None] xyz_cam = torch.cat([xy, depth], dim=-1) confidence_filtered = confidence[final_mask] if opt.default_conf > 1.0: assert opt.manual_depth_view <= 1 confidence_filtered = reassign_conf(confidence_filtered, final_mask, geo_mask_sum, opt.geo_cnsst_num) if opt.far_plane_shift is not None: assert near_fars is not None bg_mask = ~final_mask if final_mask.dim() == 2 else (torch.sum(final_mask, dim=0) < 1) bg_xy = cam_xy[bg_mask,:] xyz_cam_extra = torch.cat([bg_xy, torch.ones_like(bg_xy[...,:1]) * near_fars[1] + opt.far_plane_shift], dim=-1) xyz_cam = torch.cat([xyz_cam, xyz_cam_extra], dim=0) confidence_extra = torch.ones_like(xyz_cam_extra[...,-1]) * 0.02 confidence_filtered = torch.cat([confidence_filtered, confidence_extra], dim=0) xyz_world = torch.cat([xyz_cam, torch.ones_like(xyz_cam[...,0:1])], axis=-1) @ torch.inverse(cam_extrinsics).transpose(0,1) # print("xyz_world",xyz_world.shape) xyz_world, xyz_cam, confidence_filtered = range_mask_torch(xyz_world, xyz_cam, confidence_filtered, opt) xyz_cam_lst.append(xyz_cam.cpu() if cpu2gpu else xyz_cam) xyz_world_lst.append(xyz_world[:,:3].cpu() if cpu2gpu else xyz_world[:,:3]) confidence_filtered_lst.append(confidence_filtered.cpu() if cpu2gpu else confidence_filtered) return xyz_cam_lst, xyz_world_lst, confidence_filtered_lst def reassign_conf(confidence_filtered, final_mask, geo_mask_sum, geo_cnsst_num): geo_mask_sum = geo_mask_sum[final_mask] - geo_cnsst_num + 1 confidence_filtered *= (1 - 1.0 / torch.pow(1.14869, torch.clamp(geo_mask_sum, min=1, max=10))) # 1.14869 = root 2 by 5 return confidence_filtered
16,375
53.586667
287
py
pointnerf
pointnerf-master/models/mvs/models.py
import torch torch.autograd.set_detect_anomaly(True) import torch.nn as nn from .mvs_utils import * from .mvs_utils import homo_warp from inplace_abn import InPlaceABN from .renderer import run_network_mvs from ..depth_estimators.mvsnet import MVSNet as Ofcl_MVSNet device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def weights_init(m): if isinstance(m, nn.Linear): nn.init.kaiming_normal_(m.weight.data) if m.bias is not None: nn.init.zeros_(m.bias.data) class Embedder: def __init__(self, **kwargs): self.kwargs = kwargs self.create_embedding_fn() def create_embedding_fn(self): embed_fns = [] d = self.kwargs['input_dims'] out_dim = 0 if self.kwargs['include_input']: embed_fns.append(lambda x : x) out_dim += d max_freq = self.kwargs['max_freq_log2'] N_freqs = self.kwargs['num_freqs'] if self.kwargs['log_sampling']: freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs) else: freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs) self.freq_bands = freq_bands.reshape(1,-1,1).cuda() for freq in freq_bands: for p_fn in self.kwargs['periodic_fns']: embed_fns.append(lambda x, p_fn=p_fn, freq=freq : p_fn(x * freq)) out_dim += d self.embed_fns = embed_fns self.out_dim = out_dim def embed(self, inputs): repeat = inputs.dim()-1 inputs_scaled = (inputs.unsqueeze(-2) * self.freq_bands.view(*[1]*repeat,-1,1)).reshape(*inputs.shape[:-1],-1) inputs_scaled = torch.cat((inputs, torch.sin(inputs_scaled), torch.cos(inputs_scaled)),dim=-1) return inputs_scaled def get_embedder(multires, i=0, input_dims=3): if i == -1: return nn.Identity(), 3 embed_kwargs = { 'include_input' : True, 'input_dims' : input_dims, 'max_freq_log2' : multires-1, 'num_freqs' : multires, 'log_sampling' : True, 'periodic_fns' : [torch.sin, torch.cos], } embedder_obj = Embedder(**embed_kwargs) embed = lambda x, eo=embedder_obj : eo.embed(x) return embed, embedder_obj.out_dim class ScaledDotProductAttention(nn.Module): ''' Scaled Dot-Product Attention ''' def __init__(self, 
temperature, attn_dropout=0.1): super().__init__() self.temperature = temperature # self.dropout = nn.Dropout(attn_dropout) def forward(self, q, k, v, mask=None): attn = torch.matmul(q / self.temperature, k.transpose(2, 3)) if mask is not None: attn = attn.masked_fill(mask == 0, -1e9) # attn = attn * mask attn = F.softmax(attn, dim=-1) # attn = self.dropout(F.softmax(attn, dim=-1)) output = torch.matmul(attn, v) return output, attn class MultiHeadAttention(nn.Module): ''' Multi-Head Attention module ''' def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1): super().__init__() self.n_head = n_head self.d_k = d_k self.d_v = d_v self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) self.fc = nn.Linear(n_head * d_v, d_model, bias=False) self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5) # self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) def forward(self, q, k, v, mask=None): d_k, d_v, n_head = self.d_k, self.d_v, self.n_head sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1) residual = q # Pass through the pre-attention projection: b x lq x (n*dv) # Separate different heads: b x lq x n x dv q = self.w_qs(q).view(sz_b, len_q, n_head, d_k) k = self.w_ks(k).view(sz_b, len_k, n_head, d_k) v = self.w_vs(v).view(sz_b, len_v, n_head, d_v) # Transpose for attention dot product: b x n x lq x dv q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) if mask is not None: mask = mask.unsqueeze(1) # For head axis broadcasting. 
q, attn = self.attention(q, k, v, mask=mask) # Transpose to move the head dimension back: b x lq x n x dv # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv) q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1) q = self.fc(q) q += residual q = self.layer_norm(q) return q, attn class Renderer_ours(nn.Module): def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False): """ """ super(Renderer_ours, self).__init__() self.D = D self.W = W self.input_ch = input_ch self.input_ch_views = input_ch_views self.skips = skips self.use_viewdirs = use_viewdirs self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat self.pts_linears = nn.ModuleList( [nn.Linear(self.in_ch_pts, W, bias=True)] + [nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + self.in_ch_pts, W) for i in range(D-1)]) self.pts_bias = nn.Linear(input_ch_feat, W) self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)]) if use_viewdirs: self.feature_linear = nn.Linear(W, W) self.alpha_linear = nn.Linear(W, 1) self.rgb_linear = nn.Linear(W//2, 3) else: self.output_linear = nn.Linear(W, output_ch) self.pts_linears.apply(weights_init) self.views_linears.apply(weights_init) self.feature_linear.apply(weights_init) self.alpha_linear.apply(weights_init) self.rgb_linear.apply(weights_init) def forward_alpha(self, x): dim = x.shape[-1] in_ch_feat = dim-self.in_ch_pts input_pts, input_feats = torch.split(x, [self.in_ch_pts, in_ch_feat], dim=-1) h = input_pts bias = self.pts_bias(input_feats) for i, l in enumerate(self.pts_linears): h = self.pts_linears[i](h) * bias h = F.relu(h) if i in self.skips: h = torch.cat([input_pts, h], -1) alpha = torch.relu(self.alpha_linear(h)) return alpha def forward(self, x): dim = x.shape[-1] in_ch_feat = dim-self.in_ch_pts-self.in_ch_views input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, 
self.in_ch_views], dim=-1) h = input_pts bias = self.pts_bias(input_feats) for i, l in enumerate(self.pts_linears): h = self.pts_linears[i](h) * bias h = F.relu(h) if i in self.skips: h = torch.cat([input_pts, h], -1) if self.use_viewdirs: alpha = torch.relu(self.alpha_linear(h)) feature = self.feature_linear(h) h = torch.cat([feature, input_views], -1) for i, l in enumerate(self.views_linears): h = self.views_linears[i](h) h = F.relu(h) rgb = torch.sigmoid(self.rgb_linear(h)) outputs = torch.cat([rgb, alpha], -1) else: outputs = self.output_linear(h) return outputs class Renderer_color_fusion(nn.Module): def __init__(self, D=8, W=128, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4],use_viewdirs=False): """ """ super(Renderer_color_fusion, self).__init__() self.D = D self.W = W self.input_ch = input_ch self.input_ch_views = input_ch_views self.skips = skips self.use_viewdirs = use_viewdirs self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat self.pts_linears = nn.ModuleList( [nn.Linear(input_ch, W, bias=True)] + [ nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + input_ch, W) for i in range(D - 1)]) self.pts_bias = nn.Linear(input_ch_feat, W) attension_dim = 16 + 3 + self.in_ch_views//3 # 16 + rgb dim + angle dim self.ray_attention = MultiHeadAttention(4, attension_dim, 4, 4) if use_viewdirs: self.feature_linear = nn.Sequential(nn.Linear(W, 16), nn.ReLU()) self.alpha_linear = nn.Sequential(nn.Linear(W, 1), nn.ReLU()) self.rgb_out = nn.Sequential(nn.Linear(attension_dim, 3),nn.Sigmoid()) # else: self.output_linear = nn.Linear(W, output_ch) self.pts_linears.apply(weights_init) self.feature_linear.apply(weights_init) self.alpha_linear.apply(weights_init) self.rgb_out.apply(weights_init) def forward_alpha(self,x): input_pts, input_feats = torch.split(x, [self.in_ch_pts, self.in_ch_feat], dim=-1) h = input_pts bias = self.pts_bias(input_feats) for i, l in enumerate(self.pts_linears): h = 
self.pts_linears[i](h) * bias h = F.relu(h) if i in self.skips: h = torch.cat([input_pts, h], -1) alpha = self.alpha_linear(h) return alpha def forward(self, x): dim = x.shape[-1] in_ch_feat = dim - self.in_ch_pts - self.in_ch_views input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1) h = input_pts bias = self.pts_bias(input_feats) for i, l in enumerate(self.pts_linears): h = self.pts_linears[i](h) * bias h = F.relu(h) if i in self.skips: h = torch.cat([input_pts, h], -1) alpha = self.alpha_linear(h) # color input_views = input_views.reshape(-1, 3, self.in_ch_views//3) rgb = input_feats[..., 8:].reshape(-1, 3, 4) rgb_in = rgb[..., :3] N = rgb.shape[0] feature = self.feature_linear(h) h = feature.reshape(N, 1, -1).expand(-1, 3, -1) h = torch.cat((h, input_views, rgb_in), dim=-1) h, _ = self.ray_attention(h, h, h, mask=rgb[...,-1:]) rgb = self.rgb_out(h) rgb = torch.sum(rgb , dim=1).reshape(*alpha.shape[:2], 3) outputs = torch.cat([rgb, alpha], -1) return outputs class Renderer_attention2(nn.Module): def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False): """ """ super(Renderer_attention, self).__init__() self.D = D self.W = W self.input_ch = input_ch self.input_ch_views = input_ch_views self.skips = skips self.use_viewdirs = use_viewdirs self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat self.attension_dim = 4 + 8 self.color_attention = MultiHeadAttention(4, self.attension_dim, 4, 4) self.weight_out = nn.Linear(self.attension_dim, 3) self.pts_linears = nn.ModuleList( [nn.Linear(self.in_ch_pts, W, bias=True)] + [nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + self.in_ch_pts, W) for i in range(D-1)]) self.pts_bias = nn.Linear(11, W) self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)]) if use_viewdirs: self.feature_linear = nn.Linear(W, W) self.alpha_linear = 
nn.Linear(W, 1) self.rgb_linear = nn.Linear(W//2, 3) else: self.output_linear = nn.Linear(W, output_ch) self.pts_linears.apply(weights_init) self.views_linears.apply(weights_init) self.feature_linear.apply(weights_init) self.alpha_linear.apply(weights_init) self.rgb_linear.apply(weights_init) def forward(self, x): N_ray, N_sample, dim = x.shape in_ch_feat = dim-self.in_ch_pts-self.in_ch_views input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1) if input_feats.shape[-1]>8+3: colors = input_feats[...,8:].view(N_ray*N_sample,-1,4) weight = torch.cat((colors,input_feats[...,:8].reshape(N_ray*N_sample, 1, -1).expand(-1, colors.shape[-2], -1)),dim=-1) weight, _ = self.color_attention(weight, weight, weight) colors = torch.sum(self.weight_out(weight),dim=-2).view(N_ray, N_sample, -1) # colors = self.weight_out(input_feats) else: colors = input_feats[...,-3:] h = input_pts # bias = self.pts_bias(colors) bias = self.pts_bias(torch.cat((input_feats[...,:8],colors),dim=-1)) for i, l in enumerate(self.pts_linears): h = self.pts_linears[i](h) * bias h = F.relu(h) if i in self.skips: h = torch.cat([input_pts, h], -1) if self.use_viewdirs: alpha = torch.relu(self.alpha_linear(h)) feature = self.feature_linear(h) h = torch.cat([feature, input_views], -1) for i, l in enumerate(self.views_linears): h = self.views_linears[i](h) h = F.relu(h) rgb = torch.sigmoid(self.rgb_linear(h)) outputs = torch.cat([rgb, alpha], -1) else: outputs = self.output_linear(h) outputs = torch.cat((outputs,colors), dim=-1) return outputs class Renderer_attention(nn.Module): def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False): """ """ super(Renderer_attention, self).__init__() self.D = D self.W = W self.input_ch = input_ch self.input_ch_views = input_ch_views self.skips = skips self.use_viewdirs = use_viewdirs self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, 
input_ch_views, input_ch_feat self.attension_dim = 4 + 8 self.color_attention = MultiHeadAttention(4, self.attension_dim, 4, 4) self.weight_out = nn.Linear(self.attension_dim, 3) # self.weight_out = nn.Linear(self.in_ch_feat, 8) self.pts_linears = nn.ModuleList( [nn.Linear(self.in_ch_pts, W, bias=True)] + [nn.Linear(W, W, bias=True)]*(D-1)) self.pts_bias = nn.Linear(11, W) self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)]) if use_viewdirs: self.feature_linear = nn.Linear(W, W) self.alpha_linear = nn.Linear(W, 1) self.rgb_linear = nn.Linear(W//2, 3) else: self.output_linear = nn.Linear(W, output_ch) self.pts_linears.apply(weights_init) self.views_linears.apply(weights_init) self.feature_linear.apply(weights_init) self.alpha_linear.apply(weights_init) self.rgb_linear.apply(weights_init) def forward(self, x): N_ray, N_sample, dim = x.shape in_ch_feat = dim-self.in_ch_pts-self.in_ch_views input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1) if input_feats.shape[-1]>8+3: colors = input_feats[...,8:].view(N_ray*N_sample,-1,4) weight = torch.cat((colors,input_feats[...,:8].reshape(N_ray*N_sample, 1, -1).expand(-1, colors.shape[-2], -1)),dim=-1) weight, _ = self.color_attention(weight, weight, weight) colors = torch.sum(torch.sigmoid(self.weight_out(weight)),dim=-2).view(N_ray, N_sample, -1) # colors = self.weight_out(input_feats) else: colors = input_feats[...,-3:] h = input_pts # bias = self.pts_bias(colors) bias = self.pts_bias(torch.cat((input_feats[...,:8],colors),dim=-1)) for i, l in enumerate(self.pts_linears): h = self.pts_linears[i](h) + bias h = F.relu(h) # if i in self.skips: # h = torch.cat([input_pts, h], -1) if self.use_viewdirs: alpha = torch.relu(self.alpha_linear(h)) feature = self.feature_linear(h) h = torch.cat([feature, input_views], -1) for i, l in enumerate(self.views_linears): h = self.views_linears[i](h) h = F.relu(h) rgb = torch.sigmoid(self.rgb_linear(h)) outputs = 
torch.cat([rgb, alpha, colors], -1) else: outputs = self.output_linear(h) outputs = torch.cat((outputs,colors), dim=-1) return outputs class Renderer_linear(nn.Module): def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, input_ch_feat=8, skips=[4], use_viewdirs=False): """ """ super(Renderer_linear, self).__init__() self.D = D self.W = W self.input_ch = input_ch self.input_ch_views = input_ch_views self.skips = skips self.use_viewdirs = use_viewdirs self.in_ch_pts, self.in_ch_views, self.in_ch_feat = input_ch, input_ch_views, input_ch_feat self.pts_linears = nn.ModuleList( [nn.Linear(input_ch, W, bias=True)] + [nn.Linear(W, W, bias=True) if i not in self.skips else nn.Linear(W + input_ch, W) for i in range(D-1)]) self.pts_bias = nn.Linear(input_ch_feat, W) self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W//2)]) if use_viewdirs: self.feature_linear = nn.Linear(W, W) self.alpha_linear = nn.Linear(W, 1) self.rgb_linear = nn.Linear(W//2, 3) else: self.output_linear = nn.Linear(W, output_ch) self.pts_linears.apply(weights_init) self.views_linears.apply(weights_init) self.feature_linear.apply(weights_init) self.alpha_linear.apply(weights_init) self.rgb_linear.apply(weights_init) def forward_alpha(self,x): dim = x.shape[-1] input_pts, input_feats = torch.split(x, [self.in_ch_pts, self.in_ch_feat], dim=-1) h = input_pts bias = self.pts_bias(input_feats) for i, l in enumerate(self.pts_linears): h = self.pts_linears[i](h) + bias h = F.relu(h) if i in self.skips: h = torch.cat([input_pts, h], -1) alpha = self.alpha_linear(h) return alpha def forward(self, x): dim = x.shape[-1] in_ch_feat = dim-self.in_ch_pts-self.in_ch_views input_pts, input_feats, input_views = torch.split(x, [self.in_ch_pts, in_ch_feat, self.in_ch_views], dim=-1) h = input_pts bias = self.pts_bias(input_feats) #if in_ch_feat == self.in_ch_feat else input_feats for i, l in enumerate(self.pts_linears): h = self.pts_linears[i](h) + bias h = F.relu(h) if i in self.skips: 
h = torch.cat([input_pts, h], -1) if self.use_viewdirs: alpha = torch.relu(self.alpha_linear(h)) feature = self.feature_linear(h) h = torch.cat([feature, input_views], -1) for i, l in enumerate(self.views_linears): h = self.views_linears[i](h) h = F.relu(h) rgb = torch.sigmoid(self.rgb_linear(h)) outputs = torch.cat([rgb, alpha], -1) else: outputs = self.output_linear(h) return outputs class MVSNeRF(nn.Module): def __init__(self, D=8, W=256, input_ch_pts=3, input_ch_views=3, input_ch_feat=8, skips=[4], net_type='v2'): """ """ super(MVSNeRF, self).__init__() self.in_ch_pts, self.in_ch_views,self.in_ch_feat = input_ch_pts, input_ch_views, input_ch_feat # we provide two version network structure if 'v0' == net_type: self.nerf = Renderer_ours(D=D, W=W,input_ch_feat=input_ch_feat, input_ch=input_ch_pts, output_ch=4, skips=skips, input_ch_views=input_ch_views, use_viewdirs=True) elif 'v1' == net_type: self.nerf = Renderer_attention(D=D, W=W,input_ch_feat=input_ch_feat, input_ch=input_ch_pts, output_ch=4, skips=skips, input_ch_views=input_ch_views, use_viewdirs=True) elif 'v2' == net_type: self.nerf = Renderer_linear(D=D, W=W,input_ch_feat=input_ch_feat, input_ch=input_ch_pts, output_ch=4, skips=skips, input_ch_views=input_ch_views, use_viewdirs=True) def forward_alpha(self, x): return self.nerf.forward_alpha(x) def forward(self, x): RGBA = self.nerf(x) return RGBA def create_nerf_mvs(args, pts_embedder=True, use_mvs=False, dir_embedder=True, Depth=128): """Instantiate mvs NeRF's MLP model. 
""" if pts_embedder: embed_fn, input_ch = get_embedder(args.multires, args.i_embed, input_dims=args.pts_dim) else: embed_fn, input_ch = None, args.pts_dim embeddirs_fn = None if dir_embedder: embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed, input_dims=args.dir_dim) else: embeddirs_fn, input_ch_views = None, args.dir_dim skips = [4] model = MVSNeRF(D=args.netdepth, W=args.netwidth, input_ch_pts=input_ch, skips=skips, input_ch_views=input_ch_views, input_ch_feat=args.feat_dim, net_type=args.net_type).to(device) grad_vars = [] grad_vars += list(model.parameters()) model_fine = None if args.N_importance > 0: model_fine = MVSNeRF(D=args.netdepth, W=args.netwidth, input_ch_pts=input_ch, skips=skips, input_ch_views=input_ch_views, input_ch_feat=args.feat_dim).to(device) grad_vars += list(model_fine.parameters()) network_query_fn = lambda pts, viewdirs, rays_feats, network_fn: run_network_mvs(pts, viewdirs, rays_feats, network_fn, embed_fn=embed_fn, embeddirs_fn=embeddirs_fn, netchunk=args.netchunk) EncodingNet = None net_2d = FeatureNet(intermediate=True) if use_mvs: EncodingNet = MVSNet(net_2d, Depth=Depth).to(device) grad_vars += list(EncodingNet.parameters()) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
start = 0 ########################## # Load checkpoints ckpts = [] if args.ckpt is not None and args.ckpt != 'None': ckpts = [args.ckpt] print('Found ckpts', ckpts) if len(ckpts) > 0 : ckpt_path = ckpts[-1] print('Reloading from', ckpt_path) ckpt = torch.load(ckpt_path) # Load model if use_mvs: state_dict = ckpt['network_mvs_state_dict'] EncodingNet.load_state_dict(state_dict) model.load_state_dict(ckpt['network_fn_state_dict']) # if model_fine is not None: # model_fine.load_state_dict(ckpt['network_fine_state_dict']) ########################## render_kwargs_train = { 'network_query_fn': network_query_fn, 'perturb': args.perturb, 'N_importance': args.N_importance, 'network_fine': model_fine, 'N_samples': args.N_samples, 'network_fn': model, 'network_mvs': EncodingNet, 'use_viewdirs': args.use_viewdirs, 'white_bkgd': args.white_bkgd, 'raw_noise_std': args.raw_noise_std, } render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train} render_kwargs_test['perturb'] = False return render_kwargs_train, render_kwargs_test, start, grad_vars def create_mvs(args, mvs_mode=-1, depth=128): """Instantiate mvs NeRF's MLP model. 
""" net_2d = FeatureNet(intermediate=True).to(device) EncodingNet = None if mvs_mode == -1: EncodingNet = MVSNet(depth=depth).to(device) elif mvs_mode >= 1: EncodingNet = Ofcl_MVSNet(refine=False).to(device) EncodingNet.eval() start = 0 render_kwargs_train = { 'network_featmvs': EncodingNet, 'network_2d': net_2d, } render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train} render_kwargs_test['perturb'] = False return render_kwargs_train, render_kwargs_test, start ############################################# MVS Net models ################################################ class ConvBnReLU(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1, norm_act=InPlaceABN): super(ConvBnReLU, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False) self.bn = norm_act(out_channels) def forward(self, x): return self.bn(self.conv(x)) class ConvBnReLU3D(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1, norm_act=InPlaceABN): super(ConvBnReLU3D, self).__init__() self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False) self.bn = norm_act(out_channels) # self.bn = nn.ReLU() def forward(self, x): return self.bn(self.conv(x)) ################################### feature net ###################################### class FeatureNet(nn.Module): """ output 3 levels of features using a FPN structure """ def __init__(self, intermediate=False, norm_act=InPlaceABN): super(FeatureNet, self).__init__() self.conv0 = nn.Sequential( ConvBnReLU(3, 8, 3, 1, 1, norm_act=norm_act), ConvBnReLU(8, 8, 3, 1, 1, norm_act=norm_act)) self.conv1 = nn.Sequential( ConvBnReLU(8, 16, 5, 2, 2, norm_act=norm_act), ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act), ConvBnReLU(16, 16, 3, 1, 1, norm_act=norm_act)) self.conv2 = nn.Sequential( ConvBnReLU(16, 32, 5, 2, 2, norm_act=norm_act), ConvBnReLU(32, 32, 3, 1, 1, 
norm_act=norm_act), ConvBnReLU(32, 32, 3, 1, 1, norm_act=norm_act)) self.toplayer = nn.Conv2d(32, 32, 1) self.intermediate = intermediate def _upsample_add(self, x, y): return F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) + y def forward(self, x): B, V, _, H, W = x.shape x = x.reshape(B * V, 3, H, W) if self.intermediate: x1 = self.conv0(x) # (B, 8, H, W) x2 = self.conv1(x1) # (B, 16, H//2, W//2) x3 = self.conv2(x2) # (B, 32, H//4, W//4) x3 = self.toplayer(x3) # (B, 32, H//4, W//4) return [x, x1, x2, x3] else: # x: (B, 3, H, W) x = self.conv0(x) # (B, 8, H, W) x = self.conv1(x) # (B, 16, H//2, W//2) x = self.conv2(x) # (B, 32, H//4, W//4) x = self.toplayer(x) # (B, 32, H//4, W//4) return [x] class CostRegNet(nn.Module): def __init__(self, in_channels, norm_act=InPlaceABN): super(CostRegNet, self).__init__() self.conv0 = ConvBnReLU3D(in_channels, 8, norm_act=norm_act) self.conv1 = ConvBnReLU3D(8, 16, stride=2, norm_act=norm_act) self.conv2 = ConvBnReLU3D(16, 16, norm_act=norm_act) self.conv3 = ConvBnReLU3D(16, 32, stride=2, norm_act=norm_act) self.conv4 = ConvBnReLU3D(32, 32, norm_act=norm_act) self.conv5 = ConvBnReLU3D(32, 64, stride=2, norm_act=norm_act) self.conv6 = ConvBnReLU3D(64, 64, norm_act=norm_act) self.conv7 = nn.Sequential( nn.ConvTranspose3d(64, 32, 3, padding=1, output_padding=1, stride=2, bias=False), norm_act(32)) self.conv9 = nn.Sequential( nn.ConvTranspose3d(32, 16, 3, padding=1, output_padding=1, stride=2, bias=False), norm_act(16)) self.conv11 = nn.Sequential( nn.ConvTranspose3d(16, 8, 3, padding=1, output_padding=1, stride=2, bias=False), norm_act(8)) # self.conv12 = nn.Conv3d(8, 8, 3, stride=1, padding=1, bias=True) def forward(self, x): conv0 = self.conv0(x) conv2 = self.conv2(self.conv1(conv0)) conv4 = self.conv4(self.conv3(conv2)) x = self.conv6(self.conv5(conv4)) x = conv4 + self.conv7(x) del conv4 x = conv2 + self.conv9(x) del conv2 x = conv0 + self.conv11(x) del conv0 # x = self.conv12(x) return x class 
ProbNet(nn.Module): def __init__(self, in_channels, norm_act=InPlaceABN): super(ProbNet, self).__init__() self.conv0 = ConvBnReLU3D(in_channels, 1, norm_act=norm_act) def forward(self, x): x = F.softmax(self.conv0(x), dim=2) return x class MVSNet(nn.Module): def __init__(self, depth=128, num_groups=1, norm_act=InPlaceABN, levels=1): super(MVSNet, self).__init__() self.levels = levels # 3 depth levels self.n_depths = [128,32,8] self.G = num_groups # number of groups in groupwise correlation self.N_importance = 0 self.chunk = 1024 self.D = depth self.cost_reg_2 = CostRegNet(32+9, norm_act) def build_volume_costvar(self, feats, proj_mats, depth_values, pad=0): # feats: (B, V, C, H, W) # proj_mats: (B, V, 3, 4) # depth_values: (B, D, H, W) # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w) # volume_sum [B, G, D, h, w] # prob_volume [B D H W] # volume_feature [B C D H W] B, V, C, H, W = feats.shape D = depth_values.shape[1] ref_feats, src_feats = feats[:, 0], feats[:, 1:] src_feats = src_feats.permute(1, 0, 2, 3, 4) # (V-1, B, C, h, w) proj_mats = proj_mats[:, 1:] proj_mats = proj_mats.permute(1, 0, 2, 3) # (V-1, B, 3, 4) if pad > 0: ref_feats = F.pad(ref_feats, (pad, pad, pad, pad), "constant", 0) ref_volume = ref_feats.unsqueeze(2).repeat(1, 1, D, 1, 1) # (B, C, D, h, w) volume_sum = ref_volume volume_sq_sum = ref_volume ** 2 del ref_feats in_masks = torch.ones((B, 1, D, H + pad * 2, W + pad * 2), device=volume_sum.device) for i, (src_feat, proj_mat) in enumerate(zip(src_feats, proj_mats)): warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad) grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2) in_mask = ((grid > -1.0) * (grid < 1.0)) in_mask = (in_mask[..., 0] * in_mask[..., 1]) in_masks += in_mask.float() if self.training: volume_sum = volume_sum + warped_volume volume_sq_sum = volume_sq_sum + warped_volume ** 2 else: volume_sum += warped_volume volume_sq_sum += warped_volume.pow_(2) del warped_volume, src_feat, 
proj_mat del src_feats, proj_mats count = 1.0 / in_masks img_feat = volume_sq_sum * count - (volume_sum * count) ** 2 del volume_sq_sum, volume_sum, count return img_feat, in_masks def build_volume_costvar_img(self, imgs, feats, proj_mats, depth_values, pad=0, vid=0): # feats: (B, V, C, H, W) # proj_mats: (B, V, 3, 4) # depth_values: (B, D, H, W) # cost_reg: nn.Module of input (B, C, D, h, w) and output (B, 1, D, h, w) # volume_sum [B, G, D, h, w] # prob_volume [B D H W] # volume_feature [B C D H W] B, V, C, H, W = feats.shape D = depth_values.shape[1] cur_feats, src_feats = feats[:, vid, ...], feats.permute(1, 0, 2, 3, 4) # (V, B, C, h, w) proj_mats = proj_mats.permute(1, 0, 2, 3) # (V, B, 3, 4) if pad > 0: cur_feats = F.pad(cur_feats, (pad, pad, pad, pad), "constant", 0) img_feat = torch.empty((B, 9 + 32, D, *cur_feats.shape[-2:]), device=feats.device, dtype=torch.float) imgs = F.interpolate(imgs.view(B * V, *imgs.shape[2:]), (H, W), mode='bilinear', align_corners=False).view(B, V,-1,H,W).permute(1, 0, 2, 3, 4) img_feat[:, :3, :, pad:H + pad, pad:W + pad] = imgs[0].unsqueeze(2).expand(-1, -1, D, -1, -1) cur_volume = cur_feats.unsqueeze(2).repeat(1, 1, D, 1, 1) # (B, C, D, h, w) volume_sum = cur_volume volume_sq_sum = cur_volume ** 2 del cur_feats src_view_count = 0 in_masks = torch.ones((B, V, D, H + pad * 2, W + pad * 2), device=volume_sum.device) for i, (src_img, src_feat, proj_mat) in enumerate(zip(imgs, src_feats, proj_mats)): # warped_volume: 1, 32, 128, 176, 208 B, D, H_pad, W_pad , grid B, D, W_pad, H_pad if i == vid: continue src_view_count+=1 warped_volume, grid = homo_warp(src_feat, proj_mat, depth_values, pad=pad) img_feat[:, src_view_count * 3:(src_view_count + 1) * 3], _ = homo_warp(src_img, proj_mat, depth_values, src_grid=grid, pad=pad) grid = grid.view(B, 1, D, H + pad * 2, W + pad * 2, 2) in_mask = ((grid > -1.0) * (grid < 1.0)) in_mask = (in_mask[..., 0] * in_mask[..., 1]) in_masks[:, src_view_count] = in_mask.float() if self.training: 
volume_sum = volume_sum + warped_volume volume_sq_sum = volume_sq_sum + warped_volume ** 2 else: volume_sum += warped_volume volume_sq_sum += warped_volume.pow_(2) del warped_volume, src_feat, proj_mat del src_feats, proj_mats count = 1.0 / torch.sum(in_masks, dim=1, keepdim=True) img_feat[:, -32:] = volume_sq_sum * count - (volume_sum * count) ** 2 del volume_sq_sum, volume_sum, count return img_feat, in_masks def forward(self, imgs, feats, proj_mats, near_far, pad=0, return_color=False, lindisp=False, vid=0): # imgs: (B, V, 3, H, W) # proj_mats: (B, V, 3, 4) from fine to coarse # init_depth_min, depth_interval: (B) or float # near_far (B, V, 2) B, V, _, H, W = imgs.shape imgs = imgs.reshape(B * V, 3, H, W) imgs = imgs.view(B, V, 3, H, W) feats_l = feats[-1] # (B*V, C, h, w) feats_l = feats_l.view(B, V, *feats_l.shape[1:]) # (B, V, C, h, w) t_vals = torch.linspace(0., 1., steps=self.D, device=imgs.device, dtype=imgs.dtype) # (B, D) near, far = near_far # assume batch size==1 if not lindisp: depth_values = near * (1.-t_vals) + far * (t_vals) else: depth_values = 1. / (1. / near * (1. - t_vals) + 1. 
/ far * (t_vals)) # print("near , far", near, far) # print("depth_values", depth_values) depth_values = depth_values.unsqueeze(0) # volume_feat, in_masks = self.build_volume_costvar(feats_l, proj_mats, depth_values, pad=pad) volume_feat, in_masks = self.build_volume_costvar_img(imgs, feats_l, proj_mats, depth_values, pad=pad, vid=vid) if return_color: feats_l = torch.cat((volume_feat[:,:V*3].view(B, V, 3, *volume_feat.shape[2:]),in_masks.unsqueeze(2)),dim=2) # print("pre cost volume_feat", volume_feat.shape) ([1, 41, 128, 176, 208]) volume_feat = self.cost_reg_2(volume_feat) # (B, 1, D, h, w) volume_feat = volume_feat.reshape(1,-1,*volume_feat.shape[2:]) return volume_feat, feats, depth_values class RefVolume(nn.Module): def __init__(self, volume): super(RefVolume, self).__init__() self.feat_volume = nn.Parameter(volume) def forward(self, ray_coordinate_ref): '''coordinate: [N, 3] z,x,y ''' device = self.feat_volume.device H, W = ray_coordinate_ref.shape[-3:-1] grid = ray_coordinate_ref.view(-1, 1, H, W, 3).to(device) * 2 - 1.0 # [1 1 H W 3] (x,y,z) features = F.grid_sample(self.feat_volume, grid, align_corners=True, mode='bilinear')[:, :, 0].permute(2, 3, 0,1).squeeze() return features
37,147
35.963184
166
py
pointnerf
pointnerf-master/run/editing.py
import sys import os import pathlib sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import glob import copy import torch import numpy as np import time from options import TrainOptions from options import EditOptions from data import create_data_loader, create_dataset from models import create_model from models.mvs.mvs_points_model import MvsPointsModel from models.mvs import mvs_utils, filter_utils from utils.visualizer import Visualizer from utils import format as fmt from run.evaluate import report_metrics from render_vid import render_vid torch.manual_seed(0) np.random.seed(0) import cv2 from PIL import Image from tqdm import tqdm def mse2psnr(x): return -10.* torch.log(x)/np.log(10.) def save_image(img_array, filepath): assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]) if img_array.dtype != np.uint8: img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8) os.makedirs(os.path.dirname(filepath), exist_ok=True) Image.fromarray(img_array).save(filepath) def masking(mask, firstdim_lst, seconddim_lst): first_lst = [item[mask, ...] if item is not None else None for item in firstdim_lst] second_lst = [item[:, mask, ...] 
if item is not None else None for item in seconddim_lst] return first_lst, second_lst def render(model, dataset, visualizer, opt, gen_vid=False): print('-----------------------------------Testing-----------------------------------') model.eval() total_num = dataset.render_total print("test set size {}, interval {}".format(total_num, opt.test_num_step)) patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visualizer.reset() cam_posts = [] cam_dirs = [] for i in range(0, total_num): data = dataset.get_dummyrot_item(i) raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() # cam_posts.append(data['campos']) # cam_dirs.append(data['raydir'] + data['campos'][None,...]) # continue visuals = None stime = time.time() for k in range(0, height * width, chunk_size): start = k end = min([k + chunk_size, height * width]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] # print("tmpgts", tmpgts["gt_image"].shape) # print(data["pixel_idx"]) model.set_input(data) model.test() curr_visuals = model.get_current_visuals(data=data) if visuals is None: visuals = {} for key, value in curr_visuals.items(): chunk = value.cpu().numpy() visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype) visuals[key][start:end, :] = chunk else: for key, value in curr_visuals.items(): visuals[key][start:end, :] = value.cpu().numpy() for key, value in visuals.items(): visualizer.print_details("{}:{}".format(key, visuals[key].shape)) visuals[key] = visuals[key].reshape(height, width, 3) print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir) visualizer.display_current_results(visuals, i) # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False) # visualizer.save_neural_points(200, 
np.concatenate(cam_dirs, axis=0),None, None, save_ref=False) # print("vis") # exit() print('--------------------------------Finish Evaluation--------------------------------') if gen_vid: del dataset visualizer.gen_video("coarse_raycolor", range(0, total_num), 0) print('--------------------------------Finish generating vid--------------------------------') return def get_latest_epoch(resume_dir): os.makedirs(resume_dir, exist_ok=True) str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")] int_epoch = [int(i) for i in str_epoch] return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))] def load_parts_info(opt, name, inds_name, trans_name): resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(os.path.join(opt.checkpoints_dir , "edit_srcs" , name)) checkpoint = os.path.join(opt.checkpoints_dir , "edit_srcs" , name , "{}_net_ray_marching.pth".format(resume_iter)) trans_file = None if trans_name.strip() == "no" else os.path.join(opt.checkpoints_dir , "edit_srcs" , name , "transforms", trans_name + ".txt") inds_file = None if inds_name.strip() == "all" else os.path.join(opt.checkpoints_dir , "edit_srcs" , name , "parts_index", inds_name + ".txt") Matrix = torch.eye(4, device="cuda", dtype=torch.float32) if trans_file is None else np.loadtxt(trans_file) Rot = Matrix[:3,:3] Translation = Matrix[:3, 3] saved_features = torch.load(checkpoint, map_location="cuda") print("loaded neural points from ", checkpoint, saved_features.keys()) if inds_file is None: inds = torch.ones(len(saved_features["neural_points.xyz"]), dtype=torch.bool, device="cuda") else: inds = np.loadtxt(inds_file) return saved_features, inds, Rot, Translation def get_latest_epoch(resume_dir): os.makedirs(resume_dir, exist_ok=True) str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")] int_epoch = [int(i) for i in str_epoch] return None if len(int_epoch) == 0 else 
str_epoch[int_epoch.index(max(int_epoch))] def main(): torch.backends.cudnn.benchmark = True opt = EditOptions().parse() print("opt.color_loss_items ", opt.color_loss_items) if opt.debug: torch.autograd.set_detect_anomaly(True) print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Debug Mode') print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END) visualizer = Visualizer(opt) test_opt = copy.deepcopy(opt) test_opt.is_train = False test_opt.random_sample = 'no_crop' test_opt.random_sample_size = min(32, opt.random_sample_size) test_opt.batch_size = 1 test_opt.n_threads = 0 test_opt.split = "test" # test_dataset = create_dataset(test_opt) img_lst=None opt.is_train = False opt.mode = 2 if opt.resume_iter == "best": opt.resume_iter = "latest" opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir) model = create_model(opt) points_xyz_all = torch.zeros([0,3], device="cuda", dtype=torch.float32) points_embedding_all = torch.zeros([1,0,63], device="cuda", dtype=torch.float32) points_conf_all = torch.zeros([1,0,1], device="cuda", dtype=torch.float32) points_color_all = torch.zeros([1,0,3], device="cuda", dtype=torch.float32) points_dir_all = torch.zeros([1,0,3], device="cuda", dtype=torch.float32) Rw2c_all = torch.zeros([0,3,3], device="cuda", dtype=torch.float32) for name, inds_name, trans_name in zip(opt.neural_points_names, opt.parts_index_names, opt.Transformation_names): saved_features, inds, Rot, Tran = load_parts_info(opt, name, inds_name, trans_name) inds = torch.as_tensor(inds, dtype=torch.bool, device="cuda") Rot = torch.as_tensor(Rot, dtype=torch.float32, device=inds.device) Tran = torch.as_tensor(Tran, dtype=torch.float32, device=inds.device) xyz, points_embeding, points_conf, points_dir, points_color, eulers, Rw2c = saved_features["neural_points.xyz"][inds,:], saved_features["neural_points.points_embeding"][:,inds,:] if "neural_points.points_embeding" 
in saved_features else None,saved_features["neural_points.points_conf"][:,inds,:] if "neural_points.points_conf" in saved_features else None, saved_features["neural_points.points_dir"][:,inds,:] if "neural_points.points_dir" in saved_features else None, saved_features["neural_points.points_color"][:,inds,:] if "neural_points.points_color" in saved_features else None, saved_features["neural_points.eulers"] if "neural_points.eulers" in saved_features else None, saved_features["neural_points.Rw2c"] if "neural_points.Rw2c" in saved_features else None Mat = torch.eye(4, device=Rot.device, dtype=torch.float32) Mat[:3,:3] = Rot Mat[:3,3] = Tran xyz = (torch.cat([xyz, torch.ones_like(xyz[:,:1])], dim=-1) @ Mat.transpose(0,1))[:,:3] print("Rot", Rot) Rw2c = Rot if Rw2c is None else Rw2c @ Rot.transpose(0,1) #.transpose(0,1) # w2c is reversed against movement Rw2c = Rw2c[None, ...].expand(len(xyz),-1,-1) points_xyz_all = torch.cat([points_xyz_all, xyz], dim=0) Rw2c_all = torch.cat([Rw2c_all, Rw2c], dim=0) points_embedding_all = torch.cat([points_embedding_all, points_embeding], dim=1) points_conf_all = torch.cat([points_conf_all, points_conf], dim=1) points_color_all = torch.cat([points_color_all, points_color], dim=1) points_dir_all = torch.cat([points_dir_all, points_dir], dim=1) model.set_points(points_xyz_all.cuda(), points_embedding_all.cuda(), points_color=points_color_all.cuda(), points_dir=points_dir_all.cuda(), points_conf=points_conf_all.cuda(), Rw2c=Rw2c_all.cuda(), editing=True) visualizer.save_neural_points("pnts", model.neural_points.xyz, None, None, save_ref=False) print("vis") # exit() test_opt.nerf_splits = ["test"] test_opt.split = "test" test_opt.test_num_step=1 # opt.test_num_step test_opt.name = opt.name + "/{}".format(opt.render_name) test_opt.render_only = 1 model.opt.no_loss = 1 model.opt.is_train = 0 model.setup(opt) print("full datasets test:") test_dataset = create_dataset(test_opt) render(model, test_dataset, Visualizer(test_opt), test_opt, 
gen_vid=True) # model.opt.no_loss = 0 # model.opt.is_train = 1 other_states = { 'epoch_count': 0, 'total_steps': 0, } print('saving model ({}, epoch {}, total_steps {})'.format(opt.name, 0, 0)) model.save_networks(0, other_states) # # def save_points_conf(visualizer, xyz, points_color, points_conf, total_steps): # print("total:", xyz.shape, points_color.shape, points_conf.shape) # colors, confs = points_color[0], points_conf[0,...,0] # pre = -1000 # for i in range(12): # thresh = (i * 0.1) if i <= 10 else 1000 # mask = ((confs <= thresh) * (confs > pre)) > 0 # thresh_xyz = xyz[mask, :] # thresh_color = colors[mask, :] # visualizer.save_neural_points(f"{total_steps}-{thresh}", thresh_xyz, thresh_color[None, ...], None, save_ref=False) # pre = thresh # exit() if __name__ == '__main__': main()
11,213
42.465116
774
py
pointnerf
pointnerf-master/run/evaluate.py
import os, sys, time, argparse, cv2 import numpy as np try: from skimage.measure import compare_ssim from skimage.measure import compare_psnr except: from skimage.metrics import structural_similarity from skimage.metrics import peak_signal_noise_ratio as compare_psnr def compare_ssim(gt, img, win_size, multichannel=True): return structural_similarity(gt, img, win_size=win_size, multichannel=multichannel) import torch from skimage.metrics import mean_squared_error import lpips parser = argparse.ArgumentParser(description="compute scores") parser.add_argument('-i', '--imgFolder', help="The folder that contain output images.") parser.add_argument('-g', '--gtFolder', default=None, help="The folder that contain gt images. By default it uses imgFolder") parser.add_argument('-o', '--outFolder', default=None, help="The folder that contain output files. By default it uses imgFolder") parser.add_argument('-is', '--imgStr', default="step-%04d-fine_raycolor.png", help="The string format for input images.") parser.add_argument('-gs', '--gtStr', default="step-%04d-gt_image.png", help="The string format for GT images.") parser.add_argument('-l', '--id_list', nargs='+', default=list(range(999)), help="The list of ids to test. By default it's 0~999.") parser.add_argument('-m', '--metrics', nargs='+', default=["psnr", "ssim", "lpips", "vgglpips"], help="The list of metrics to compute. 
By default it computes psnr, ssim and rmse.") def report_metrics(gtFolder, imgFolder, outFolder, metrics, id_list, imgStr="step-%04d-fine_raycolor.png", gtStr="step-%04d-gt_image.png", use_gpu=False, print_info=True): total ={} loss_fn, loss_fn_vgg = None, None if print_info: print("test id_list", id_list) print(gtFolder, imgFolder, outFolder) print(imgStr, gtStr) if "lpips" in metrics: loss_fn = lpips.LPIPS(net='alex', version='0.1') # we follow NVSF to use alex 0.1, NeRF use lpips.LPIPS(net='vgg') loss_fn = loss_fn.cuda() if use_gpu else loss_fn if "vgglpips" in metrics: loss_fn_vgg = lpips.LPIPS(net='vgg', version='0.1') #lpips.LPIPS(net='vgg') loss_fn_vgg = loss_fn_vgg.cuda() if use_gpu else loss_fn_vgg for i in id_list: img = cv2.imread(imgFolder+"/"+imgStr%i) gt = cv2.imread(gtFolder+"/"+gtStr%i) # print("img", imgFolder+"/"+imgStr%i) if img is None or gt is None: break img = np.asarray(img, np.float32)/255.0 gt = np.asarray(gt, np.float32)/255.0 for key in metrics: if key == "psnr": val = compare_psnr(gt, img) elif key == "ssim": val = compare_ssim(gt, img, 11, multichannel=True) elif key == "lpips": # image should be RGB, IMPORTANT: normalized to [-1,1] img_tensor = torch.from_numpy(img)[None].permute(0, 3, 1, 2).float() * 2 - 1.0 gt_tensor = torch.from_numpy(gt)[None].permute(0, 3, 1, 2).float() * 2 - 1.0 img_tensor = img_tensor.cuda() if use_gpu else img_tensor gt_tensor = gt_tensor.cuda() if use_gpu else gt_tensor val = loss_fn(img_tensor, gt_tensor).item() elif key == "vgglpips": # image should be RGB, IMPORTANT: normalized to [-1,1] img_tensor = torch.from_numpy(img)[None].permute(0, 3, 1, 2).float() * 2 - 1.0 gt_tensor = torch.from_numpy(gt)[None].permute(0, 3, 1, 2).float() * 2 - 1.0 img_tensor = img_tensor.cuda() if use_gpu else img_tensor gt_tensor = gt_tensor.cuda() if use_gpu else gt_tensor val = loss_fn_vgg(img_tensor, gt_tensor).item() elif key == "rmse": val = np.sqrt(mean_squared_error(gt, img)) else: raise NotImplementedError("metrics of {} 
not implemented".format(key)) if key not in total: total[key] = [val] else: total[key].append(val) del loss_fn del loss_fn_vgg torch.cuda.empty_cache() print(len(id_list), "images computed") if len(total) > 0: outStr = "" for key in total.keys(): vals = np.asarray(total[key]).reshape(-1) np.savetxt(outFolder+"/"+key+'.txt', vals) outStr+= key + ": %.6f\n"%np.mean(vals) print(outStr) with open(outFolder+"/scores.txt", "w") as f: f.write(outStr) ############################ if __name__ == '__main__': args = parser.parse_args() if args.gtFolder is None: args.gtFolder = args.imgFolder if args.outFolder is None: args.outFolder = args.imgFolder report_metrics(args.gtFolder, args.imgFolder, args.outFolder, args.metrics, args.id_list, imgStr=args.imgStr, gtStr=args.gtStr, use_gpu=True, print_info=False) # python run/evaluate.py -i ${nrCheckpoint}/dragon-test/images -g ${nrCheckpoint}/dragon-test/images -is step-%04d-fine_raycolor.png # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/lego_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcollego360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images --imgStr "lego_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/lego_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcollego360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images --imgStr "lego_test_64_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ship_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcolship360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images --imgStr "ship_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ship_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pcolship360_load_confcolordir_KNN8_LRelu_grid320_553_agg2_prl2e3/test_250000/images 
--imgStr "ship_test_64_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/chair_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pchair360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3.sh/test_250000/images --imgStr "chair_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/chair_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pchair360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3.sh/test_250000/images --imgStr "chair_test_64_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/materials_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/materials360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "materials_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/materials_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/materials360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_zeroone0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "materials_test_64_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/drums_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/drums360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_pru6_e4_prle3/test_250000/images --imgStr "drums_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/drums_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/drums360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_pru6_e4_prle3/test_250000/images --imgStr "drums_test_64_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ficus_8_50/ -g 
/home/xharlie/user_space/codes/testNr/checkpoints/pficus360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl8e3/test_250000/images --imgStr "ficus_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/ficus_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pficus360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl8e3/test_250000/images --imgStr "ficus_test_64_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/mic_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pmic360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl2e3/test_250000/images --imgStr "mic_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/mic_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/pmic360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask0_agg2_prl2e3/test_250000/images --imgStr "mic_test_64_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/hotdog_8_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/photdog360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask5_agg2_zeroone0.0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "hotdog_test_8_50_%d_infer.png" # python run/evaluate.py -i /home/xharlie/user_space/dev/npbg/nerf_synth/npbg_results/hotdog_64_50/ -g /home/xharlie/user_space/codes/testNr/checkpoints/photdog360_confcolordir_KNN8_LRelu_grid320_333_fullgeomask5_agg2_zeroone0.0001_confree_80_pru1_e4_prl2e3/test_250000/images --imgStr "hotdog_test_64_50_%d_infer.png"
9,769
61.229299
324
py
pointnerf
pointnerf-master/run/train_ft_nonstop.py
import sys import os import pathlib sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import glob import copy import torch import numpy as np import time from options import TrainOptions from data import create_data_loader, create_dataset from models import create_model from models.mvs.mvs_points_model import MvsPointsModel from models.mvs import mvs_utils, filter_utils from pprint import pprint from utils.visualizer import Visualizer from utils import format as fmt from run.evaluate import report_metrics torch.manual_seed(0) np.random.seed(0) import random import cv2 from PIL import Image from tqdm import tqdm import gc def mse2psnr(x): return -10.* torch.log(x)/np.log(10.) def save_image(img_array, filepath): assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]) if img_array.dtype != np.uint8: img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8) os.makedirs(os.path.dirname(filepath), exist_ok=True) Image.fromarray(img_array).save(filepath) def nearest_view(campos, raydir, xyz, id_list): cam_ind = torch.zeros([0,1], device=campos.device, dtype=torch.long) step=10000 for i in range(0, len(xyz), step): dists = xyz[i:min(len(xyz),i+step), None, :] - campos[None, ...] 
# N, M, 3 dists_norm = torch.norm(dists, dim=-1) # N, M dists_dir = dists / (dists_norm[...,None]+1e-6) # N, M, 3 dists = dists_norm / 200 + (1.1 - torch.sum(dists_dir * raydir[None, :],dim=-1)) # N, M cam_ind = torch.cat([cam_ind, torch.argmin(dists, dim=1).view(-1,1)], dim=0) # N, 1 return cam_ind def gen_points_filter_embeddings(dataset, visualizer, opt): print('-----------------------------------Generate Points-----------------------------------') opt.is_train=False opt.mode = 1 model = create_model(opt) model.setup(opt) model.eval() cam_xyz_all = [] intrinsics_all = [] extrinsics_all = [] confidence_all = [] points_mask_all = [] intrinsics_full_lst = [] confidence_filtered_all = [] near_fars_all = [] gpu_filter = True cpu2gpu= len(dataset.view_id_list) > 300 imgs_lst, HDWD_lst, c2ws_lst, w2cs_lst, intrinsics_lst = [],[],[],[],[] with torch.no_grad(): for i in tqdm(range(0, len(dataset.view_id_list))): data = dataset.get_init_item(i) model.set_input(data) # intrinsics 1, 3, 3, 3 points_xyz_lst, photometric_confidence_lst, point_mask_lst, intrinsics_lst, extrinsics_lst, HDWD, c2ws, w2cs, intrinsics, near_fars = model.gen_points() # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0) B, N, C, H, W, _ = points_xyz_lst[0].shape # print("points_xyz_lst",points_xyz_lst[0].shape) cam_xyz_all.append((points_xyz_lst[0].cpu() if cpu2gpu else points_xyz_lst[0]) if gpu_filter else points_xyz_lst[0].cpu().numpy()) # intrinsics_lst[0] 1, 3, 3 intrinsics_all.append(intrinsics_lst[0] if gpu_filter else intrinsics_lst[0]) extrinsics_all.append(extrinsics_lst[0] if gpu_filter else extrinsics_lst[0].cpu().numpy()) if opt.manual_depth_view !=0: confidence_all.append((photometric_confidence_lst[0].cpu() if cpu2gpu else photometric_confidence_lst[0]) if gpu_filter else photometric_confidence_lst[0].cpu().numpy()) points_mask_all.append((point_mask_lst[0].cpu() if cpu2gpu else point_mask_lst[0]) if gpu_filter else 
point_mask_lst[0].cpu().numpy()) imgs_lst.append(data["images"].cpu()) HDWD_lst.append(HDWD) c2ws_lst.append(c2ws) w2cs_lst.append(w2cs) intrinsics_full_lst.append(intrinsics) near_fars_all.append(near_fars[0,0]) # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0) # #################### start query embedding ################## torch.cuda.empty_cache() if opt.manual_depth_view != 0: if gpu_filter: _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=True, return_w=True, cpu2gpu=cpu2gpu, near_fars_all=near_fars_all) else: _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks(cam_xyz_all, [intr.cpu().numpy() for intr in intrinsics_all], extrinsics_all, confidence_all, points_mask_all, opt) # print(xyz_ref_lst[0].shape) # 224909, 3 else: cam_xyz_all = [cam_xyz_all[i].reshape(-1,3)[points_mask_all[i].reshape(-1),:] for i in range(len(cam_xyz_all))] xyz_world_all = [np.matmul(np.concatenate([cam_xyz_all[i], np.ones_like(cam_xyz_all[i][..., 0:1])], axis=-1), np.transpose(np.linalg.inv(extrinsics_all[i][0,...])))[:, :3] for i in range(len(cam_xyz_all))] xyz_world_all, cam_xyz_all, confidence_filtered_all = filter_by_masks.range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_all, opt) del cam_xyz_all # for i in range(len(xyz_world_all)): # visualizer.save_neural_points(i, torch.as_tensor(xyz_world_all[i], device="cuda", dtype=torch.float32), None, data, save_ref=opt.load_points==0) # exit() # xyz_world_all = xyz_world_all.cuda() # confidence_filtered_all = confidence_filtered_all.cuda() points_vid = torch.cat([torch.ones_like(xyz_world_all[i][...,0:1]) * i for i in range(len(xyz_world_all))], dim=0) xyz_world_all = torch.cat(xyz_world_all, dim=0) if gpu_filter else torch.as_tensor( np.concatenate(xyz_world_all, axis=0), device="cuda", dtype=torch.float32) confidence_filtered_all = 
torch.cat(confidence_filtered_all, dim=0) if gpu_filter else torch.as_tensor(np.concatenate(confidence_filtered_all, axis=0), device="cuda", dtype=torch.float32) print("xyz_world_all", xyz_world_all.shape, points_vid.shape, confidence_filtered_all.shape) torch.cuda.empty_cache() # visualizer.save_neural_points(0, xyz_world_all, None, None, save_ref=False) # print("vis 0") print("%%%%%%%%%%%%% getattr(dataset, spacemin, None)", getattr(dataset, "spacemin", None)) if getattr(dataset, "spacemin", None) is not None: mask = (xyz_world_all - dataset.spacemin[None, ...].to(xyz_world_all.device)) >= 0 mask *= (dataset.spacemax[None, ...].to(xyz_world_all.device) - xyz_world_all) >= 0 mask = torch.prod(mask, dim=-1) > 0 first_lst, second_lst = masking(mask, [xyz_world_all, points_vid, confidence_filtered_all], []) xyz_world_all, points_vid, confidence_filtered_all = first_lst # visualizer.save_neural_points(50, xyz_world_all, None, None, save_ref=False) # print("vis 50") if getattr(dataset, "alphas", None) is not None: vishull_mask = mvs_utils.alpha_masking(xyz_world_all, dataset.alphas, dataset.intrinsics, dataset.cam2worlds, dataset.world2cams, dataset.near_far if opt.ranges[0] < -90.0 and getattr(dataset,"spacemin",None) is None else None, opt=opt) first_lst, second_lst = masking(vishull_mask, [xyz_world_all, points_vid, confidence_filtered_all], []) xyz_world_all, points_vid, confidence_filtered_all = first_lst print("alpha masking xyz_world_all", xyz_world_all.shape, points_vid.shape) # visualizer.save_neural_points(100, xyz_world_all, None, data, save_ref=opt.load_points == 0) # print("vis 100") if opt.vox_res > 0: xyz_world_all, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(xyz_world_all.cuda() if len(xyz_world_all) < 99999999 else xyz_world_all[::(len(xyz_world_all)//99999999+1),...].cuda(), opt.vox_res) points_vid = points_vid[sampled_pnt_idx,:] confidence_filtered_all = confidence_filtered_all[sampled_pnt_idx] print("after voxelize:", 
xyz_world_all.shape, points_vid.shape) xyz_world_all = xyz_world_all.cuda() xyz_world_all = [xyz_world_all[points_vid[:,0]==i, :] for i in range(len(HDWD_lst))] confidence_filtered_all = [confidence_filtered_all[points_vid[:,0]==i] for i in range(len(HDWD_lst))] cam_xyz_all = [(torch.cat([xyz_world_all[i], torch.ones_like(xyz_world_all[i][...,0:1])], dim=-1) @ extrinsics_all[i][0].t())[...,:3] for i in range(len(HDWD_lst))] points_embedding_all, points_color_all, points_dir_all, points_conf_all = [], [], [], [] for i in tqdm(range(len(HDWD_lst))): if len(xyz_world_all[i]) > 0: embedding, color, dir, conf = model.query_embedding(HDWD_lst[i], torch.as_tensor(cam_xyz_all[i][None, ...], device="cuda", dtype=torch.float32), torch.as_tensor(confidence_filtered_all[i][None, :, None], device="cuda", dtype=torch.float32) if len(confidence_filtered_all) > 0 else None, imgs_lst[i].cuda(), c2ws_lst[i], w2cs_lst[i], intrinsics_full_lst[i], 0, pointdir_w=True) points_embedding_all.append(embedding) points_color_all.append(color) points_dir_all.append(dir) points_conf_all.append(conf) xyz_world_all = torch.cat(xyz_world_all, dim=0) points_embedding_all = torch.cat(points_embedding_all, dim=1) points_color_all = torch.cat(points_color_all, dim=1) if points_color_all[0] is not None else None points_dir_all = torch.cat(points_dir_all, dim=1) if points_dir_all[0] is not None else None points_conf_all = torch.cat(points_conf_all, dim=1) if points_conf_all[0] is not None else None visualizer.save_neural_points(200, xyz_world_all, points_color_all, data, save_ref=opt.load_points == 0) print("vis") model.cleanup() del model return xyz_world_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, [img[0].cpu() for img in imgs_lst], [c2w for c2w in c2ws_lst], [w2c for w2c in w2cs_lst] , intrinsics_all, [list(HDWD) for HDWD in HDWD_lst] def masking(mask, firstdim_lst, seconddim_lst): first_lst = [item[mask, ...] 
                 if item is not None else None for item in firstdim_lst]
    second_lst = [item[:, mask, ...] if item is not None else None
                  for item in seconddim_lst]
    return first_lst, second_lst


def render_vid(model, dataset, visualizer, opt, bg_info, steps=0, gen_vid=True):
    """Render every frame of `dataset` chunk-by-chunk and (optionally) assemble a video.

    Frames come from `dataset.get_dummyrot_item`, i.e. synthesized camera poses.
    Rays are processed in chunks of `opt.random_sample_size ** 2` pixels; per-key
    outputs are accumulated into full-resolution numpy images and handed to
    `visualizer.display_current_results`.

    Args:
        model: project model exposing set_input/test/get_current_visuals.
        dataset: dataset exposing total/height/width/get_dummyrot_item.
        visualizer: project Visualizer (also owns the output image dir).
        opt: parsed options namespace.
        bg_info: 7-tuple used only when opt.bgmodel ends with "plane";
            last element is a per-frame list of precomputed background rays.
        steps: unused here except as caller bookkeeping.
        gen_vid: when True, deletes the dataset and writes a video from the
            saved "coarse_raycolor" frames.
    """
    print('-----------------------------------Rendering-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size  # pixels per forward pass
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    for i in range(0, total_num):
        data = dataset.get_dummyrot_item(i)
        raydir = data['raydir'].clone()
        # Flatten (B, H, W, 2) pixel indices to (B, H*W, 2) for chunked slicing.
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # cam_posts.append(data['campos'])
        # cam_dirs.append(data['raydir'] + data['campos'][None,...])
        # continue
        visuals = None
        stime = time.time()
        for k in range(0, height * width, chunk_size):
            start = k
            end = min([k + chunk_size, height * width])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    # Background rays were precomputed per frame; gather this chunk's pixels.
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1, 2)
                    bg_ray = bg_ray_all[:, bg_idx[:, 1].long(), bg_idx[:, 0].long(), :]
                else:
                    # Compute background rays on the fly from plane intersections.
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst,
                                             intrinsics_all, HDWD_lst, data["plane_color"],
                                             fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            if visuals is None:
                # First chunk: allocate one flat (H*W, 3) buffer per output key.
                visuals = {}
                for key, value in curr_visuals.items():
                    if key == "gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype)
                    visuals[key][start:end, :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if key == "gt_image":
                        continue
                    visuals[key][start:end, :] = value.cpu().numpy()
        for key, value in visuals.items():
            visualizer.print_details("{}:{}".format(key, visuals[key].shape))
            visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime),
              " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i)
    # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0), None, None, save_ref=False)
    # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0), None, None, save_ref=False)
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset  # free memory before video encoding
        visualizer.gen_video("coarse_raycolor", range(0, total_num), 0)
        print('--------------------------------Finish generating vid--------------------------------')
    return


def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=False, lpips=True):
    """Evaluate the model on `dataset` and return the PSNR of the first loss item.

    For every `opt.test_num_step`-th frame: renders chunk-by-chunk, scatters
    outputs into (H, W, 3) numpy images via the per-chunk pixel indices,
    accumulates MSE-based losses, and writes images through `visualizer`.
    Finally calls `report_metrics` over the saved images.

    Args:
        bg_info: same 7-tuple convention as render_vid (plane background only).
        test_steps: training-step tag used for naming the generated video.
        gen_vid: when True, also encode a video of "coarse_raycolor" frames.
        lpips: when True, include LPIPS/VGG-LPIPS in report_metrics.
    Returns:
        PSNR (from visualizer) for opt.test_color_loss_items[0].
    """
    print('-----------------------------------Testing-----------------------------------')
    model.eval()
    total_num = dataset.total
    print("test set size {}, interval {}".format(total_num, opt.test_num_step))
    # 1 if test_steps == 10000 else opt.test_num_step
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    count = 0;
    for i in range(0, total_num, opt.test_num_step):  # 1 if test_steps == 10000 else opt.test_num_step
        data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        # Boolean mask over the full image marking pixels covered by this item.
        edge_mask = torch.zeros([height, width], dtype=torch.bool)
        edge_mask[pixel_idx[0, ..., 1].to(torch.long), pixel_idx[0, ..., 0].to(torch.long)] = 1
        edge_mask = edge_mask.reshape(-1) > 0
        np_edge_mask = edge_mask.numpy().astype(bool)
        totalpixel = pixel_idx.shape[1]
        tmpgts = {}
        tmpgts["gt_image"] = data['gt_image'].clone()
        tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None
        # data.pop('gt_image', None)
        data.pop('gt_mask', None)
        visuals = None
        stime = time.time()
        ray_masks = []
        for k in range(0, totalpixel, chunk_size):
            start = k
            end = min([k + chunk_size, totalpixel])
            data['raydir'] = raydir[:, start:end, :]
            data["pixel_idx"] = pixel_idx[:, start:end, :]
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info
                if len(bg_ray_lst) > 0:
                    bg_ray_all = bg_ray_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1, 2)
                    bg_ray = bg_ray_all[:, bg_idx[:, 1].long(), bg_idx[:, 0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
                    bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst,
                                             intrinsics_all, HDWD_lst, data["plane_color"],
                                             fg_masks=fg_masks, vis=visualizer)
                data["bg_ray"] = bg_ray
                # xyz_world_sect_plane_lst.append(xyz_world_sect_plane)
            model.test()
            curr_visuals = model.get_current_visuals(data=data)
            # print("loss", mse2psnr(torch.nn.MSELoss().to("cuda")(curr_visuals['coarse_raycolor'], tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :].cuda())))
            chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32)
            if visuals is None:
                # First chunk: allocate (H, W, 3) buffers; scatter by pixel coords.
                visuals = {}
                for key, value in curr_visuals.items():
                    if value is None or key == "gt_image":
                        continue
                    chunk = value.cpu().numpy()
                    visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype)
                    visuals[key][chunk_pixel_id[0, ..., 1], chunk_pixel_id[0, ..., 0], :] = chunk
            else:
                for key, value in curr_visuals.items():
                    if value is None or key == "gt_image":
                        continue
                    visuals[key][chunk_pixel_id[0, ..., 1], chunk_pixel_id[0, ..., 0], :] = value.cpu().numpy()
            if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
                ray_masks.append(model.output["ray_mask"] > 0)
        if len(ray_masks) > 0:
            ray_masks = torch.cat(ray_masks, dim=1)
        # visualizer.save_neural_points(data["id"].cpu().numpy()[0], (raydir.cuda() + data["campos"][:, None, :]).squeeze(0), None, data, save_ref=True)
        # exit()
        # print("curr_visuals",curr_visuals)
        pixel_idx = pixel_idx.to(torch.long)
        gt_image = torch.zeros((height * width, 3), dtype=torch.float32)
        gt_image[edge_mask, :] = tmpgts['gt_image'].clone()
        if 'gt_image' in model.visual_names:
            visuals['gt_image'] = gt_image
        if 'gt_mask' in curr_visuals:
            # NOTE(review): `chunk` here is whatever the last loop iteration left —
            # relies on at least one non-None visual having been produced.
            visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype)
            visuals['gt_mask'][np_edge_mask, :] = tmpgts['gt_mask']
        if 'ray_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape)
            visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0, :] = 0.0
        if 'ray_depth_masked_coarse_raycolor' in model.visual_names:
            visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3)
            visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'ray_depth_masked_gt_image' in model.visual_names:
            visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0
        if 'gt_image_ray_masked' in model.visual_names:
            visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3)
            visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0, :] = 0.0
        for key, value in visuals.items():
            if key in opt.visual_items:
                visualizer.print_details("{}:{}".format(key, visuals[key].shape))
                visuals[key] = visuals[key].reshape(height, width, 3)
        print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime),
              " at ", visualizer.image_dir)
        visualizer.display_current_results(visuals, i, opt=opt)

        # --- per-frame loss accumulation -----------------------------------
        acc_dict = {}
        if "coarse_raycolor" in opt.test_color_loss_items:
            loss = torch.nn.MSELoss().to("cuda")(
                torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3),
                gt_image.view(1, -1, 3).cuda())
            acc_dict.update({"coarse_raycolor": loss})
            print("coarse_raycolor", loss, mse2psnr(loss))
        if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items:
            # Compare only the pixels whose rays actually hit neural points.
            masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks, :].reshape(1, -1, 3)
            ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:, edge_mask, :][ray_masks, :].reshape(1, -1, 3)
            # (original debug image dumps with hard-coded local paths removed)
            loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss)))
        if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items:
            ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1)
            masked_gt = torch.masked_select(
                tmpgts["gt_image"].view(1, -1, 3).cuda(),
                (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3))
            ray_depth_masked_coarse_raycolor = torch.masked_select(
                torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3),
                ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3))
            loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt)
            acc_dict.update({"ray_depth_masked_coarse_raycolor": loss})
            visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss)))
        print(acc_dict.items())
        visualizer.accumulate_losses(acc_dict)
        count += 1
    visualizer.print_losses(count)
    psnr = visualizer.get_psnr(opt.test_color_loss_items[0])
    # visualizer.reset()
    print('--------------------------------Finish Test Rendering--------------------------------')
    report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir,
                   ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"],
                   [i for i in range(0, total_num, opt.test_num_step)],
                   imgStr="step-%04d-{}.png".format(opt.visual_items[0]),
                   gtStr="step-%04d-{}.png".format(opt.visual_items[1]))
    print('--------------------------------Finish Evaluation--------------------------------')
    if gen_vid:
        del dataset
        visualizer.gen_video("coarse_raycolor", range(0, total_num, opt.test_num_step), test_steps)
        print('--------------------------------Finish generating vid--------------------------------')
    return psnr


def probe_hole(model, dataset, visualizer, opt, bg_info, test_steps=0, opacity_thresh=0.7):
    """Find image regions where rays miss the point cloud ("holes") and collect
    candidate points (xyz / conf / color / dir / embedding) to grow there.

    Temporarily switches the model into probe mode (model.opt.prob = 1) and may
    enlarge the query kernel per opt.prob_tiers; both are restored before return.

    Returns:
        (add_xyz, add_embedding, add_color, add_dir, add_conf) — concatenated
        candidate attributes; conf/color/dir may be None when the model did not
        produce the corresponding shading averages.
    """
    print('-----------------------------------Probing Holes-----------------------------------')
    add_xyz = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_conf = torch.zeros([0, 1], device="cuda", dtype=torch.float32)
    add_color = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_dir = torch.zeros([0, 3], device="cuda", dtype=torch.float32)
    add_embedding = torch.zeros([0, opt.point_features_dim], device="cuda", dtype=torch.float32)
    kernel_size = model.opt.kernel_size  # saved; restored after probing
    if opt.prob_kernel_size is not None:
        # Pick the kernel triple for the current training tier.
        tier = np.sum(np.asarray(opt.prob_tiers) < test_steps)
        print("cal by tier", tier)
        model.opt.query_size = np.asarray(opt.prob_kernel_size[tier * 3:tier * 3 + 3])
        print("prob query size =", model.opt.query_size)
    model.opt.prob = 1
    total_num = len(model.top_ray_miss_ids) - 1 if opt.prob_mode == 0 and opt.prob_num_step > 1 else len(dataset)
    patch_size = opt.random_sample_size
    chunk_size = patch_size * patch_size
    height = dataset.height
    width = dataset.width
    visualizer.reset()
    max_num = len(dataset) // opt.prob_num_step
    take_top = False
    if opt.prob_top == 1 and opt.prob_mode <= 0:  # and opt.far_thresh <= 0:
        if getattr(model, "top_ray_miss_ids", None) is not None:
            # Probe the frames ranked worst by accumulated ray-miss loss.
            mask = model.top_ray_miss_loss[:-1] > 0.0
            frame_ids = model.top_ray_miss_ids[:-1][mask][:max_num]
            print(len(frame_ids), max_num)
            print("prob frame top_ray_miss_loss:", model.top_ray_miss_loss)
            take_top = True
        else:
            # NOTE(review): `frame_ids` stays unbound on this path and the print
            # below would raise NameError — confirm this branch is unreachable.
            print("model has no top_ray_miss_ids")
    else:
        frame_ids = list(range(len(dataset)))[:max_num]
        random.shuffle(frame_ids)
        frame_ids = frame_ids[:max_num]
    print("{}/{} has holes, id_lst to prune".format(len(frame_ids), total_num), frame_ids, opt.prob_num_step)
    print("take top:", take_top, "; prob frame ids:", frame_ids)
    with tqdm(range(len(frame_ids))) as pbar:
        for j in pbar:
            i = frame_ids[j]
            pbar.set_description("Processing frame id %d" % i)
            data = dataset.get_item(i)
            bg = data['bg_color'][None, :].cuda()
            raydir = data['raydir'].clone()
            pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
            edge_mask = torch.zeros([height, width], dtype=torch.bool, device='cuda')
            edge_mask[pixel_idx[0, ..., 1].to(torch.long), pixel_idx[0, ..., 0].to(torch.long)] = 1
            edge_mask = edge_mask.reshape(-1) > 0
            totalpixel = pixel_idx.shape[1]
            gt_image_full = data['gt_image'].cuda()
            probe_keys = ["coarse_raycolor", "ray_mask", "ray_max_sample_loc_w", "ray_max_far_dist",
                          "ray_max_shading_opacity", "shading_avg_color", "shading_avg_dir",
                          "shading_avg_conf", "shading_avg_embedding"]
            prob_maps = {}
            for k in range(0, totalpixel, chunk_size):
                start = k
                end = min([k + chunk_size, totalpixel])
                data['raydir'] = raydir[:, start:end, :]
                data["pixel_idx"] = pixel_idx[:, start:end, :]
                model.set_input(data)
                output = model.test()
                chunk_pixel_id = data["pixel_idx"].to(torch.long)
                output["ray_mask"] = output["ray_mask"][..., None]
                for key in probe_keys:
                    if "ray_max_shading_opacity" not in output and key != 'coarse_raycolor':
                        break  # probe outputs absent: only the ray color is available
                    if output[key] is None:
                        prob_maps[key] = None
                    else:
                        if key not in prob_maps.keys():
                            C = output[key].shape[-1]
                            prob_maps[key] = torch.zeros((height, width, C), device="cuda", dtype=output[key].dtype)
                        prob_maps[key][chunk_pixel_id[0, ..., 1], chunk_pixel_id[0, ..., 0], :] = output[key]
            gt_image = torch.zeros((height * width, 3), dtype=torch.float32, device=prob_maps["ray_mask"].device)
            gt_image[edge_mask, :] = gt_image_full
            gt_image = gt_image.reshape(height, width, 3)
            # "miss" = ray hit nothing but the GT pixel is not background color.
            miss_ray_mask = (prob_maps["ray_mask"] < 1) * (torch.norm(gt_image - bg, dim=-1, keepdim=True) > 0.002)
            miss_ray_inds = (edge_mask.reshape(height, width, 1) * miss_ray_mask).squeeze(-1).nonzero()  # N, 2
            # Dilate miss pixels by one so candidates come from hit neighbors.
            neighbor_inds = bloat_inds(miss_ray_inds, 1, height, width)
            neighboring_miss_mask = torch.zeros_like(gt_image[..., 0])
            neighboring_miss_mask[neighbor_inds[..., 0], neighbor_inds[..., 1]] = 1
            if opt.far_thresh > 0:
                # Also grow where the last sample is far yet the color already matches.
                far_ray_mask = (prob_maps["ray_mask"] > 0) * (prob_maps["ray_max_far_dist"] > opt.far_thresh) * (torch.norm(gt_image - prob_maps["coarse_raycolor"], dim=-1, keepdim=True) < 0.1)
                neighboring_miss_mask += far_ray_mask.squeeze(-1)
            # Keep only hit rays with sufficiently opaque peak shading.
            neighboring_miss_mask = (prob_maps["ray_mask"].squeeze(-1) > 0) * neighboring_miss_mask * (prob_maps["ray_max_shading_opacity"].squeeze(-1) > opacity_thresh) > 0
            add_xyz = torch.cat([add_xyz, prob_maps["ray_max_sample_loc_w"][neighboring_miss_mask]], dim=0)
            add_conf = torch.cat([add_conf, prob_maps["shading_avg_conf"][neighboring_miss_mask]], dim=0) * opt.prob_mul if prob_maps["shading_avg_conf"] is not None else None
            add_color = torch.cat([add_color, prob_maps["shading_avg_color"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_color"] is not None else None
            add_dir = torch.cat([add_dir, prob_maps["shading_avg_dir"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_dir"] is not None else None
            add_embedding = torch.cat([add_embedding, prob_maps["shading_avg_embedding"][neighboring_miss_mask]], dim=0)
            if len(add_xyz) > -1:  # NOTE(review): always true — intended as a debug toggle?
                output = prob_maps["coarse_raycolor"].permute(2, 0, 1)[None, None, ...]
                visualizer.save_ref_views({"images": output}, i, subdir="prob_img_{:04d}".format(test_steps))
    model.opt.kernel_size = kernel_size  # restore probing overrides
    if opt.bgmodel.startswith("planepoints"):
        mask = dataset.filter_plane(add_xyz)
        first_lst, _ = masking(mask, [add_xyz, add_embedding, add_color, add_dir, add_conf], [])
        add_xyz, add_embedding, add_color, add_dir, add_conf = first_lst
    if len(add_xyz) > 0:
        visualizer.save_neural_points("prob{:04d}".format(test_steps), add_xyz, None, None, save_ref=False)
        visualizer.print_details("vis added points to probe folder")
    if opt.prob_mode == 0 and opt.prob_num_step > 1:
        model.reset_ray_miss_ranking()
    del visualizer, prob_maps
    model.opt.prob = 0
    return add_xyz, add_embedding, add_color, add_dir, add_conf


def bloat_inds(inds, shift, height, width):
    """Dilate (row, col) indices by a square window of radius `shift`.

    Args:
        inds: (N, 2) integer tensor of pixel coordinates.
        shift: dilation radius in pixels (window side = 2*shift + 1).
        height, width: image bounds used to clamp the result.
    Returns:
        (N * (2*shift+1)^2, 2) CUDA tensor of clamped neighbor coordinates
        (duplicates are possible at image borders).
    """
    inds = inds[:, None, :]
    sx, sy = torch.meshgrid(torch.arange(-shift, shift + 1, dtype=torch.long),
                            torch.arange(-shift, shift + 1, dtype=torch.long))
    shift_inds = torch.stack([sx, sy], dim=-1).reshape(1, -1, 2).cuda()
    inds = inds + shift_inds
    inds = inds.reshape(-1, 2)
    inds[..., 0] = torch.clamp(inds[..., 0], min=0, max=height - 1)
    inds[..., 1] = torch.clamp(inds[..., 1], min=0, max=width - 1)
    return inds
def get_latest_epoch(resume_dir):
    """Return the checkpoint tag (string) with the highest numeric prefix among
    `*_states.pth` files in `resume_dir`, or None when the dir holds none.
    Creates `resume_dir` if missing."""
    os.makedirs(resume_dir, exist_ok=True)
    str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")]
    int_epoch = [int(i) for i in str_epoch]
    return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))]


def create_all_bg(dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=False):
    """Precompute plane-background rays for every frame of `dataset`.

    Temporarily forces `dataset.opt.random_sample = "no_crop"` so each item
    covers the full image, then restores the original setting.

    Args:
        dummy: use `get_dummyrot_item` (synthetic poses) instead of `get_item`.
    Returns:
        list of per-frame (B, H, W, 3) background-ray tensors.
    """
    total_num = dataset.total
    height = dataset.height
    width = dataset.width
    bg_ray_lst = []
    random_sample = dataset.opt.random_sample  # saved; restored below
    for i in range(0, total_num):
        dataset.opt.random_sample = "no_crop"
        if dummy:
            data = dataset.get_dummyrot_item(i)
        else:
            data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        # print("data['pixel_idx']",data['pixel_idx'].shape) # 1, 512, 640, 2
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        start = 0
        end = height * width
        data['raydir'] = raydir[:, start:end, :]
        data["pixel_idx"] = pixel_idx[:, start:end, :]
        model.set_input(data)
        xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
        bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst,
                                 intrinsics_all, HDWD_lst, data["plane_color"])
        bg_ray = bg_ray.reshape(bg_ray.shape[0], height, width, 3)  # 1, 512, 640, 3
        bg_ray_lst.append(bg_ray)
    dataset.opt.random_sample = random_sample
    return bg_ray_lst


def main():
    """Entry point: build/resume the point-based model, initialize neural points,
    then run the training loop with periodic pruning, hole-probing point growth,
    checkpointing, video rendering and test evaluation.

    Terminates via exit() after the final full-dataset test.
    """
    torch.backends.cudnn.benchmark = True
    opt = TrainOptions().parse()
    cur_device = torch.device('cuda:{}'.format(opt.gpu_ids[0]) if opt.gpu_ids else torch.device('cpu'))
    print("opt.color_loss_items ", opt.color_loss_items)

    if opt.debug:
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END)

    visualizer = Visualizer(opt)
    train_dataset = create_dataset(opt)
    normRw2c = train_dataset.norm_w2c[:3, :3]  # torch.eye(3, device="cuda") #
    img_lst = None
    best_PSNR = 0.0
    best_iter = 0
    points_xyz_all = None

    # ---- model construction / resume / initial point-cloud setup ----------
    with torch.no_grad():
        print(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth")
        if len([n for n in glob.glob(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth") if os.path.isfile(n)]) > 0:
            # Case 1: a checkpoint exists — resume from it.
            if opt.bgmodel.endswith("plane"):
                _, _, _, _, _, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
            resume_dir = os.path.join(opt.checkpoints_dir, opt.name)
            if opt.resume_iter == "best":
                opt.resume_iter = "latest"
            resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir)
            if resume_iter is None:
                epoch_count = 1
                total_steps = 0
                visualizer.print_details("No previous checkpoints, start from scratch!!!!")
            else:
                opt.resume_iter = resume_iter
                states = torch.load(os.path.join(resume_dir, '{}_states.pth'.format(resume_iter)), map_location=cur_device)
                epoch_count = states['epoch_count']
                total_steps = states['total_steps']
                best_PSNR = states['best_PSNR'] if 'best_PSNR' in states else best_PSNR
                best_iter = states['best_iter'] if 'best_iter' in states else best_iter
                best_PSNR = best_PSNR.item() if torch.is_tensor(best_PSNR) else best_PSNR
                visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                visualizer.print_details('Continue training from {} epoch'.format(opt.resume_iter))
                visualizer.print_details(f"Iter: {total_steps}")
                visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                del states
            opt.mode = 2
            opt.load_points = 1
            opt.resume_dir = resume_dir
            opt.resume_iter = resume_iter
            opt.is_train = True
            model = create_model(opt)
        elif opt.load_points < 1:
            # Case 2: no checkpoint, generate points + embeddings from MVS.
            points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
            opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
            opt.is_train = True
            opt.mode = 2
            model = create_model(opt)
        else:
            # Case 3: load an existing point cloud (and/or depth points), then
            # query per-point embeddings from the nearest training view.
            load_points = opt.load_points
            opt.is_train = False
            opt.mode = 1
            opt.load_points = 0
            model = create_model(opt)
            model.setup(opt)
            model.eval()
            if load_points in [1, 3]:
                points_xyz_all = train_dataset.load_init_points()
            if load_points == 2:
                points_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=100)
            if load_points == 3:
                # Merge depth points, but first drop depth points that fall into
                # voxels already occupied by the loaded point cloud.
                depth_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=80)
                print("points_xyz_all", points_xyz_all.shape)
                print("depth_xyz_all", depth_xyz_all.shape)
                filter_res = 100
                pc_grid_id, _, pc_space_min, pc_space_max = mvs_utils.construct_vox_points_ind(points_xyz_all, filter_res)
                d_grid_id, depth_inds, _, _ = mvs_utils.construct_vox_points_ind(depth_xyz_all, filter_res, space_min=pc_space_min, space_max=pc_space_max)
                all_grid = torch.cat([pc_grid_id, d_grid_id], dim=0)
                min_id = torch.min(all_grid, dim=-2)[0]
                max_id = torch.max(all_grid, dim=-2)[0] - min_id
                max_id_lst = (max_id + 1).cpu().numpy().tolist()
                mask = torch.ones(max_id_lst, device=d_grid_id.device)
                pc_maskgrid_id = (pc_grid_id - min_id[None, ...]).to(torch.long)
                mask[pc_maskgrid_id[..., 0], pc_maskgrid_id[..., 1], pc_maskgrid_id[..., 2]] = 0
                depth_maskinds = (d_grid_id[depth_inds, :] - min_id).to(torch.long)
                depth_maskinds = mask[depth_maskinds[..., 0], depth_maskinds[..., 1], depth_maskinds[..., 2]]
                depth_xyz_all = depth_xyz_all[depth_maskinds > 0]
                visualizer.save_neural_points("dep_filtered", depth_xyz_all, None, None, save_ref=False)
                print("vis depth; after pc mask depth_xyz_all", depth_xyz_all.shape)
                points_xyz_all = [points_xyz_all, depth_xyz_all] if opt.vox_res > 0 else torch.cat([points_xyz_all, depth_xyz_all], dim=0)
                del depth_xyz_all, depth_maskinds, mask, pc_maskgrid_id, max_id_lst, max_id, min_id, all_grid

            if opt.ranges[0] > -99.0:
                # Crop points to the user-specified axis-aligned bounding box.
                ranges = torch.as_tensor(opt.ranges, device=points_xyz_all.device, dtype=torch.float32)
                mask = torch.prod(torch.logical_and(points_xyz_all[..., :3] >= ranges[None, :3], points_xyz_all[..., :3] <= ranges[None, 3:]), dim=-1) > 0
                points_xyz_all = points_xyz_all[mask]

            if opt.vox_res > 0:
                # Voxel-downsample each point set (depth points get a coarser grid).
                points_xyz_all = [points_xyz_all] if not isinstance(points_xyz_all, list) else points_xyz_all
                points_xyz_holder = torch.zeros([0, 3], dtype=points_xyz_all[0].dtype, device="cuda")
                for i in range(len(points_xyz_all)):
                    points_xyz = points_xyz_all[i]
                    vox_res = opt.vox_res // (1.5 ** i)
                    print("load points_xyz", points_xyz.shape)
                    _, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(points_xyz.cuda() if len(points_xyz) < 80000000 else points_xyz[::(len(points_xyz) // 80000000 + 1), ...].cuda(), vox_res)
                    points_xyz = points_xyz[sampled_pnt_idx, :]
                    print("after voxelize:", points_xyz.shape)
                    points_xyz_holder = torch.cat([points_xyz_holder, points_xyz], dim=0)
                points_xyz_all = points_xyz_holder

            if opt.resample_pnts > 0:
                if opt.resample_pnts == 1:
                    print("points_xyz_all", points_xyz_all.shape)
                    inds = torch.min(torch.norm(points_xyz_all, dim=-1, keepdim=True), dim=0)[1]  # use the point closest to the origin
                else:
                    inds = torch.randperm(len(points_xyz_all))[:opt.resample_pnts, ...]
                points_xyz_all = points_xyz_all[inds, ...]

            # Assign each point to its nearest camera, then query embeddings
            # from that view and concatenate results back together.
            campos, camdir = train_dataset.get_campos_ray()
            cam_ind = nearest_view(campos, camdir, points_xyz_all, train_dataset.id_list)
            unique_cam_ind = torch.unique(cam_ind)
            print("unique_cam_ind", unique_cam_ind.shape)
            points_xyz_all = [points_xyz_all[cam_ind[:, 0] == unique_cam_ind[i], :] for i in range(len(unique_cam_ind))]
            featuredim = opt.point_features_dim
            points_embedding_all = torch.zeros([1, 0, featuredim], device=unique_cam_ind.device, dtype=torch.float32)
            points_color_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
            points_dir_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
            points_conf_all = torch.zeros([1, 0, 1], device=unique_cam_ind.device, dtype=torch.float32)
            print("extract points embeding & colors", )
            for i in tqdm(range(len(unique_cam_ind))):
                id = unique_cam_ind[i]
                batch = train_dataset.get_item(id, full_img=True)
                HDWD = [train_dataset.height, train_dataset.width]
                c2w = batch["c2w"][0].cuda()
                w2c = torch.inverse(c2w)
                intrinsic = batch["intrinsic"].cuda()
                # cam_xyz_all 252, 4
                cam_xyz_all = (torch.cat([points_xyz_all[i], torch.ones_like(points_xyz_all[i][..., -1:])], dim=-1) @ w2c.transpose(0, 1))[..., :3]
                embedding, color, dir, conf = model.query_embedding(HDWD, cam_xyz_all[None, ...], None, batch['images'].cuda(), c2w[None, None, ...], w2c[None, None, ...], intrinsic[:, None, ...], 0, pointdir_w=True)
                conf = conf * opt.default_conf if opt.default_conf > 0 and opt.default_conf < 1.0 else conf
                points_embedding_all = torch.cat([points_embedding_all, embedding], dim=1)
                points_color_all = torch.cat([points_color_all, color], dim=1)
                points_dir_all = torch.cat([points_dir_all, dir], dim=1)
                points_conf_all = torch.cat([points_conf_all, conf], dim=1)
                # visualizer.save_neural_points(id, cam_xyz_all, color, batch, save_ref=True)
            points_xyz_all = torch.cat(points_xyz_all, dim=0)
            visualizer.save_neural_points("init", points_xyz_all, points_color_all, None, save_ref=load_points == 0)
            print("vis")
            # visualizer.save_neural_points("cam", campos, None, None, None)
            # print("vis")
            # exit()
            opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
            opt.is_train = True
            opt.mode = 2
            model = create_model(opt)

        if points_xyz_all is not None:
            if opt.bgmodel.startswith("planepoints"):
                gen_pnts, gen_embedding, gen_dir, gen_color, gen_conf = train_dataset.get_plane_param_points()
                visualizer.save_neural_points("pl", gen_pnts, gen_color, None, save_ref=False)
                print("vis pl")
                points_xyz_all = torch.cat([points_xyz_all, gen_pnts], dim=0)
                points_embedding_all = torch.cat([points_embedding_all, gen_embedding], dim=1)
                # NOTE(review): gen_dir goes into points_color_all and gen_color
                # into points_dir_all — looks swapped; confirm against
                # get_plane_param_points()'s return order before changing.
                points_color_all = torch.cat([points_color_all, gen_dir], dim=1)
                points_dir_all = torch.cat([points_dir_all, gen_color], dim=1)
                points_conf_all = torch.cat([points_conf_all, gen_conf], dim=1)
            model.set_points(points_xyz_all.cuda(), points_embedding_all.cuda(), points_color=points_color_all.cuda(), points_dir=points_dir_all.cuda(), points_conf=points_conf_all.cuda(), Rw2c=normRw2c.cuda() if opt.load_points < 1 and opt.normview != 3 else None)
            epoch_count = 1
            total_steps = 0
            del points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all

    opt.resume_dir = os.path.join(opt.checkpoints_dir, opt.name)
    model.setup(opt, train_len=len(train_dataset))
    model.train()
    data_loader = create_data_loader(opt, dataset=train_dataset)
    dataset_size = len(data_loader)
    visualizer.print_details('# training images = {}'.format(dataset_size))

    # create test loader
    test_opt = copy.deepcopy(opt)
    test_opt.is_train = False
    test_opt.random_sample = 'no_crop'
    test_opt.random_sample_size = min(48, opt.random_sample_size)
    test_opt.batch_size = 1
    test_opt.n_threads = 0
    test_opt.prob = 0
    test_opt.split = "test"

    with open('/tmp/.neural-volumetric.name', 'w') as f:
        f.write(opt.name + '\n')

    visualizer.reset()
    if total_steps > 0:
        # Fast-forward fresh schedulers to the resumed step count.
        for scheduler in model.schedulers:
            for i in range(total_steps):
                scheduler.step()

    # ---- optional plane-background precomputation --------------------------
    fg_masks = None
    bg_ray_train_lst, bg_ray_test_lst = [], []
    if opt.bgmodel.endswith("plane"):
        test_dataset = create_dataset(test_opt)
        bg_ray_train_lst = create_all_bg(train_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
        bg_ray_test_lst = create_all_bg(test_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
        test_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_test_lst]
        del test_dataset
        if opt.vid > 0:
            render_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
            bg_ray_render_lst = create_all_bg(render_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=True)
            render_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_render_lst]
    else:
        test_bg_info, render_bg_info = None, None
    img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = None, None, None, None, None

    ############ initial test ###############
    if total_steps == 0 and opt.maximum_step <= 0:
        # Evaluation-only invocation: run test once and quit.
        with torch.no_grad():
            test_opt.nerf_splits = ["test"]
            test_opt.split = "test"
            test_opt.name = opt.name + "/test_{}".format(total_steps)
            test_opt.test_num_step = opt.test_num_step
            test_dataset = create_dataset(test_opt)
            model.opt.is_train = 0
            model.opt.no_loss = 1
            test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps)
            model.opt.no_loss = 0
            model.opt.is_train = 1
        model.train()
        exit()

    if total_steps == 0 and (len(train_dataset.id_list) > 30 or len(train_dataset.view_id_list) > 30):
        other_states = {
            'epoch_count': 0,
            'total_steps': total_steps,
        }
        model.save_networks(total_steps, other_states)
        visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, 0, total_steps))

    real_start = total_steps
    train_random_sample_size = opt.random_sample_size
    # ---- main training loop ------------------------------------------------
    for epoch in range(epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        for i, data in enumerate(data_loader):
            if opt.maximum_step is not None and total_steps >= opt.maximum_step:
                break

            # Periodic point pruning (rebuilds optimizer + scheduler).
            if opt.prune_iter > 0 and real_start != total_steps and total_steps % opt.prune_iter == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0 and total_steps <= opt.prune_max_iter:
                with torch.no_grad():
                    model.clean_optimizer()
                    model.clean_scheduler()
                    model.prune_points(opt.prune_thresh)
                    model.setup_optimizer(opt)
                    model.init_scheduler(total_steps, opt)
                torch.cuda.empty_cache()
                torch.cuda.synchronize()

            # Periodic hole probing and point growing.
            if opt.prob_freq > 0 and real_start != total_steps and total_steps % opt.prob_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0:
                if opt.prob_kernel_size is not None:
                    tier = np.sum(np.asarray(opt.prob_tiers) < total_steps)
                if (model.top_ray_miss_loss[0] > 1e-5 or opt.prob_mode != 0 or opt.far_thresh > 0) and (opt.prob_kernel_size is None or tier < (len(opt.prob_kernel_size) // 3)):
                    torch.cuda.empty_cache()
                    model.opt.is_train = 0
                    model.opt.no_loss = 1
                    with torch.no_grad():
                        prob_opt = copy.deepcopy(test_opt)
                        prob_opt.name = opt.name
                        # if opt.prob_type=0:
                        train_dataset.opt.random_sample = "no_crop"
                        if opt.prob_mode <= 0:
                            train_dataset.opt.random_sample_size = min(32, train_random_sample_size)
                            prob_dataset = train_dataset
                        elif opt.prob_mode == 1:
                            prob_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
                        else:
                            prob_dataset = create_comb_dataset(test_opt, opt, total_steps, test_num_step=1)
                        model.eval()
                        add_xyz, add_embedding, add_color, add_dir, add_conf = probe_hole(model, prob_dataset, Visualizer(prob_opt), prob_opt, None, test_steps=total_steps, opacity_thresh=opt.prob_thresh)
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()
                        if opt.prob_mode != 0:
                            del prob_dataset
                    # else:
                    if len(add_xyz) > 0:
                        print("len(add_xyz)", len(add_xyz))
                        model.grow_points(add_xyz, add_embedding, add_color, add_dir, add_conf)
                        length_added = len(add_xyz)
                        del add_xyz, add_embedding, add_color, add_dir, add_conf
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()
                        other_states = {
                            "best_PSNR": best_PSNR,
                            "best_iter": best_iter,
                            'epoch_count': epoch,
                            'total_steps': total_steps,
                        }
                        visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
                        print("other_states", other_states)
                        model.save_networks(total_steps, other_states, back_gpu=False)
                        visualizer.print_details(
                            "$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(length_added, len(model.neural_points.xyz)))
                        torch.cuda.synchronize()
                        torch.cuda.empty_cache()
                        # Hard reset: recreate the model from the just-saved
                        # checkpoint so optimizer state matches the grown cloud.
                        # model.cleanup()
                        # pprint(vars(model))
                        del model
                        visualizer.reset()
                        gc.collect()
                        opt.is_train = 1
                        opt.no_loss = 0
                        opt.resume_iter = total_steps
                        model = create_model(opt)
                        model.setup(opt, train_len=len(train_dataset))
                        model.train()
                        if total_steps > 0:
                            for scheduler in model.schedulers:
                                for i in range(total_steps):
                                    scheduler.step()
                    else:
                        print("$$$$$$$$$$$$$$$$$$$$$$$$$$ no qualified points to grow $$$$$$$$$$$$$$$$")
                        # exit()
                    # visualizer.print_details("$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(len(add_xyz), len(model.neural_points.xyz)))
                    train_dataset.opt.random_sample = "random"
                    model.train()
                    model.opt.no_loss = 0
                    model.opt.is_train = 1
                    train_dataset.opt.random_sample_size = train_random_sample_size
                    torch.cuda.synchronize()
                    torch.cuda.empty_cache()
                else:
                    visualizer.print_details(
                        'nothing to probe, max ray miss is only {}'.format(model.top_ray_miss_loss[0]))

            total_steps += 1
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                if len(bg_ray_train_lst) > 0:
                    bg_ray_all = bg_ray_train_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1, 2)
                    bg_ray = bg_ray_all[:, bg_idx[:, 1].long(), bg_idx[:, 0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(model.input)
                    bg_ray, fg_masks = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks=fg_masks)
                data["bg_ray"] = bg_ray
            model.optimize_parameters(total_steps=total_steps)
            losses = model.get_current_losses()
            visualizer.accumulate_losses(losses)

            if opt.lr_policy.startswith("iter"):
                model.update_learning_rate(opt=opt, total_steps=total_steps)

            if total_steps and total_steps % opt.print_freq == 0:
                if opt.show_tensorboard:
                    visualizer.plot_current_losses_with_tb(total_steps, losses)
                visualizer.print_losses(total_steps)
                visualizer.reset()

            if hasattr(opt, "save_point_freq") and total_steps and total_steps % opt.save_point_freq == 0 and (opt.prune_iter > 0 and total_steps <= opt.prune_max_iter or opt.save_point_freq == 1):
                visualizer.save_neural_points(total_steps, model.neural_points.xyz, model.neural_points.points_embeding, data, save_ref=opt.load_points == 0)
                visualizer.print_details('saving neural points at total_steps {})'.format(total_steps))

            try:
                if total_steps == 10000 or (total_steps % opt.save_iter_freq == 0 and total_steps > 0):
                    other_states = {
                        "best_PSNR": best_PSNR,
                        "best_iter": best_iter,
                        'epoch_count': epoch,
                        'total_steps': total_steps,
                    }
                    visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
                    model.save_networks(total_steps, other_states)
            except Exception as e:
                # best-effort save; keep training on failure
                visualizer.print_details(e)

            if opt.vid > 0 and total_steps % opt.vid == 0 and total_steps > 0:
                torch.cuda.empty_cache()
                test_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
                model.opt.is_train = 0
                model.opt.no_loss = 1
                with torch.no_grad():
                    render_vid(model, test_dataset, Visualizer(test_opt), test_opt, render_bg_info, steps=total_steps)
                model.opt.no_loss = 0
                model.opt.is_train = 1
                del test_dataset

            if total_steps == 10000 or (total_steps % opt.test_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0):
                torch.cuda.empty_cache()
                test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
                model.opt.is_train = 0
                model.opt.no_loss = 1
                with torch.no_grad():
                    if opt.test_train == 0:
                        test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
                    else:
                        train_dataset.opt.random_sample = "no_crop"
                        test_psnr = test(model, train_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
                        train_dataset.opt.random_sample = opt.random_sample
                model.opt.no_loss = 0
                model.opt.is_train = 1
                del test_dataset
                best_iter = total_steps if test_psnr > best_PSNR else best_iter
                best_PSNR = max(test_psnr, best_PSNR)
                visualizer.print_details(f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
                model.train()

        # try:
        #     print("saving the model at the end of epoch")
        #     other_states = {'epoch_count': epoch, 'total_steps': total_steps}
        #     model.save_networks('latest', other_states)
        # except Exception as e:
        #     print(e)
        if opt.maximum_step is not None and total_steps >= opt.maximum_step:
            visualizer.print_details('{}: End of stepts {} / {} \t Time Taken: {} sec'.format(
                opt.name, total_steps, opt.maximum_step, time.time() - epoch_start_time))
            break

    # ---- final save + full-dataset test ------------------------------------
    del train_dataset
    other_states = {
        'epoch_count': epoch,
        'total_steps': total_steps,
    }
    visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
    model.save_networks(total_steps, other_states)

    torch.cuda.empty_cache()
    test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
    model.opt.no_loss = 1
    model.opt.is_train = 0
    visualizer.print_details("full datasets test:")
    with torch.no_grad():
        test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, gen_vid=True, lpips=True)
    best_iter = total_steps if test_psnr > best_PSNR else best_iter
    best_PSNR = max(test_psnr, best_PSNR)
    visualizer.print_details(
        f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
    exit()


def save_points_conf(visualizer, xyz, points_color, points_conf, total_steps):
    """Debug helper: dump the point cloud bucketed into 0.1-wide confidence bins
    (plus a final >1.0 bin) as separate point files, then exit the process."""
    print("total:", xyz.shape, points_color.shape, points_conf.shape)
    colors, confs = points_color[0], points_conf[0, ..., 0]
    pre = -1000
    for i in range(12):
        thresh = (i * 0.1) if i <= 10 else 1000
        mask = ((confs <= thresh) * (confs > pre)) > 0
        thresh_xyz = xyz[mask, :]
        thresh_color = colors[mask, :]
        visualizer.save_neural_points(f"{total_steps}-{thresh}", thresh_xyz, thresh_color[None, ...], None, save_ref=False)
        pre = thresh
    exit()


def create_render_dataset(test_opt, opt, total_steps, test_num_step=1):
    """Build a dataset configured for the "render" split (video frames).
    Mutates `test_opt` in place (split, name, step, sample size)."""
    test_opt.nerf_splits = ["render"]
    test_opt.split = "render"
    test_opt.name = opt.name + "/vid_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    test_opt.random_sample_size = 30
    test_dataset = create_dataset(test_opt)
    return test_dataset


def create_test_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Build a dataset configured for the "test" split. Mutates `test_opt`."""
    test_opt.prob = prob if prob is not None else test_opt.prob
    test_opt.nerf_splits = ["test"]
    test_opt.split = "test"
    test_opt.name = opt.name + "/test_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    test_dataset = create_dataset(test_opt)
    return test_dataset


def create_comb_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Build a dataset configured for the "comb" (train+test) split. Mutates `test_opt`."""
    test_opt.prob = prob if prob is not None else test_opt.prob
    test_opt.nerf_splits = ["comb"]
    test_opt.split = "comb"
    test_opt.name = opt.name + "/comb_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    test_dataset = create_dataset(test_opt)
    return test_dataset


if __name__ == '__main__':
    main()
60,868
55.308048
392
py
pointnerf
pointnerf-master/run/render_vid.py
import sys import os import pathlib sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import copy import torch import numpy as np import time from options import TestOptions from data import create_data_loader, create_dataset from models import create_model from utils.visualizer import Visualizer from utils import format as fmt from tqdm import trange def get_latest_epoch(resume_dir): os.makedirs(resume_dir, exist_ok=True) str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")] int_epoch = [int(i) for i in str_epoch] return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))] def render_vid(model, dataset, visualizer, opt, total_steps): print( '-----------------------------------Rendering Vid-----------------------------------' ) model.eval() render_num = len(dataset.render_poses) patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visual_lst = [] for i in range(render_num): data = dataset.get_dummyrot_item(i) raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() visuals = None starttime=time.time() for k in range(0, height * width, chunk_size): start = k end = min([k + chunk_size, height * width]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] # data['gt_image'] = gt_image[:, start:end, :] # data['gt_mask'] = gt_mask[:, start:end, :] model.set_input(data) model.test() curr_visuals = model.get_current_visuals() if visuals is None: visuals = {} for key, value in curr_visuals.items(): if value is None or value.shape[-1] != 3 or not key.endswith("color"): continue chunk = value.cpu().numpy() visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype) visuals[key][start:end, :] = chunk else: for key, value in curr_visuals.items(): if value is None or value.shape[-1] != 3 or not 
key.endswith("color"): continue visuals[key][start:end, :] = value.cpu().numpy() for key, value in visuals.items(): visuals[key] = visuals[key].reshape(height, width, 3) visual_lst.append(visuals) print("render time:", time.time() - starttime) visualizer.display_video(visual_lst, total_steps) model.train() print( '--------------------------------Finish Rendering--------------------------------' ) return def main(): torch.backends.cudnn.benchmark = True opt = TestOptions().parse() opt.no_loss = True opt.gpu_ids='0' if opt.debug: torch.autograd.set_detect_anomaly(True) print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Debug Mode') print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END) if opt.resume_dir: resume_dir = opt.resume_dir resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir) opt.resume_iter = resume_iter states = torch.load(os.path.join(resume_dir, '{}_states.pth'.format(resume_iter))) epoch_count = states['epoch_count'] total_steps = states['total_steps'] print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Test {} at epoch {}'.format(opt.resume_dir, opt.resume_iter)) print("Iter: ", total_steps) print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') else: epoch_count = 1 total_steps = 0 # load model model = create_model(opt) model.setup(opt) visualizer = Visualizer(opt) # create test loader test_opt = copy.deepcopy(opt) test_opt.is_train = False test_opt.random_sample = 'no_crop' test_opt.batch_size = 1 test_opt.n_threads = 0 test_dataset = create_dataset(test_opt) dataset_size = len(test_dataset) print('# training images = {}'.format(dataset_size)) with open('/tmp/.neural-volumetric.name', 'w') as f: f.write(opt.name + '\n') visualizer.reset() render_vid(model, test_dataset, visualizer, test_opt, total_steps) if __name__ == '__main__': main()
4,702
34.097015
110
py
pointnerf
pointnerf-master/run/train_ft.py
import sys import os import pathlib sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import glob import copy import torch import numpy as np import time from options import TrainOptions from data import create_data_loader, create_dataset from models import create_model from models.mvs.mvs_points_model import MvsPointsModel from models.mvs import mvs_utils, filter_utils from pprint import pprint from utils.visualizer import Visualizer from utils import format as fmt from run.evaluate import report_metrics torch.manual_seed(0) np.random.seed(0) import random import cv2 from PIL import Image from tqdm import tqdm import gc def mse2psnr(x): return -10.* torch.log(x)/np.log(10.) def save_image(img_array, filepath): assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]) if img_array.dtype != np.uint8: img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8) os.makedirs(os.path.dirname(filepath), exist_ok=True) Image.fromarray(img_array).save(filepath) def nearest_view(campos, raydir, xyz, id_list): cam_ind = torch.zeros([0,1], device=campos.device, dtype=torch.long) step=10000 for i in range(0, len(xyz), step): dists = xyz[i:min(len(xyz),i+step), None, :] - campos[None, ...] 
# N, M, 3 dists_norm = torch.norm(dists, dim=-1) # N, M dists_dir = dists / (dists_norm[...,None]+1e-6) # N, M, 3 dists = dists_norm / 200 + (1.1 - torch.sum(dists_dir * raydir[None, :],dim=-1)) # N, M cam_ind = torch.cat([cam_ind, torch.argmin(dists, dim=1).view(-1,1)], dim=0) # N, 1 return cam_ind def gen_points_filter_embeddings(dataset, visualizer, opt): print('-----------------------------------Generate Points-----------------------------------') opt.is_train=False opt.mode = 1 model = create_model(opt) model.setup(opt) model.eval() cam_xyz_all = [] intrinsics_all = [] extrinsics_all = [] confidence_all = [] points_mask_all = [] intrinsics_full_lst = [] confidence_filtered_all = [] near_fars_all = [] gpu_filter = True cpu2gpu= len(dataset.view_id_list) > 300 imgs_lst, HDWD_lst, c2ws_lst, w2cs_lst, intrinsics_lst = [],[],[],[],[] with torch.no_grad(): for i in tqdm(range(0, len(dataset.view_id_list))): data = dataset.get_init_item(i) model.set_input(data) # intrinsics 1, 3, 3, 3 points_xyz_lst, photometric_confidence_lst, point_mask_lst, intrinsics_lst, extrinsics_lst, HDWD, c2ws, w2cs, intrinsics, near_fars = model.gen_points() # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0) B, N, C, H, W, _ = points_xyz_lst[0].shape # print("points_xyz_lst",points_xyz_lst[0].shape) cam_xyz_all.append((points_xyz_lst[0].cpu() if cpu2gpu else points_xyz_lst[0]) if gpu_filter else points_xyz_lst[0].cpu().numpy()) # intrinsics_lst[0] 1, 3, 3 intrinsics_all.append(intrinsics_lst[0] if gpu_filter else intrinsics_lst[0]) extrinsics_all.append(extrinsics_lst[0] if gpu_filter else extrinsics_lst[0].cpu().numpy()) if opt.manual_depth_view !=0: confidence_all.append((photometric_confidence_lst[0].cpu() if cpu2gpu else photometric_confidence_lst[0]) if gpu_filter else photometric_confidence_lst[0].cpu().numpy()) points_mask_all.append((point_mask_lst[0].cpu() if cpu2gpu else point_mask_lst[0]) if gpu_filter else 
point_mask_lst[0].cpu().numpy()) imgs_lst.append(data["images"].cpu()) HDWD_lst.append(HDWD) c2ws_lst.append(c2ws) w2cs_lst.append(w2cs) intrinsics_full_lst.append(intrinsics) near_fars_all.append(near_fars[0,0]) # visualizer.save_neural_points(i, points_xyz_lst[0], None, data, save_ref=opt.load_points == 0) # #################### start query embedding ################## torch.cuda.empty_cache() if opt.manual_depth_view != 0: if gpu_filter: _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks_gpu(cam_xyz_all, intrinsics_all, extrinsics_all, confidence_all, points_mask_all, opt, vis=True, return_w=True, cpu2gpu=cpu2gpu, near_fars_all=near_fars_all) else: _, xyz_world_all, confidence_filtered_all = filter_utils.filter_by_masks(cam_xyz_all, [intr.cpu().numpy() for intr in intrinsics_all], extrinsics_all, confidence_all, points_mask_all, opt) # print(xyz_ref_lst[0].shape) # 224909, 3 else: cam_xyz_all = [cam_xyz_all[i].reshape(-1,3)[points_mask_all[i].reshape(-1),:] for i in range(len(cam_xyz_all))] xyz_world_all = [np.matmul(np.concatenate([cam_xyz_all[i], np.ones_like(cam_xyz_all[i][..., 0:1])], axis=-1), np.transpose(np.linalg.inv(extrinsics_all[i][0,...])))[:, :3] for i in range(len(cam_xyz_all))] xyz_world_all, cam_xyz_all, confidence_filtered_all = filter_by_masks.range_mask_lst_np(xyz_world_all, cam_xyz_all, confidence_filtered_all, opt) del cam_xyz_all # for i in range(len(xyz_world_all)): # visualizer.save_neural_points(i, torch.as_tensor(xyz_world_all[i], device="cuda", dtype=torch.float32), None, data, save_ref=opt.load_points==0) # exit() # xyz_world_all = xyz_world_all.cuda() # confidence_filtered_all = confidence_filtered_all.cuda() points_vid = torch.cat([torch.ones_like(xyz_world_all[i][...,0:1]) * i for i in range(len(xyz_world_all))], dim=0) xyz_world_all = torch.cat(xyz_world_all, dim=0) if gpu_filter else torch.as_tensor( np.concatenate(xyz_world_all, axis=0), device="cuda", dtype=torch.float32) confidence_filtered_all = 
torch.cat(confidence_filtered_all, dim=0) if gpu_filter else torch.as_tensor(np.concatenate(confidence_filtered_all, axis=0), device="cuda", dtype=torch.float32) print("xyz_world_all", xyz_world_all.shape, points_vid.shape, confidence_filtered_all.shape) torch.cuda.empty_cache() # visualizer.save_neural_points(0, xyz_world_all, None, None, save_ref=False) # print("vis 0") print("%%%%%%%%%%%%% getattr(dataset, spacemin, None)", getattr(dataset, "spacemin", None)) if getattr(dataset, "spacemin", None) is not None: mask = (xyz_world_all - dataset.spacemin[None, ...].to(xyz_world_all.device)) >= 0 mask *= (dataset.spacemax[None, ...].to(xyz_world_all.device) - xyz_world_all) >= 0 mask = torch.prod(mask, dim=-1) > 0 first_lst, second_lst = masking(mask, [xyz_world_all, points_vid, confidence_filtered_all], []) xyz_world_all, points_vid, confidence_filtered_all = first_lst # visualizer.save_neural_points(50, xyz_world_all, None, None, save_ref=False) # print("vis 50") if getattr(dataset, "alphas", None) is not None: vishull_mask = mvs_utils.alpha_masking(xyz_world_all, dataset.alphas, dataset.intrinsics, dataset.cam2worlds, dataset.world2cams, dataset.near_far if opt.ranges[0] < -90.0 and getattr(dataset,"spacemin",None) is None else None, opt=opt) first_lst, second_lst = masking(vishull_mask, [xyz_world_all, points_vid, confidence_filtered_all], []) xyz_world_all, points_vid, confidence_filtered_all = first_lst print("alpha masking xyz_world_all", xyz_world_all.shape, points_vid.shape) # visualizer.save_neural_points(100, xyz_world_all, None, data, save_ref=opt.load_points == 0) # print("vis 100") if opt.vox_res > 0: xyz_world_all, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(xyz_world_all.cuda() if len(xyz_world_all) < 99999999 else xyz_world_all[::(len(xyz_world_all)//99999999+1),...].cuda(), opt.vox_res) points_vid = points_vid[sampled_pnt_idx,:] confidence_filtered_all = confidence_filtered_all[sampled_pnt_idx] print("after voxelize:", 
xyz_world_all.shape, points_vid.shape) xyz_world_all = xyz_world_all.cuda() xyz_world_all = [xyz_world_all[points_vid[:,0]==i, :] for i in range(len(HDWD_lst))] confidence_filtered_all = [confidence_filtered_all[points_vid[:,0]==i] for i in range(len(HDWD_lst))] cam_xyz_all = [(torch.cat([xyz_world_all[i], torch.ones_like(xyz_world_all[i][...,0:1])], dim=-1) @ extrinsics_all[i][0].t())[...,:3] for i in range(len(HDWD_lst))] points_embedding_all, points_color_all, points_dir_all, points_conf_all = [], [], [], [] for i in tqdm(range(len(HDWD_lst))): if len(xyz_world_all[i]) > 0: embedding, color, dir, conf = model.query_embedding(HDWD_lst[i], torch.as_tensor(cam_xyz_all[i][None, ...], device="cuda", dtype=torch.float32), torch.as_tensor(confidence_filtered_all[i][None, :, None], device="cuda", dtype=torch.float32) if len(confidence_filtered_all) > 0 else None, imgs_lst[i].cuda(), c2ws_lst[i], w2cs_lst[i], intrinsics_full_lst[i], 0, pointdir_w=True) points_embedding_all.append(embedding) points_color_all.append(color) points_dir_all.append(dir) points_conf_all.append(conf) xyz_world_all = torch.cat(xyz_world_all, dim=0) points_embedding_all = torch.cat(points_embedding_all, dim=1) points_color_all = torch.cat(points_color_all, dim=1) if points_color_all[0] is not None else None points_dir_all = torch.cat(points_dir_all, dim=1) if points_dir_all[0] is not None else None points_conf_all = torch.cat(points_conf_all, dim=1) if points_conf_all[0] is not None else None visualizer.save_neural_points(200, xyz_world_all, points_color_all, data, save_ref=opt.load_points == 0) print("vis") model.cleanup() del model return xyz_world_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, [img[0].cpu() for img in imgs_lst], [c2w for c2w in c2ws_lst], [w2c for w2c in w2cs_lst] , intrinsics_all, [list(HDWD) for HDWD in HDWD_lst] def masking(mask, firstdim_lst, seconddim_lst): first_lst = [item[mask, ...] 
if item is not None else None for item in firstdim_lst] second_lst = [item[:, mask, ...] if item is not None else None for item in seconddim_lst] return first_lst, second_lst def render_vid(model, dataset, visualizer, opt, bg_info, steps=0, gen_vid=True): print('-----------------------------------Rendering-----------------------------------') model.eval() total_num = dataset.total print("test set size {}, interval {}".format(total_num, opt.test_num_step)) patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visualizer.reset() for i in range(0, total_num): data = dataset.get_dummyrot_item(i) raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() # cam_posts.append(data['campos']) # cam_dirs.append(data['raydir'] + data['campos'][None,...]) # continue visuals = None stime = time.time() for k in range(0, height * width, chunk_size): start = k end = min([k + chunk_size, height * width]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] # print("tmpgts", tmpgts["gt_image"].shape) # print(data["pixel_idx"]) model.set_input(data) if opt.bgmodel.endswith("plane"): img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info if len(bg_ray_lst) > 0: bg_ray_all = bg_ray_lst[data["id"]] bg_idx = data["pixel_idx"].view(-1,2) bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :] else: xyz_world_sect_plane = mvs_utils.gen_bg_points(data) bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer) data["bg_ray"] = bg_ray model.test() curr_visuals = model.get_current_visuals(data=data) if visuals is None: visuals = {} for key, value in curr_visuals.items(): if key == "gt_image": continue chunk = value.cpu().numpy() visuals[key] = np.zeros((height * width, 
3)).astype(chunk.dtype) visuals[key][start:end, :] = chunk else: for key, value in curr_visuals.items(): if key == "gt_image": continue visuals[key][start:end, :] = value.cpu().numpy() for key, value in visuals.items(): visualizer.print_details("{}:{}".format(key, visuals[key].shape)) visuals[key] = visuals[key].reshape(height, width, 3) print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir) visualizer.display_current_results(visuals, i) # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False) # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0),None, None, save_ref=False) # print("vis") # exit() print('--------------------------------Finish Evaluation--------------------------------') if gen_vid: del dataset visualizer.gen_video("coarse_raycolor", range(0, total_num), 0) print('--------------------------------Finish generating vid--------------------------------') return def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=False, lpips=True): print('-----------------------------------Testing-----------------------------------') model.eval() total_num = dataset.total print("test set size {}, interval {}".format(total_num, opt.test_num_step)) # 1 if test_steps == 10000 else opt.test_num_step patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visualizer.reset() count = 0; for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step data = dataset.get_item(i) raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() edge_mask = torch.zeros([height, width], dtype=torch.bool) edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1 edge_mask=edge_mask.reshape(-1) > 0 
np_edge_mask=edge_mask.numpy().astype(bool) totalpixel = pixel_idx.shape[1] tmpgts = {} tmpgts["gt_image"] = data['gt_image'].clone() tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None # data.pop('gt_image', None) data.pop('gt_mask', None) visuals = None stime = time.time() ray_masks = [] for k in range(0, totalpixel, chunk_size): start = k end = min([k + chunk_size, totalpixel]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] model.set_input(data) if opt.bgmodel.endswith("plane"): img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info if len(bg_ray_lst) > 0: bg_ray_all = bg_ray_lst[data["id"]] bg_idx = data["pixel_idx"].view(-1,2) bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :] else: xyz_world_sect_plane = mvs_utils.gen_bg_points(data) bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer) data["bg_ray"] = bg_ray # xyz_world_sect_plane_lst.append(xyz_world_sect_plane) model.test() curr_visuals = model.get_current_visuals(data=data) # print("loss", mse2psnr(torch.nn.MSELoss().to("cuda")(curr_visuals['coarse_raycolor'], tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :].cuda()))) # print("sum", torch.sum(torch.square(tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :] - tmpgts["gt_image"].view(height, width, 3)[data["pixel_idx"][0,...,1].long(), data["pixel_idx"][0,...,0].long(),:]))) chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32) if visuals is None: visuals = {} for key, value in curr_visuals.items(): if value is None or key=="gt_image": continue chunk = value.cpu().numpy() visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype) visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk else: for key, value in curr_visuals.items(): if value is None or key=="gt_image": continue 
visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy() if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items: ray_masks.append(model.output["ray_mask"] > 0) if len(ray_masks) > 0: ray_masks = torch.cat(ray_masks, dim=1) # visualizer.save_neural_points(data["id"].cpu().numpy()[0], (raydir.cuda() + data["campos"][:, None, :]).squeeze(0), None, data, save_ref=True) # exit() # print("curr_visuals",curr_visuals) pixel_idx=pixel_idx.to(torch.long) gt_image = torch.zeros((height*width, 3), dtype=torch.float32) gt_image[edge_mask, :] = tmpgts['gt_image'].clone() if 'gt_image' in model.visual_names: visuals['gt_image'] = gt_image if 'gt_mask' in curr_visuals: visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype) visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask'] if 'ray_masked_coarse_raycolor' in model.visual_names: visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3) print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape) visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0 if 'ray_depth_masked_coarse_raycolor' in model.visual_names: visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3) visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0 if 'ray_depth_masked_gt_image' in model.visual_names: visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3) visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0 if 'gt_image_ray_masked' in model.visual_names: visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3) visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0 for key, value in visuals.items(): if key in opt.visual_items: 
visualizer.print_details("{}:{}".format(key, visuals[key].shape)) visuals[key] = visuals[key].reshape(height, width, 3) print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir) visualizer.display_current_results(visuals, i, opt=opt) acc_dict = {} if "coarse_raycolor" in opt.test_color_loss_items: loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), gt_image.view(1, -1, 3).cuda()) acc_dict.update({"coarse_raycolor": loss}) print("coarse_raycolor", loss, mse2psnr(loss)) if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items: masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3) ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3) # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_gt") # filepath = os.path.join("/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename) # tmpgtssave = tmpgts["gt_image"].view(1, -1, 3).clone() # tmpgtssave[~ray_masks,:] = 1.0 # img = np.array(tmpgtssave.view(height,width,3)) # save_image(img, filepath) # # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_coarse_raycolor") # filepath = os.path.join( # "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename) # csave = torch.zeros_like(tmpgts["gt_image"].view(1, -1, 3)) # csave[~ray_masks, :] = 1.0 # csave[ray_masks, :] = torch.as_tensor(visuals["coarse_raycolor"]).view(1, -1, 3)[ray_masks,:] # img = np.array(csave.view(height, width, 3)) # save_image(img, filepath) loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt) acc_dict.update({"ray_masked_coarse_raycolor": loss}) visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss))) if 
"ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items: ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1) masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)) ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3)) loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt) acc_dict.update({"ray_depth_masked_coarse_raycolor": loss}) visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss))) print(acc_dict.items()) visualizer.accumulate_losses(acc_dict) count+=1 visualizer.print_losses(count) psnr = visualizer.get_psnr(opt.test_color_loss_items[0]) # visualizer.reset() print('--------------------------------Finish Test Rendering--------------------------------') report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"], [i for i in range(0, total_num, opt.test_num_step)], imgStr="step-%04d-{}.png".format(opt.visual_items[0]),gtStr="step-%04d-{}.png".format(opt.visual_items[1])) print('--------------------------------Finish Evaluation--------------------------------') if gen_vid: del dataset visualizer.gen_video("coarse_raycolor", range(0, total_num, opt.test_num_step), test_steps) print('--------------------------------Finish generating vid--------------------------------') return psnr def probe_hole(model, dataset, visualizer, opt, bg_info, test_steps=0, opacity_thresh=0.7): print('-----------------------------------Probing Holes-----------------------------------') add_xyz = torch.zeros([0, 3], device="cuda", dtype=torch.float32) add_conf = torch.zeros([0, 1], 
device="cuda", dtype=torch.float32) add_color = torch.zeros([0, 3], device="cuda", dtype=torch.float32) add_dir = torch.zeros([0, 3], device="cuda", dtype=torch.float32) add_embedding = torch.zeros([0, opt.point_features_dim], device="cuda", dtype=torch.float32) kernel_size = model.opt.kernel_size if opt.prob_kernel_size is not None: tier = np.sum(np.asarray(opt.prob_tiers) < test_steps) print("cal by tier", tier) model.opt.query_size = np.asarray(opt.prob_kernel_size[tier*3:tier*3+3]) print("prob query size =", model.opt.query_size) model.opt.prob = 1 total_num = len(model.top_ray_miss_ids) -1 if opt.prob_mode == 0 and opt.prob_num_step > 1 else len(dataset) patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visualizer.reset() max_num = len(dataset) // opt.prob_num_step take_top = False if opt.prob_top == 1 and opt.prob_mode <= 0: # and opt.far_thresh <= 0: if getattr(model, "top_ray_miss_ids", None) is not None: mask = model.top_ray_miss_loss[:-1] > 0.0 frame_ids = model.top_ray_miss_ids[:-1][mask][:max_num] print(len(frame_ids), max_num) print("prob frame top_ray_miss_loss:", model.top_ray_miss_loss) take_top = True else: print("model has no top_ray_miss_ids") else: frame_ids = list(range(len(dataset)))[:max_num] random.shuffle(frame_ids) frame_ids = frame_ids[:max_num] print("{}/{} has holes, id_lst to prune".format(len(frame_ids), total_num), frame_ids, opt.prob_num_step) print("take top:", take_top, "; prob frame ids:", frame_ids) with tqdm(range(len(frame_ids))) as pbar: for j in pbar: i = frame_ids[j] pbar.set_description("Processing frame id %d" % i) data = dataset.get_item(i) bg = data['bg_color'][None, :].cuda() raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() edge_mask = torch.zeros([height, width], dtype=torch.bool, device='cuda') edge_mask[pixel_idx[0, ..., 1].to(torch.long), pixel_idx[0, ..., 
0].to(torch.long)] = 1 edge_mask = edge_mask.reshape(-1) > 0 totalpixel = pixel_idx.shape[1] gt_image_full = data['gt_image'].cuda() probe_keys = ["coarse_raycolor", "ray_mask", "ray_max_sample_loc_w", "ray_max_far_dist", "ray_max_shading_opacity", "shading_avg_color", "shading_avg_dir", "shading_avg_conf", "shading_avg_embedding"] prob_maps = {} for k in range(0, totalpixel, chunk_size): start = k end = min([k + chunk_size, totalpixel]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] model.set_input(data) output = model.test() chunk_pixel_id = data["pixel_idx"].to(torch.long) output["ray_mask"] = output["ray_mask"][..., None] for key in probe_keys: if "ray_max_shading_opacity" not in output and key != 'coarse_raycolor': break if output[key] is None: prob_maps[key] = None else: if key not in prob_maps.keys(): C = output[key].shape[-1] prob_maps[key] = torch.zeros((height, width, C), device="cuda", dtype=output[key].dtype) prob_maps[key][chunk_pixel_id[0, ..., 1], chunk_pixel_id[0, ..., 0], :] = output[key] gt_image = torch.zeros((height * width, 3), dtype=torch.float32, device=prob_maps["ray_mask"].device) gt_image[edge_mask, :] = gt_image_full gt_image = gt_image.reshape(height, width, 3) miss_ray_mask = (prob_maps["ray_mask"] < 1) * (torch.norm(gt_image - bg, dim=-1, keepdim=True) > 0.002) miss_ray_inds = (edge_mask.reshape(height, width, 1) * miss_ray_mask).squeeze(-1).nonzero() # N, 2 neighbor_inds = bloat_inds(miss_ray_inds, 1, height, width) neighboring_miss_mask = torch.zeros_like(gt_image[..., 0]) neighboring_miss_mask[neighbor_inds[..., 0], neighbor_inds[...,1]] = 1 if opt.far_thresh > 0: far_ray_mask = (prob_maps["ray_mask"] > 0) * (prob_maps["ray_max_far_dist"] > opt.far_thresh) * (torch.norm(gt_image - prob_maps["coarse_raycolor"], dim=-1, keepdim=True) < 0.1) neighboring_miss_mask += far_ray_mask.squeeze(-1) neighboring_miss_mask = (prob_maps["ray_mask"].squeeze(-1) > 0) * neighboring_miss_mask * 
(prob_maps["ray_max_shading_opacity"].squeeze(-1) > opacity_thresh) > 0 add_xyz = torch.cat([add_xyz, prob_maps["ray_max_sample_loc_w"][neighboring_miss_mask]], dim=0) add_conf = torch.cat([add_conf, prob_maps["shading_avg_conf"][neighboring_miss_mask]], dim=0) * opt.prob_mul if prob_maps["shading_avg_conf"] is not None else None add_color = torch.cat([add_color, prob_maps["shading_avg_color"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_color"] is not None else None add_dir = torch.cat([add_dir, prob_maps["shading_avg_dir"][neighboring_miss_mask]], dim=0) if prob_maps["shading_avg_dir"] is not None else None add_embedding = torch.cat([add_embedding, prob_maps["shading_avg_embedding"][neighboring_miss_mask]], dim=0) if len(add_xyz) > -1: output = prob_maps["coarse_raycolor"].permute(2,0,1)[None, None,...] visualizer.save_ref_views({"images": output}, i, subdir="prob_img_{:04d}".format(test_steps)) model.opt.kernel_size = kernel_size if opt.bgmodel.startswith("planepoints"): mask = dataset.filter_plane(add_xyz) first_lst, _ = masking(mask, [add_xyz, add_embedding, add_color, add_dir, add_conf], []) add_xyz, add_embedding, add_color, add_dir, add_conf = first_lst if len(add_xyz) > 0: visualizer.save_neural_points("prob{:04d}".format(test_steps), add_xyz, None, None, save_ref=False) visualizer.print_details("vis added points to probe folder") if opt.prob_mode == 0 and opt.prob_num_step > 1: model.reset_ray_miss_ranking() del visualizer, prob_maps model.opt.prob = 0 return add_xyz, add_embedding, add_color, add_dir, add_conf def bloat_inds(inds, shift, height, width): inds = inds[:,None,:] sx, sy = torch.meshgrid(torch.arange(-shift, shift+1, dtype=torch.long), torch.arange(-shift, shift+1, dtype=torch.long)) shift_inds = torch.stack([sx, sy],dim=-1).reshape(1, -1, 2).cuda() inds = inds + shift_inds inds = inds.reshape(-1, 2) inds[...,0] = torch.clamp(inds[...,0], min=0, max=height-1) inds[...,1] = torch.clamp(inds[...,1], min=0, max=width-1) return inds 
def get_latest_epoch(resume_dir):
    """Return the checkpoint tag (as a string) with the highest numeric epoch in `resume_dir`.

    Scans for files named ``<epoch>_states.pth``; returns None when none exist.
    Creates `resume_dir` if it does not exist yet.
    """
    os.makedirs(resume_dir, exist_ok=True)
    str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")]
    int_epoch = [int(i) for i in str_epoch]
    # Return the original string form (not the int) so it can be reused as a filename tag.
    return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))]

def create_all_bg(dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=False):
    """Precompute one background-ray image per view of `dataset` using the plane background model.

    Temporarily forces ``dataset.opt.random_sample = "no_crop"`` so each item covers the
    full image, then restores the original sampling mode.  When `dummy` is True, uses
    ``get_dummyrot_item`` (render-path poses) instead of ``get_item``.

    Returns a list of tensors, one per view, each reshaped to (1, height, width, 3).
    """
    total_num = dataset.total
    height = dataset.height
    width = dataset.width
    bg_ray_lst = []
    random_sample = dataset.opt.random_sample
    for i in range(0, total_num):
        dataset.opt.random_sample = "no_crop"
        if dummy:
            data = dataset.get_dummyrot_item(i)
        else:
            data = dataset.get_item(i)
        raydir = data['raydir'].clone()
        # print("data['pixel_idx']",data['pixel_idx'].shape) # 1, 512, 640, 2
        # Flatten the (H, W) pixel grid into a single ray dimension.
        pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone()
        start=0
        end = height * width
        data['raydir'] = raydir[:, start:end, :]
        data["pixel_idx"] = pixel_idx[:, start:end, :]
        model.set_input(data)
        # Intersect camera rays with the background plane, then query its color.
        xyz_world_sect_plane = mvs_utils.gen_bg_points(data)
        bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"])
        bg_ray = bg_ray.reshape(bg_ray.shape[0], height, width, 3) # 1, 512, 640, 3
        bg_ray_lst.append(bg_ray)
    dataset.opt.random_sample = random_sample
    return bg_ray_lst

def main():
    """Entry point: build/load the neural point cloud, then run the fine-tuning training loop.

    High-level flow:
      1. Parse options and create the training dataset.
      2. Initialize the model: resume from a checkpoint if one exists, otherwise
         generate points via MVS (``gen_points_filter_embeddings``) or load them
         from disk and extract per-point embeddings/colors from the nearest views.
      3. Train, periodically pruning low-confidence points, probing ray-miss
         regions to grow new points, saving checkpoints, and evaluating PSNR.
    """
    torch.backends.cudnn.benchmark = True
    opt = TrainOptions().parse()
    # NOTE(review): when gpu_ids is empty this passes a torch.device object into
    # torch.device(...) — works, but looks like a leftover; verify intent.
    cur_device = torch.device('cuda:{}'.format(opt.gpu_ids[0]) if opt.gpu_ids else torch.device('cpu'))
    print("opt.color_loss_items ", opt.color_loss_items)

    if opt.debug:
        torch.autograd.set_detect_anomaly(True)
        print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Debug Mode')
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END)

    visualizer = Visualizer(opt)
    train_dataset = create_dataset(opt)
    normRw2c = train_dataset.norm_w2c[:3,:3] # torch.eye(3, device="cuda") #
    img_lst=None
    best_PSNR=0.0
    best_iter=0
    points_xyz_all=None

    # ---- Model / point-cloud initialization (no gradients needed here) ----
    with torch.no_grad():
        print(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth")
        if len([n for n in glob.glob(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth") if os.path.isfile(n)]) > 0:
            # Case 1: a previous checkpoint exists — resume from it.
            if opt.bgmodel.endswith("plane"):
                _, _, _, _, _, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
            resume_dir = os.path.join(opt.checkpoints_dir, opt.name)
            if opt.resume_iter == "best":
                opt.resume_iter = "latest"
            resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir)
            if resume_iter is None:
                epoch_count = 1
                total_steps = 0
                visualizer.print_details("No previous checkpoints, start from scratch!!!!")
            else:
                opt.resume_iter = resume_iter
                states = torch.load(
                    os.path.join(resume_dir, '{}_states.pth'.format(resume_iter)), map_location=cur_device)
                epoch_count = states['epoch_count']
                total_steps = states['total_steps']
                best_PSNR = states['best_PSNR'] if 'best_PSNR' in states else best_PSNR
                best_iter = states['best_iter'] if 'best_iter' in states else best_iter
                best_PSNR = best_PSNR.item() if torch.is_tensor(best_PSNR) else best_PSNR
                visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                visualizer.print_details('Continue training from {} epoch'.format(opt.resume_iter))
                visualizer.print_details(f"Iter: {total_steps}")
                visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                del states
            opt.mode = 2
            opt.load_points=1
            opt.resume_dir=resume_dir
            opt.resume_iter = resume_iter
            opt.is_train=True
            model = create_model(opt)
        elif opt.load_points < 1:
            # Case 2: no checkpoint, generate the initial point cloud via MVS.
            points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt)
            opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
            opt.is_train=True
            opt.mode = 2
            model = create_model(opt)
        else:
            # Case 3: load points from disk, then extract per-point features from views.
            load_points = opt.load_points
            opt.is_train = False
            opt.mode = 1
            opt.load_points = 0
            model = create_model(opt)
            model.setup(opt)
            model.eval()
            if load_points in [1,3]:
                points_xyz_all = train_dataset.load_init_points()
            if load_points == 2:
                points_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=100)
            if load_points == 3:
                # Merge depth-derived points, keeping only those that fall in voxels
                # not already occupied by the loaded point cloud.
                depth_xyz_all = train_dataset.load_init_depth_points(device="cuda", vox_res=80)
                print("points_xyz_all",points_xyz_all.shape)
                print("depth_xyz_all", depth_xyz_all.shape)
                filter_res = 100
                pc_grid_id, _, pc_space_min, pc_space_max = mvs_utils.construct_vox_points_ind(points_xyz_all, filter_res)
                d_grid_id, depth_inds, _, _ = mvs_utils.construct_vox_points_ind(depth_xyz_all, filter_res, space_min=pc_space_min, space_max=pc_space_max)
                all_grid= torch.cat([pc_grid_id, d_grid_id], dim=0)
                min_id = torch.min(all_grid, dim=-2)[0]
                max_id = torch.max(all_grid, dim=-2)[0] - min_id
                max_id_lst = (max_id+1).cpu().numpy().tolist()
                # Occupancy mask over the voxel grid: 0 where the point cloud already has points.
                mask = torch.ones(max_id_lst, device=d_grid_id.device)
                pc_maskgrid_id = (pc_grid_id - min_id[None,...]).to(torch.long)
                mask[pc_maskgrid_id[...,0], pc_maskgrid_id[...,1], pc_maskgrid_id[...,2]] = 0
                depth_maskinds = (d_grid_id[depth_inds,:] - min_id).to(torch.long)
                depth_maskinds = mask[depth_maskinds[...,0], depth_maskinds[...,1], depth_maskinds[...,2]]
                depth_xyz_all = depth_xyz_all[depth_maskinds > 0]
                visualizer.save_neural_points("dep_filtered", depth_xyz_all, None, None, save_ref=False)
                print("vis depth; after pc mask depth_xyz_all",depth_xyz_all.shape)
                points_xyz_all = [points_xyz_all, depth_xyz_all] if opt.vox_res > 0 else torch.cat([points_xyz_all, depth_xyz_all],dim=0)
                del depth_xyz_all, depth_maskinds, mask, pc_maskgrid_id, max_id_lst, max_id, min_id, all_grid

            if opt.ranges[0] > -99.0:
                # Crop points to the user-specified axis-aligned bounding box.
                ranges = torch.as_tensor(opt.ranges, device=points_xyz_all.device, dtype=torch.float32)
                mask = torch.prod(
                    torch.logical_and(points_xyz_all[..., :3] >= ranges[None, :3], points_xyz_all[..., :3] <= ranges[None, 3:]),
                    dim=-1) > 0
                points_xyz_all = points_xyz_all[mask]

            if opt.vox_res > 0:
                # Voxel-downsample each point set; later sets get a coarser resolution
                # (vox_res / 1.5**i). Very large clouds are strided first to bound memory.
                points_xyz_all = [points_xyz_all] if not isinstance(points_xyz_all, list) else points_xyz_all
                points_xyz_holder = torch.zeros([0,3], dtype=points_xyz_all[0].dtype, device="cuda")
                for i in range(len(points_xyz_all)):
                    points_xyz = points_xyz_all[i]
                    vox_res = opt.vox_res // (1.5**i)
                    print("load points_xyz", points_xyz.shape)
                    _, sparse_grid_idx, sampled_pnt_idx = mvs_utils.construct_vox_points_closest(points_xyz.cuda() if len(points_xyz) < 80000000 else points_xyz[::(len(points_xyz) // 80000000 + 1), ...].cuda(), vox_res)
                    points_xyz = points_xyz[sampled_pnt_idx, :]
                    print("after voxelize:", points_xyz.shape)
                    points_xyz_holder = torch.cat([points_xyz_holder, points_xyz], dim=0)
                points_xyz_all = points_xyz_holder

            if opt.resample_pnts > 0:
                if opt.resample_pnts == 1:
                    print("points_xyz_all",points_xyz_all.shape)
                    inds = torch.min(torch.norm(points_xyz_all, dim=-1, keepdim=True), dim=0)[1] # use the point closest to the origin
                else:
                    inds = torch.randperm(len(points_xyz_all))[:opt.resample_pnts, ...]
                points_xyz_all = points_xyz_all[inds, ...]

            # Assign every point to its nearest camera view, then query that view's
            # image features to initialize per-point embedding/color/dir/conf.
            campos, camdir = train_dataset.get_campos_ray()
            cam_ind = nearest_view(campos, camdir, points_xyz_all, train_dataset.id_list)
            unique_cam_ind = torch.unique(cam_ind)
            print("unique_cam_ind", unique_cam_ind.shape)
            points_xyz_all = [points_xyz_all[cam_ind[:,0]==unique_cam_ind[i], :] for i in range(len(unique_cam_ind))]

            featuredim = opt.point_features_dim
            points_embedding_all = torch.zeros([1, 0, featuredim], device=unique_cam_ind.device, dtype=torch.float32)
            points_color_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
            points_dir_all = torch.zeros([1, 0, 3], device=unique_cam_ind.device, dtype=torch.float32)
            points_conf_all = torch.zeros([1, 0, 1], device=unique_cam_ind.device, dtype=torch.float32)
            print("extract points embeding & colors", )
            for i in tqdm(range(len(unique_cam_ind))):
                id = unique_cam_ind[i]
                batch = train_dataset.get_item(id, full_img=True)
                HDWD = [train_dataset.height, train_dataset.width]
                c2w = batch["c2w"][0].cuda()
                w2c = torch.inverse(c2w)
                intrinsic = batch["intrinsic"].cuda()
                # cam_xyz_all 252, 4
                # World -> camera transform via homogeneous coordinates.
                cam_xyz_all = (torch.cat([points_xyz_all[i], torch.ones_like(points_xyz_all[i][...,-1:])], dim=-1) @ w2c.transpose(0,1))[..., :3]
                embedding, color, dir, conf = model.query_embedding(HDWD, cam_xyz_all[None,...], None, batch['images'].cuda(), c2w[None, None,...], w2c[None, None,...], intrinsic[:, None,...], 0, pointdir_w=True)
                conf = conf * opt.default_conf if opt.default_conf > 0 and opt.default_conf < 1.0 else conf
                points_embedding_all = torch.cat([points_embedding_all, embedding], dim=1)
                points_color_all = torch.cat([points_color_all, color], dim=1)
                points_dir_all = torch.cat([points_dir_all, dir], dim=1)
                points_conf_all = torch.cat([points_conf_all, conf], dim=1)
                # visualizer.save_neural_points(id, cam_xyz_all, color, batch, save_ref=True)

            points_xyz_all=torch.cat(points_xyz_all, dim=0)
            visualizer.save_neural_points("init", points_xyz_all, points_color_all, None, save_ref=load_points == 0)
            print("vis")
            # visualizer.save_neural_points("cam", campos, None, None, None)
            # print("vis")
            # exit()
            opt.resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(opt.resume_dir)
            opt.is_train = True
            opt.mode = 2
            model = create_model(opt)

        if points_xyz_all is not None:
            if opt.bgmodel.startswith("planepoints"):
                gen_pnts, gen_embedding, gen_dir, gen_color, gen_conf = train_dataset.get_plane_param_points()
                visualizer.save_neural_points("pl", gen_pnts, gen_color, None, save_ref=False)
                print("vis pl")
                points_xyz_all = torch.cat([points_xyz_all, gen_pnts], dim=0)
                points_embedding_all = torch.cat([points_embedding_all, gen_embedding], dim=1)
                # NOTE(review): gen_dir is appended to colors and gen_color to dirs —
                # either the unpack names or these two cats look swapped; confirm
                # against get_plane_param_points' return order before changing.
                points_color_all = torch.cat([points_color_all, gen_dir], dim=1)
                points_dir_all = torch.cat([points_dir_all, gen_color], dim=1)
                points_conf_all = torch.cat([points_conf_all, gen_conf], dim=1)
            model.set_points(points_xyz_all.cuda(), points_embedding_all.cuda(), points_color=points_color_all.cuda(), points_dir=points_dir_all.cuda(), points_conf=points_conf_all.cuda(), Rw2c=normRw2c.cuda() if opt.load_points < 1 and opt.normview != 3 else None)
            epoch_count = 1
            total_steps = 0
            del points_xyz_all, points_embedding_all, points_color_all, points_dir_all, points_conf_all

    # ---- Training setup ----
    model.setup(opt, train_len=len(train_dataset))
    model.train()
    data_loader = create_data_loader(opt, dataset=train_dataset)
    dataset_size = len(data_loader)
    visualizer.print_details('# training images = {}'.format(dataset_size))

    # create test loader
    test_opt = copy.deepcopy(opt)
    test_opt.is_train = False
    test_opt.random_sample = 'no_crop'
    test_opt.random_sample_size = min(48, opt.random_sample_size)
    test_opt.batch_size = 1
    test_opt.n_threads = 0
    test_opt.prob = 0
    test_opt.split = "test"

    with open('/tmp/.neural-volumetric.name', 'w') as f:
        f.write(opt.name + '\n')

    visualizer.reset()
    if total_steps > 0:
        # Fast-forward LR schedulers to the resumed step count.
        for scheduler in model.schedulers:
            for i in range(total_steps):
                scheduler.step()

    fg_masks = None
    bg_ray_train_lst, bg_ray_test_lst = [], []
    if opt.bgmodel.endswith("plane"):
        # Precompute plane-background rays for train/test (and render, if needed).
        test_dataset = create_dataset(test_opt)
        bg_ray_train_lst = create_all_bg(train_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
        bg_ray_test_lst = create_all_bg(test_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst)
        test_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_test_lst]
        del test_dataset
        if opt.vid > 0:
            render_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
            bg_ray_render_lst = create_all_bg(render_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, dummy=True)
            render_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_render_lst]
    else:
        test_bg_info, render_bg_info = None, None
    img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = None, None, None, None, None

    ############ initial test ###############
    if total_steps == 0 and opt.maximum_step <= 0:
        # Evaluation-only mode: run a single test pass and terminate.
        with torch.no_grad():
            test_opt.nerf_splits = ["test"]
            test_opt.split = "test"
            test_opt.name = opt.name + "/test_{}".format(total_steps)
            test_opt.test_num_step = opt.test_num_step
            test_dataset = create_dataset(test_opt)
            model.opt.is_train = 0
            model.opt.no_loss = 1
            test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps)
            model.opt.no_loss = 0
            model.opt.is_train = 1
        model.train()
        exit()

    if total_steps == 0 and (len(train_dataset.id_list) > 30 or len(train_dataset.view_id_list) > 30):
        # Save an initial checkpoint for larger scenes before training starts.
        other_states = {
            'epoch_count': 0,
            'total_steps': total_steps,
        }
        model.save_networks(total_steps, other_states)
        visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, 0, total_steps))

    real_start=total_steps
    train_random_sample_size = opt.random_sample_size
    # ---- Main training loop ----
    for epoch in range(epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        for i, data in enumerate(data_loader):
            if opt.maximum_step is not None and total_steps >= opt.maximum_step:
                break

            # Periodic pruning of low-confidence neural points (optimizer/scheduler rebuilt).
            if opt.prune_iter > 0 and real_start != total_steps and total_steps % opt.prune_iter == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0 and total_steps <= opt.prune_max_iter:
                with torch.no_grad():
                    model.clean_optimizer()
                    model.clean_scheduler()
                    model.prune_points(opt.prune_thresh)
                    model.setup_optimizer(opt)
                    model.init_scheduler(total_steps, opt)
                torch.cuda.empty_cache()
                torch.cuda.synchronize()

            # Periodic hole probing: find rays the model misses and grow new points there.
            if opt.prob_freq > 0 and real_start != total_steps and total_steps % opt.prob_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0:
                if opt.prob_kernel_size is not None:
                    tier = np.sum(np.asarray(opt.prob_tiers) < total_steps)
                if (model.top_ray_miss_loss[0] > 1e-5 or opt.prob_mode != 0 or opt.far_thresh > 0) and (opt.prob_kernel_size is None or tier < (len(opt.prob_kernel_size) // 3)):
                    torch.cuda.empty_cache()
                    model.opt.is_train = 0
                    model.opt.no_loss = 1
                    with torch.no_grad():
                        prob_opt = copy.deepcopy(test_opt)
                        prob_opt.name = opt.name
                        # if opt.prob_type=0:
                        train_dataset.opt.random_sample = "no_crop"
                        if opt.prob_mode <= 0:
                            train_dataset.opt.random_sample_size = min(32, train_random_sample_size)
                            prob_dataset = train_dataset
                        elif opt.prob_mode == 1:
                            prob_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
                        else:
                            prob_dataset = create_comb_dataset(test_opt, opt, total_steps, test_num_step=1)
                        model.eval()
                        add_xyz, add_embedding, add_color, add_dir, add_conf = probe_hole(model, prob_dataset, Visualizer(prob_opt), prob_opt, None, test_steps=total_steps, opacity_thresh=opt.prob_thresh)
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()
                        if opt.prob_mode != 0:
                            del prob_dataset
                    # else:
                    if len(add_xyz) > 0:
                        print("len(add_xyz)", len(add_xyz))
                        model.clean_optimizer_scheduler()
                        model.grow_points(add_xyz, add_embedding, add_color, add_dir, add_conf)
                        length_added = len(add_xyz)
                        del add_xyz, add_embedding, add_color, add_dir, add_conf
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()
                        other_states = {
                            "best_PSNR": best_PSNR,
                            "best_iter": best_iter,
                            'epoch_count': epoch,
                            'total_steps': total_steps,
                        }
                        visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
                        print("other_states",other_states)
                        model.save_networks(total_steps, other_states, back_gpu=False)
                        visualizer.print_details(
                            "$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(length_added, len(model.neural_points.xyz)))
                        # model.reset_optimizer(opt)
                        # model.reset_scheduler(total_steps, opt)
                        # model.cleanup()
                        # pprint(vars(model))
                        # del model
                        # visualizer.reset()
                        # gc.collect()
                        # torch.cuda.synchronize()
                        # torch.cuda.empty_cache()
                        # input("Press Enter to continue...")
                        # opt.is_train = 1
                        # opt.no_loss = 0
                        # model = create_model(opt)
                        #
                        # model.setup(opt, train_len=len(train_dataset))
                        # model.train()
                        #
                        # if total_steps > 0:
                        # for scheduler in model.schedulers:
                        # for i in range(total_steps):
                        # scheduler.step()
                        # NOTE(review): the process deliberately terminates after saving
                        # the grown point cloud (presumably relying on an external
                        # restart-from-checkpoint loop) — the print_details below is
                        # unreachable and references the deleted add_xyz; confirm.
                        exit()
                        visualizer.print_details("$$$$$$$$$$$$$$$$$$$$$$$$$$ add grow new points num: {}, all num: {} $$$$$$$$$$$$$$$$".format(len(add_xyz), len(model.neural_points.xyz)))
                    train_dataset.opt.random_sample = "random"
                    model.train()
                    model.opt.no_loss = 0
                    model.opt.is_train = 1
                    train_dataset.opt.random_sample_size = train_random_sample_size
                    torch.cuda.synchronize()
                    torch.cuda.empty_cache()
                else:
                    visualizer.print_details(
                        'nothing to probe, max ray miss is only {}'.format(model.top_ray_miss_loss[0]))

            # ---- One optimization step ----
            total_steps += 1
            model.set_input(data)
            if opt.bgmodel.endswith("plane"):
                if len(bg_ray_train_lst) > 0:
                    # Look up precomputed background rays for the sampled pixels.
                    bg_ray_all = bg_ray_train_lst[data["id"]]
                    bg_idx = data["pixel_idx"].view(-1,2)
                    bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :]
                else:
                    xyz_world_sect_plane = mvs_utils.gen_bg_points(model.input)
                    bg_ray, fg_masks = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks=fg_masks)
                data["bg_ray"] = bg_ray
            model.optimize_parameters(total_steps=total_steps)
            losses = model.get_current_losses()
            visualizer.accumulate_losses(losses)

            if opt.lr_policy.startswith("iter"):
                model.update_learning_rate(opt=opt, total_steps=total_steps)

            if total_steps and total_steps % opt.print_freq == 0:
                if opt.show_tensorboard:
                    visualizer.plot_current_losses_with_tb(total_steps, losses)
                visualizer.print_losses(total_steps)
                visualizer.reset()

            if hasattr(opt, "save_point_freq") and total_steps and total_steps % opt.save_point_freq == 0 and (opt.prune_iter > 0 and total_steps <= opt.prune_max_iter or opt.save_point_freq==1):
                visualizer.save_neural_points(total_steps, model.neural_points.xyz, model.neural_points.points_embeding, data, save_ref=opt.load_points==0)
                visualizer.print_details('saving neural points at total_steps {})'.format(total_steps))

            # Checkpointing (best-effort: failures are logged, training continues).
            try:
                if total_steps == 10000 or (total_steps % opt.save_iter_freq == 0 and total_steps > 0):
                    other_states = {
                        "best_PSNR": best_PSNR,
                        "best_iter": best_iter,
                        'epoch_count': epoch,
                        'total_steps': total_steps,
                    }
                    visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
                    model.save_networks(total_steps, other_states)
            except Exception as e:
                visualizer.print_details(e)

            # Periodic video rendering.
            if opt.vid > 0 and total_steps % opt.vid == 0 and total_steps > 0:
                torch.cuda.empty_cache()
                test_dataset = create_render_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
                model.opt.is_train = 0
                model.opt.no_loss = 1
                with torch.no_grad():
                    render_vid(model, test_dataset, Visualizer(test_opt), test_opt, render_bg_info, steps=total_steps)
                model.opt.no_loss = 0
                model.opt.is_train = 1
                del test_dataset

            # Periodic evaluation; tracks the best PSNR seen so far.
            if total_steps == 10000 or (total_steps % opt.test_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0):
                torch.cuda.empty_cache()
                test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=opt.test_num_step)
                model.opt.is_train = 0
                model.opt.no_loss = 1
                with torch.no_grad():
                    if opt.test_train == 0:
                        test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
                    else:
                        train_dataset.opt.random_sample = "no_crop"
                        test_psnr = test(model, train_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, lpips=True)
                        train_dataset.opt.random_sample = opt.random_sample
                model.opt.no_loss = 0
                model.opt.is_train = 1
                del test_dataset
                best_iter = total_steps if test_psnr > best_PSNR else best_iter
                best_PSNR = max(test_psnr, best_PSNR)
                visualizer.print_details(f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
                model.train()

        # try:
        #     print("saving the model at the end of epoch")
        #     other_states = {'epoch_count': epoch, 'total_steps': total_steps}
        #     model.save_networks('latest', other_states)
        # except Exception as e:
        #     print(e)

        if opt.maximum_step is not None and total_steps >= opt.maximum_step:
            visualizer.print_details('{}: End of stepts {} / {} \t Time Taken: {} sec'.format(
                opt.name, total_steps, opt.maximum_step, time.time() - epoch_start_time))
            break

    # ---- Final save + full test pass ----
    del train_dataset
    other_states = {
        'epoch_count': epoch,
        'total_steps': total_steps,
    }
    visualizer.print_details('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps))
    model.save_networks(total_steps, other_states)

    torch.cuda.empty_cache()
    test_dataset = create_test_dataset(test_opt, opt, total_steps, test_num_step=1)
    model.opt.no_loss = 1
    model.opt.is_train = 0
    visualizer.print_details("full datasets test:")
    with torch.no_grad():
        test_psnr = test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=total_steps, gen_vid=True, lpips=True)
    best_iter = total_steps if test_psnr > best_PSNR else best_iter
    best_PSNR = max(test_psnr, best_PSNR)
    visualizer.print_details(
        f"test at iter {total_steps}, PSNR: {test_psnr}, best_PSNR: {best_PSNR}, best_iter: {best_iter}")
    exit()

def save_points_conf(visualizer, xyz, points_color, points_conf, total_steps):
    """Debug helper: dump the point cloud bucketed into 12 confidence bands.

    Each band (conf in (pre, thresh]) is saved as a separate point set named
    ``<total_steps>-<thresh>``.  Terminates the process afterwards.
    """
    print("total:", xyz.shape, points_color.shape, points_conf.shape)
    colors, confs = points_color[0], points_conf[0,...,0]
    pre = -1000
    for i in range(12):
        thresh = (i * 0.1) if i <= 10 else 1000
        mask = ((confs <= thresh) * (confs > pre)) > 0
        thresh_xyz = xyz[mask, :]
        thresh_color = colors[mask, :]
        visualizer.save_neural_points(f"{total_steps}-{thresh}", thresh_xyz, thresh_color[None, ...], None, save_ref=False)
        pre = thresh
    exit()

def create_render_dataset(test_opt, opt, total_steps, test_num_step=1):
    """Build a dataset configured for the "render" split (video generation).

    Mutates `test_opt` in place (split, name, step, sample size) before creating.
    """
    test_opt.nerf_splits = ["render"]
    test_opt.split = "render"
    test_opt.name = opt.name + "/vid_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    test_opt.random_sample_size = 30
    test_dataset = create_dataset(test_opt)
    return test_dataset

def create_test_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Build a dataset configured for the "test" split; mutates `test_opt` in place."""
    test_opt.prob = prob if prob is not None else test_opt.prob
    test_opt.nerf_splits = ["test"]
    test_opt.split = "test"
    test_opt.name = opt.name + "/test_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    test_dataset = create_dataset(test_opt)
    return test_dataset

def create_comb_dataset(test_opt, opt, total_steps, prob=None, test_num_step=1):
    """Build a dataset configured for the combined ("comb") split; mutates `test_opt` in place."""
    test_opt.prob = prob if prob is not None else test_opt.prob
    test_opt.nerf_splits = ["comb"]
    test_opt.split = "comb"
    test_opt.name = opt.name + "/comb_{}".format(total_steps)
    test_opt.test_num_step = test_num_step
    test_dataset = create_dataset(test_opt)
    return test_dataset

if __name__ == '__main__':
    main()
60,882
55.268946
392
py
pointnerf
pointnerf-master/run/test_ft.py
import sys import os import pathlib sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import glob import copy import torch import numpy as np import time from options import TrainOptions from data import create_data_loader, create_dataset from models import create_model from models.mvs.mvs_points_model import MvsPointsModel from models.mvs import mvs_utils, filter_utils from pprint import pprint from utils.visualizer import Visualizer from utils import format as fmt from run.evaluate import report_metrics torch.manual_seed(0) np.random.seed(0) import random import cv2 from PIL import Image from tqdm import tqdm import gc def mse2psnr(x): return -10.* torch.log(x)/np.log(10.) def save_image(img_array, filepath): assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4]) if img_array.dtype != np.uint8: img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8) os.makedirs(os.path.dirname(filepath), exist_ok=True) Image.fromarray(img_array).save(filepath) def nearest_view(campos, raydir, xyz, id_list): cam_ind = torch.zeros([0,1], device=campos.device, dtype=torch.long) step=10000 for i in range(0, len(xyz), step): dists = xyz[i:min(len(xyz),i+step), None, :] - campos[None, ...] # N, M, 3 dists_norm = torch.norm(dists, dim=-1) # N, M dists_dir = dists / (dists_norm[...,None]+1e-6) # N, M, 3 dists = dists_norm / 200 + (1.1 - torch.sum(dists_dir * raydir[None, :],dim=-1)) # N, M cam_ind = torch.cat([cam_ind, torch.argmin(dists, dim=1).view(-1,1)], dim=0) # N, 1 return cam_ind def masking(mask, firstdim_lst, seconddim_lst): first_lst = [item[mask, ...] if item is not None else None for item in firstdim_lst] second_lst = [item[:, mask, ...] 
if item is not None else None for item in seconddim_lst] return first_lst, second_lst def render_vid(model, dataset, visualizer, opt, bg_info, steps=0, gen_vid=True): print('-----------------------------------Rendering-----------------------------------') model.eval() total_num = dataset.total print("test set size {}, interval {}".format(total_num, opt.test_num_step)) patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visualizer.reset() for i in range(0, total_num): data = dataset.get_dummyrot_item(i) raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() # cam_posts.append(data['campos']) # cam_dirs.append(data['raydir'] + data['campos'][None,...]) # continue visuals = None stime = time.time() for k in range(0, height * width, chunk_size): start = k end = min([k + chunk_size, height * width]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] # print("tmpgts", tmpgts["gt_image"].shape) # print(data["pixel_idx"]) model.set_input(data) if opt.bgmodel.endswith("plane"): img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info if len(bg_ray_lst) > 0: bg_ray_all = bg_ray_lst[data["id"]] bg_idx = data["pixel_idx"].view(-1,2) bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :] else: xyz_world_sect_plane = mvs_utils.gen_bg_points(data) bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer) data["bg_ray"] = bg_ray model.test() curr_visuals = model.get_current_visuals(data=data) if visuals is None: visuals = {} for key, value in curr_visuals.items(): if key == "gt_image": continue chunk = value.cpu().numpy() visuals[key] = np.zeros((height * width, 3)).astype(chunk.dtype) visuals[key][start:end, :] = chunk else: for key, value in 
curr_visuals.items(): if key == "gt_image": continue visuals[key][start:end, :] = value.cpu().numpy() for key, value in visuals.items(): visualizer.print_details("{}:{}".format(key, visuals[key].shape)) visuals[key] = visuals[key].reshape(height, width, 3) print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir) visualizer.display_current_results(visuals, i) # visualizer.save_neural_points(200, np.concatenate(cam_posts, axis=0),None, None, save_ref=False) # visualizer.save_neural_points(200, np.concatenate(cam_dirs, axis=0),None, None, save_ref=False) # print("vis") # exit() print('--------------------------------Finish Evaluation--------------------------------') if gen_vid: del dataset visualizer.gen_video("coarse_raycolor", range(0, total_num), 0) print('--------------------------------Finish generating vid--------------------------------') return def test(model, dataset, visualizer, opt, bg_info, test_steps=0, gen_vid=True, lpips=True): print('-----------------------------------Testing-----------------------------------') model.eval() total_num = dataset.total print("test set size {}, interval {}".format(total_num, opt.test_num_step)) # 1 if test_steps == 10000 else opt.test_num_step patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visualizer.reset() count = 0; for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step data = dataset.get_item(i) raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() edge_mask = torch.zeros([height, width], dtype=torch.bool) edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1 edge_mask=edge_mask.reshape(-1) > 0 np_edge_mask=edge_mask.numpy().astype(bool) totalpixel = pixel_idx.shape[1] tmpgts = {} tmpgts["gt_image"] = 
data['gt_image'].clone() tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None print("data['gt_image']") # data.pop('gt_image', None) data.pop('gt_mask', None) visuals = None stime = time.time() ray_masks = [] for k in range(0, totalpixel, chunk_size): start = k end = min([k + chunk_size, totalpixel]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] model.set_input(data) if opt.bgmodel.endswith("plane"): img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info if len(bg_ray_lst) > 0: bg_ray_all = bg_ray_lst[data["id"]] bg_idx = data["pixel_idx"].view(-1,2) bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :] else: xyz_world_sect_plane = mvs_utils.gen_bg_points(data) bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer) data["bg_ray"] = bg_ray model.test() curr_visuals = model.get_current_visuals(data=data) chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32) if visuals is None: visuals = {} for key, value in curr_visuals.items(): if value is None or key=="gt_image": continue chunk = value.cpu().numpy() visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype) visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk else: for key, value in curr_visuals.items(): if value is None or key=="gt_image": continue visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy() if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items: ray_masks.append(model.output["ray_mask"] > 0) if len(ray_masks) > 0: ray_masks = torch.cat(ray_masks, dim=1) gt_image = torch.zeros((height*width, 3), dtype=torch.float32) gt_image[edge_mask, :] = tmpgts['gt_image'].clone() if 'gt_image' in model.visual_names: visuals['gt_image'] = gt_image if 'gt_mask' in curr_visuals: visuals['gt_mask'] = 
np.zeros((height, width, 3)).astype(chunk.dtype) visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask'] if 'ray_masked_coarse_raycolor' in model.visual_names: visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3) print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape) visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0 if 'ray_depth_masked_coarse_raycolor' in model.visual_names: visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3) visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0 if 'ray_depth_masked_gt_image' in model.visual_names: visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3) visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0 if 'gt_image_ray_masked' in model.visual_names: visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3) visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0 for key, value in visuals.items(): if key in opt.visual_items: visualizer.print_details("{}:{}".format(key, visuals[key].shape)) visuals[key] = visuals[key].reshape(height, width, 3) print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir) visualizer.display_current_results(visuals, i, opt=opt) acc_dict = {} if "coarse_raycolor" in opt.test_color_loss_items: loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), gt_image.view(1, -1, 3).cuda()) acc_dict.update({"coarse_raycolor": loss}) print("coarse_raycolor", loss, mse2psnr(loss)) if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items: masked_gt = tmpgts["gt_image"].view(1, -1, 
3).cuda()[ray_masks,:].reshape(1, -1, 3) ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3) loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt) acc_dict.update({"ray_masked_coarse_raycolor": loss}) visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss))) if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items: ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1) masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)) ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3)) loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt) acc_dict.update({"ray_depth_masked_coarse_raycolor": loss}) visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss))) print(acc_dict.items()) visualizer.accumulate_losses(acc_dict) count+=1 visualizer.print_losses(count) psnr = visualizer.get_psnr(opt.test_color_loss_items[0]) print('--------------------------------Finish Test Rendering--------------------------------') report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "lpips", "vgglpips", "rmse"] if lpips else ["psnr", "ssim", "rmse"], [i for i in range(0, total_num, opt.test_num_step)], imgStr="step-%04d-{}.png".format(opt.visual_items[0]),gtStr="step-%04d-{}.png".format(opt.visual_items[1])) print('--------------------------------Finish Evaluation--------------------------------') if gen_vid: del dataset visualizer.gen_video("coarse_raycolor", range(0, total_num, 
opt.test_num_step), test_steps) print('--------------------------------Finish generating vid--------------------------------') return psnr def get_latest_epoch(resume_dir): os.makedirs(resume_dir, exist_ok=True) str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")] int_epoch = [int(i) for i in str_epoch] return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))] def main(): torch.backends.cudnn.benchmark = True opt = TrainOptions().parse() cur_device = torch.device('cuda:{}'.format(opt.gpu_ids[0]) if opt. gpu_ids else torch.device('cpu')) print("opt.color_loss_items ", opt.color_loss_items) if opt.debug: torch.autograd.set_detect_anomaly(True) print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Debug Mode') print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END) visualizer = Visualizer(opt) train_dataset = create_dataset(opt) img_lst=None with torch.no_grad(): print(opt.checkpoints_dir + opt.name + "/*_net_ray_marching.pth") if opt.bgmodel.endswith("plane"): _, _, _, _, _, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst = gen_points_filter_embeddings(train_dataset, visualizer, opt) resume_dir = os.path.join(opt.checkpoints_dir, opt.name) if opt.resume_iter == "best": opt.resume_iter = "latest" resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir) if resume_iter is None: visualizer.print_details("No previous checkpoints at iter {} !!", resume_iter) exit() else: opt.resume_iter = resume_iter visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') visualizer.print_details('test at {} iters'.format(opt.resume_iter)) visualizer.print_details(f"Iter: {resume_iter}") visualizer.print_details('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') opt.mode = 2 opt.load_points=1 opt.resume_dir=resume_dir opt.resume_iter = resume_iter opt.is_train=True 
model = create_model(opt) model.setup(opt, train_len=len(train_dataset)) # create test loader test_opt = copy.deepcopy(opt) test_opt.is_train = False test_opt.random_sample = 'no_crop' test_opt.random_sample_size = min(48, opt.random_sample_size) test_opt.batch_size = 1 test_opt.n_threads = 0 test_opt.prob = 0 test_opt.split = "test" visualizer.reset() fg_masks = None test_bg_info = None if opt.bgmodel.endswith("plane"): test_dataset = create_dataset(test_opt) bg_ray_test_lst = create_all_bg(test_dataset, model, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst) test_bg_info = [img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_test_lst] del test_dataset # if opt.vid > 0: # render_dataset = create_render_dataset(test_opt, opt, resume_iter, test_num_step=opt.test_num_step) ############ initial test ############### with torch.no_grad(): test_opt.nerf_splits = ["test"] test_opt.split = "test" test_opt.name = opt.name + "/test_{}".format(resume_iter) test_opt.test_num_step = opt.test_num_step test_dataset = create_dataset(test_opt) model.opt.is_train = 0 model.opt.no_loss = 1 test(model, test_dataset, Visualizer(test_opt), test_opt, test_bg_info, test_steps=resume_iter) if __name__ == '__main__': main()
17,612
48.754237
331
py
pointnerf
pointnerf-master/run/train.py
import sys import os import pathlib sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import copy import torch import numpy as np import time from options import TrainOptions from data import create_data_loader, create_dataset from models import create_model from utils.visualizer import Visualizer from utils import format as fmt from run.evaluate import report_metrics from render_vid import render_vid torch.manual_seed(0) np.random.seed(0) def mse2psnr(x): return -10.* torch.log(x)/np.log(10.) def test(model, dataset, visualizer, opt, test_steps=0): print('-----------------------------------Testing-----------------------------------') model.eval() total_num = dataset.total patch_size = opt.random_sample_size chunk_size = patch_size * patch_size height = dataset.height width = dataset.width visualizer.reset() count=0 for i in range(0, total_num, opt.test_num_step): # 1 if test_steps == 10000 else opt.test_num_step data = dataset.get_item(i) raydir = data['raydir'].clone() pixel_idx = data['pixel_idx'].view(data['pixel_idx'].shape[0], -1, data['pixel_idx'].shape[3]).clone() edge_mask = torch.zeros([height, width], dtype=torch.bool) edge_mask[pixel_idx[0,...,1].to(torch.long), pixel_idx[0,...,0].to(torch.long)] = 1 edge_mask=edge_mask.reshape(-1) > 0 np_edge_mask=edge_mask.numpy().astype(bool) totalpixel = pixel_idx.shape[1] tmpgts = {} tmpgts["gt_image"] = data['gt_image'].clone() tmpgts["gt_mask"] = data['gt_mask'].clone() if "gt_mask" in data else None # data.pop('gt_image', None) data.pop('gt_mask', None) visuals = None stime = time.time() ray_masks = [] ray_depth_masks = [] xyz_world_sect_plane_lst = [] for k in range(0, totalpixel, chunk_size): start = k end = min([k + chunk_size, totalpixel]) data['raydir'] = raydir[:, start:end, :] data["pixel_idx"] = pixel_idx[:, start:end, :] model.set_input(data) if opt.bgmodel.endswith("plane"): img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, fg_masks, bg_ray_lst = bg_info if 
len(bg_ray_lst) > 0: bg_ray_all = bg_ray_lst[data["id"]] bg_idx = data["pixel_idx"].view(-1,2) bg_ray = bg_ray_all[:, bg_idx[:,1].long(), bg_idx[:,0].long(), :] else: xyz_world_sect_plane = mvs_utils.gen_bg_points(data) bg_ray, _ = model.set_bg(xyz_world_sect_plane, img_lst, c2ws_lst, w2cs_lst, intrinsics_all, HDWD_lst, data["plane_color"], fg_masks=fg_masks, vis=visualizer) data["bg_ray"] = bg_ray # xyz_world_sect_plane_lst.append(xyz_world_sect_plane) model.test(gen_points=True) curr_visuals = model.get_current_visuals(data=data) # print("loss", mse2psnr(torch.nn.MSELoss().to("cuda")(curr_visuals['coarse_raycolor'], tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :].cuda()))) # print("sum", torch.sum(torch.square(tmpgts["gt_image"].view(1, -1, 3)[:, start:end, :] - tmpgts["gt_image"].view(height, width, 3)[data["pixel_idx"][0,...,1].long(), data["pixel_idx"][0,...,0].long(),:]))) chunk_pixel_id = data["pixel_idx"].cpu().numpy().astype(np.int32) if visuals is None: visuals = {} for key, value in curr_visuals.items(): if value is None or key=="gt_image": continue chunk = value.cpu().numpy() visuals[key] = np.zeros((height, width, 3)).astype(chunk.dtype) visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = chunk else: for key, value in curr_visuals.items(): if value is None or key=="gt_image": continue visuals[key][chunk_pixel_id[0,...,1], chunk_pixel_id[0,...,0], :] = value.cpu().numpy() if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in opt.test_color_loss_items: ray_masks.append(model.output["ray_mask"] > 0) ray_masks = torch.cat(ray_masks, dim=1) # visualizer.save_neural_points(data["id"].cpu().numpy()[0], (raydir.cuda() + data["campos"][:, None, :]).squeeze(0), None, data, save_ref=True) # exit() # print("curr_visuals",curr_visuals) pixel_idx=pixel_idx.to(torch.long) if 'gt_image' in model.visual_names: visuals['gt_image'] = torch.zeros((height*width, 3), dtype=torch.float32) visuals['gt_image'][edge_mask,:] = 
tmpgts['gt_image'].clone() if 'gt_mask' in curr_visuals: visuals['gt_mask'] = np.zeros((height, width, 3)).astype(chunk.dtype) visuals['gt_mask'][np_edge_mask,:] = tmpgts['gt_mask'] if 'ray_masked_coarse_raycolor' in model.visual_names: visuals['ray_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3) print(visuals['ray_masked_coarse_raycolor'].shape, ray_masks.cpu().numpy().shape) visuals['ray_masked_coarse_raycolor'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0 if 'ray_depth_masked_coarse_raycolor' in model.visual_names: visuals['ray_depth_masked_coarse_raycolor'] = np.copy(visuals["coarse_raycolor"]).reshape(height, width, 3) visuals['ray_depth_masked_coarse_raycolor'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0 if 'ray_depth_masked_gt_image' in model.visual_names: visuals['ray_depth_masked_gt_image'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3) visuals['ray_depth_masked_gt_image'][model.output["ray_depth_mask"][0].cpu().numpy() <= 0] = 0.0 if 'gt_image_ray_masked' in model.visual_names: visuals['gt_image_ray_masked'] = np.copy(tmpgts['gt_image']).reshape(height, width, 3) visuals['gt_image_ray_masked'][ray_masks.view(height, width).cpu().numpy() <= 0,:] = 0.0 for key, value in visuals.items(): visualizer.print_details("{}:{}".format(key, visuals[key].shape)) visuals[key] = visuals[key].reshape(height, width, 3) print("num.{} in {} cases: time used: {} s".format(i, total_num // opt.test_num_step, time.time() - stime), " at ", visualizer.image_dir) visualizer.display_current_results(visuals, i) acc_dict = {} if "coarse_raycolor" in opt.test_color_loss_items: loss = torch.nn.MSELoss().to("cuda")(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), visuals["gt_image"].view(1, -1, 3).cuda()) acc_dict.update({"coarse_raycolor": loss}) print("coarse_raycolor", loss, mse2psnr(loss)) if "ray_mask" in model.output and "ray_masked_coarse_raycolor" in 
opt.test_color_loss_items: masked_gt = tmpgts["gt_image"].view(1, -1, 3).cuda()[ray_masks,:].reshape(1, -1, 3) ray_masked_coarse_raycolor = torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3)[:,edge_mask,:][ray_masks,:].reshape(1, -1, 3) # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_gt") # filepath = os.path.join("/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename) # tmpgtssave = tmpgts["gt_image"].view(1, -1, 3).clone() # tmpgtssave[~ray_masks,:] = 1.0 # img = np.array(tmpgtssave.view(height,width,3)) # save_image(img, filepath) # # filename = 'step-{:04d}-{}-vali.png'.format(i, "masked_coarse_raycolor") # filepath = os.path.join( # "/home/xharlie/user_space/codes/testNr/checkpoints/fdtu_try/test_{}/images".format(38), filename) # csave = torch.zeros_like(tmpgts["gt_image"].view(1, -1, 3)) # csave[~ray_masks, :] = 1.0 # csave[ray_masks, :] = torch.as_tensor(visuals["coarse_raycolor"]).view(1, -1, 3)[ray_masks,:] # img = np.array(csave.view(height, width, 3)) # save_image(img, filepath) loss = torch.nn.MSELoss().to("cuda")(ray_masked_coarse_raycolor, masked_gt) acc_dict.update({"ray_masked_coarse_raycolor": loss}) visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_masked_coarse_raycolor", loss, mse2psnr(loss))) if "ray_depth_mask" in model.output and "ray_depth_masked_coarse_raycolor" in opt.test_color_loss_items: ray_depth_masks = model.output["ray_depth_mask"].reshape(model.output["ray_depth_mask"].shape[0], -1) masked_gt = torch.masked_select(tmpgts["gt_image"].view(1, -1, 3).cuda(), (ray_depth_masks[..., None].expand(-1, -1, 3)).reshape(1, -1, 3)) ray_depth_masked_coarse_raycolor = torch.masked_select(torch.as_tensor(visuals["coarse_raycolor"], device="cuda").view(1, -1, 3), ray_depth_masks[..., None].expand(-1, -1, 3).reshape(1, -1, 3)) loss = torch.nn.MSELoss().to("cuda")(ray_depth_masked_coarse_raycolor, masked_gt) acc_dict.update({"ray_depth_masked_coarse_raycolor": loss}) 
visualizer.print_details("{} loss:{}, PSNR:{}".format("ray_depth_masked_coarse_raycolor", loss, mse2psnr(loss))) print(acc_dict.items()) visualizer.accumulate_losses(acc_dict) count += 1 visualizer.print_losses(count) # psnr = visualizer.get_psnr(opt.test_color_loss_items[0]) visualizer.reset() print('--------------------------------Finish Test Rendering--------------------------------') report_metrics(visualizer.image_dir, visualizer.image_dir, visualizer.image_dir, ["psnr", "ssim", "rmse"], [i for i in range(0, count)], imgStr="step-%04d-{}_raycolor.png".format("coarse")) print('--------------------------------Finish Evaluation--------------------------------') return def get_latest_epoch(resume_dir): os.makedirs(resume_dir, exist_ok=True) str_epoch = [file.split("_")[0] for file in os.listdir(resume_dir) if file.endswith("_states.pth")] int_epoch = [int(i) for i in str_epoch] return None if len(int_epoch) == 0 else str_epoch[int_epoch.index(max(int_epoch))] def main(): torch.backends.cudnn.benchmark = True opt = TrainOptions().parse() print("opt.color_loss_items ", opt.color_loss_items) if opt.debug: torch.autograd.set_detect_anomaly(True) print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Debug Mode') print( '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END) data_loader = create_data_loader(opt) dataset_size = len(data_loader) print('# training images = {}'.format(dataset_size)) if opt.resume_dir: resume_dir = opt.resume_dir resume_iter = opt.resume_iter if opt.resume_iter != "latest" else get_latest_epoch(resume_dir) opt.resume_iter = resume_iter if resume_iter is None: epoch_count = 1 total_steps = 0 print("No previous checkpoints, start from scratch!!!!") else: opt.resume_iter = resume_iter states = torch.load( os.path.join(resume_dir, '{}_states.pth'.format(resume_iter))) epoch_count = states['epoch_count'] total_steps = states['total_steps'] 
print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Continue training from {} epoch'.format(opt.resume_iter)) print("Iter: ", total_steps) print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') else: epoch_count = 1 total_steps = 0 print("opt.resume_dir ", opt.resume_dir, opt.resume_iter) # load model model = create_model(opt) model.setup(opt) visualizer = Visualizer(opt) # create test loader test_opt = copy.deepcopy(opt) test_opt.is_train = False test_opt.random_sample = 'no_crop' test_opt.random_sample_size = min(32, opt.random_sample_size) test_opt.batch_size = 1 test_opt.n_threads = 0 test_opt.split = "test" # test_dataset = create_dataset(test_opt) with open('/tmp/.neural-volumetric.name', 'w') as f: f.write(opt.name + '\n') visualizer.reset() if total_steps > 0: for scheduler in model.schedulers: for i in range(total_steps): scheduler.step() for epoch in range(epoch_count, opt.niter + opt.niter_decay + 1): epoch_start_time = time.time() epoch_iter = 0 for i, data in enumerate(data_loader): if opt.maximum_step is not None and total_steps >= opt.maximum_step: break total_steps += 1 epoch_iter += 1 model.set_input(data) model.optimize_parameters(total_steps=total_steps) losses = model.get_current_losses() visualizer.accumulate_losses(losses) if opt.lr_policy.startswith("iter"): model.update_learning_rate(opt=opt, total_steps=total_steps) if total_steps and total_steps % opt.print_freq == 0: if opt.show_tensorboard: visualizer.plot_current_losses_with_tb(total_steps, losses) visualizer.print_losses(total_steps) visualizer.reset() if hasattr(opt, "save_point_freq") and total_steps and total_steps % opt.save_point_freq == 0: visualizer.save_neural_points(total_steps, model.neural_points.xyz, model.neural_points.points_embeding, data, save_ref=opt.load_points==0) # if opt.train_and_test == 1 and total_steps % opt.test_freq == 0: # test(model, test_dataset, visualizer, test_opt, total_steps) # if opt.vid == 1 and 
total_steps % opt.test_freq == 0: # model.opt.no_loss = 1 # render_vid(model, test_dataset, visualizer, test_opt, total_steps) # model.opt.no_loss = 0 try: if total_steps % opt.save_iter_freq == 0 and total_steps > 0: other_states = { 'epoch_count': epoch, 'total_steps': total_steps, } print('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps)) model.save_networks(total_steps, other_states) # if opt.vid == 1: # model.opt.is_train = 0 # model.opt.no_loss = 1 # test_opt.nerf_splits = ["test"] # test_opt.name = opt.name + "/test_{}".format(total_steps) # test_opt.test_num = 999 # render_vid(model, test_dataset, Visualizer(test_opt), test_opt, total_steps) # model.opt.no_loss = 0 # model.opt.is_train = 1 except Exception as e: print(e) if total_steps % opt.test_freq == 0 and total_steps < (opt.maximum_step - 1) and total_steps > 0: test_opt.nerf_splits = ["test"] test_opt.split = "test" test_opt.name = opt.name + "/test_{}".format(total_steps) test_opt.test_num_step = opt.test_num_step test_dataset = create_dataset(test_opt) model.opt.is_train = 0 model.opt.no_loss = 1 test(model, test_dataset, Visualizer(test_opt), test_opt, total_steps) model.opt.no_loss = 0 model.opt.is_train = 1 # try: # print("saving the model at the end of epoch") # other_states = {'epoch_count': epoch, 'total_steps': total_steps} # model.save_networks('latest', other_states) # # except Exception as e: # print(e) if opt.vid == 1: model.opt.is_train = 0 model.opt.no_loss = 1 render_vid(model, test_dataset, visualizer, test_opt, total_steps) model.opt.no_loss = 0 model.opt.is_train = 1 if opt.maximum_step is not None and total_steps == opt.maximum_step: print('{}: End of stepts {} / {} \t Time Taken: {} sec'.format( opt.name, total_steps, opt.maximum_step, time.time() - epoch_start_time)) break print('{}: End of epoch {} / {} \t Time Taken: {} sec'.format( opt.name, epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time)) if not 
opt.lr_policy.startswith("iter"): model.update_learning_rate(opt=opt) other_states = { 'epoch_count': epoch, 'total_steps': total_steps, } print('saving model ({}, epoch {}, total_steps {})'.format(opt.name, epoch, total_steps)) model.save_networks(total_steps, other_states) test_opt.nerf_splits = ["test"] test_opt.split = "test" test_opt.test_num_step=1 test_opt.name = opt.name + "/test_{}".format(total_steps) test_dataset = create_dataset(test_opt) model.opt.no_loss = 1 model.opt.is_train = 0 test(model, test_dataset, Visualizer(test_opt), test_opt, total_steps) # model.opt.no_loss = 0 # model.opt.is_train = 1 if __name__ == '__main__': main()
17,776
47.438692
219
py
pointnerf
pointnerf-master/run/visualize.py
import sys import os import pathlib sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..')) import copy import torch import numpy as np import time from options import TestOptions from data import create_data_loader, create_dataset from models import create_model from utils.visualizer import Visualizer from utils import format as fmt def main(): torch.backends.cudnn.benchmark = True opt = TestOptions().parse() if opt.debug: torch.autograd.set_detect_anomaly(True) print(fmt.RED + '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Debug Mode') print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' + fmt.END) assert opt.resume_dir is not None resume_dir = opt.resume_dir states = torch.load(os.path.join(resume_dir, '{}_states.pth'.format(opt.resume_iter))) epoch_count = states['epoch_count'] total_steps = states['total_steps'] print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') print('Resume from {} epoch'.format(opt.resume_iter)) print("Iter: ", total_steps) print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++') # load model model = create_model(opt) model.setup(opt) thres = 10 grid, argb = model.net_ray_marching.module.build_point_cloud_visualization(0) mask = argb[..., 0] > thres points = grid[mask] colors = argb[mask][..., 1:4] import pyrender mesh = pyrender.Mesh.from_points(points, colors=colors) scene = pyrender.Scene() scene.add(mesh) pyrender.Viewer(scene, render_flags={'point_size': 10}, use_raymond_lighting=True) if __name__ == '__main__': main()
1,721
29.75
90
py
pointnerf
pointnerf-master/run/vis_grow_train.py
import sys
import os
import pathlib
sys.path.append(os.path.join(pathlib.Path(__file__).parent.absolute(), '..'))
import glob
import copy
import torch
import numpy as np
import time
from options import TrainOptions
from data import create_data_loader, create_dataset
from models import create_model
from models.mvs.mvs_points_model import MvsPointsModel
from models.mvs import mvs_utils, filter_utils
from utils.visualizer import Visualizer
from utils import format as fmt
from run.evaluate import report_metrics
# from render_vid import render_vid

# Fix seeds for reproducible point selection / rendering order.
torch.manual_seed(0)
np.random.seed(0)

from tqdm import tqdm
import cv2
from PIL import Image
import imageio
from utils.util import to8b


def read_image(filepath, dtype=None):
    """Read an image file; when dtype is np.float32, rescale pixels to [0, 1]."""
    pixels = np.asarray(Image.open(filepath))
    if dtype is not None and dtype == np.float32:
        pixels = (pixels / 255).astype(dtype)
    return pixels


def render_grow(pnt_dir, iters, vids):
    """Assemble per-iteration point-growing probe frames into one video per view.

    For every view id in *vids*, collect the probe image written at each
    checkpoint iteration in *iters* and write a .mov and a .gif next to them.
    """
    print('-----------------------------------Rendering Grow-----------------------------------')
    for vid in tqdm(vids):
        # One frame per checkpoint iteration; filenames match Visualizer output.
        frames = [
            read_image(
                os.path.join(pnt_dir, 'prob_img_{}'.format(it), "step-{}-0-ref0.png".format(vid)),
                dtype=np.float32)
            for it in iters
        ]
        frames_u8 = [to8b(frame) for frame in frames]
        filename = 'grow_video_{:04d}.mov'.format(vid)
        imageio.mimwrite(os.path.join(pnt_dir, filename), frames_u8, fps=3, quality=8)
        filename = 'grow_video_{:04d}.gif'.format(vid)
        imageio.mimwrite(os.path.join(pnt_dir, filename), frames_u8, fps=3, format='GIF')
    return


if __name__ == '__main__':
    pnt_dir = "/home/xharlie/user_space/codes/testNr/checkpoints/scan103_normcam2_confcolordir_KNN8_LRelu_grid800_dmsk_full2geo0_agg2_zeroone1e4_confree_prl2e3_probe2e3_1_comb/points"
    iters = list(range(1000, 25000, 1000))
    vids = list(range(16, 20))
    render_grow(pnt_dir, iters, vids)
2,394
34.746269
183
py
pointnerf
pointnerf-master/utils/visualizer.py
import numpy as np
import os
from PIL import Image
import shutil
from collections import OrderedDict
import time
import datetime
import torch
import imageio
from utils.util import to8b
from models.mvs.mvs_utils import *


def mse2psnr(x):
    """Convert an MSE tensor to PSNR in dB (assumes pixel values in [0, 1])."""
    return -10. * torch.log(x) / np.log(10.)


def save_image(img_array, filepath):
    """Save a HxW or HxWx{3,4} array as an image, converting floats in [0,1] to uint8."""
    assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4])
    if img_array.dtype != np.uint8:
        img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    Image.fromarray(img_array).save(filepath)


def save_points(xyz, dir, total_steps):
    """Dump point batches to semicolon-delimited .txt files, one file per batch entry.

    Accepts (N, C) or (B, N, C); a 2-D input is promoted to a single batch.
    """
    if xyz.ndim < 3:
        xyz = xyz[None, ...]
    os.makedirs(dir, exist_ok=True)
    for i in range(xyz.shape[0]):
        # String steps (e.g. tags) keep their literal form; ints are zero-padded.
        if isinstance(total_steps, str):
            filename = 'step-{}-{}.txt'.format(total_steps, i)
        else:
            filename = 'step-{:04d}-{}.txt'.format(total_steps, i)
        filepath = os.path.join(dir, filename)
        np.savetxt(filepath, xyz[i, ...].reshape(-1, xyz.shape[-1]), delimiter=";")


class Visualizer:
    """Writes images, point clouds, videos, loss logs and tensorboard scalars for a run."""

    def __init__(self, opt):
        # Output layout under <checkpoints_dir>/<name>/: images/, points/, vids/, log.txt.
        self.opt = opt
        self.log_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.image_dir = os.path.join(opt.checkpoints_dir, opt.name, 'images')
        self.point_dir = os.path.join(opt.checkpoints_dir, opt.name, 'points')
        self.vid_dir = os.path.join(opt.checkpoints_dir, opt.name, 'vids')
        os.makedirs(self.vid_dir, exist_ok=True)
        if opt.show_tensorboard > 0:
            from tensorboardX import SummaryWriter
            # Timestamped subdirectory so consecutive runs don't overwrite events.
            self.tb_writer = SummaryWriter(
                os.path.join(
                    opt.checkpoints_dir, opt.name,
                    datetime.datetime.now().strftime("%Y%m%d-%H%M%S")))

    def save_image(self, img_array, filepath):
        """Instance-method twin of the module-level save_image (same conversion rules)."""
        assert len(img_array.shape) == 2 or (len(img_array.shape) == 3 and img_array.shape[2] in [3, 4])
        if img_array.dtype != np.uint8:
            img_array = (np.clip(img_array, 0, 1) * 255).astype(np.uint8)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        Image.fromarray(img_array).save(filepath)

    def read_image(self, filepath, dtype=None):
        """Read an image; when dtype is np.float32, rescale to [0, 1]."""
        image = np.asarray(Image.open(filepath))
        if dtype is not None and dtype == np.float32:
            image = (image / 255).astype(dtype)
        return image

    def display_current_results(self, visuals, total_steps, opt=None):
        """Save each requested visual as images/step-XXXX-<name>.png.

        Only names listed in opt.visual_items are written; with opt=None nothing is saved.
        """
        for name, img in visuals.items():
            if opt is not None and name in opt.visual_items:
                img = np.array(img)
                filename = 'step-{:04d}-{}.png'.format(total_steps, name)
                filepath = os.path.join(self.image_dir, filename)
                save_image(img, filepath)

    def display_video(self, visual_lst, total_steps):
        """Write a .mov and a .gif per visual name from a list of per-frame visual dicts."""
        for name in visual_lst[0].keys():
            stacked_imgs = [to8b(visuals[name]) for visuals in visual_lst]
            filename = 'video_{:04d}_{}.mov'.format(total_steps, name)
            imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=5, quality=8)
            filename = 'video_{:04d}_{}.gif'.format(total_steps, name)
            imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=5, format='GIF')

    def gen_video(self, name, steps, total_step):
        """Re-read previously saved step images for *name* and assemble them into videos."""
        img_lst = []
        for i in steps:
            img_filepath = os.path.join(self.image_dir, 'step-{:04d}-{}.png'.format(i, name))
            img_arry = self.read_image(img_filepath, dtype=np.float32)
            img_lst.append(img_arry)
        stacked_imgs = [to8b(img_arry) for img_arry in img_lst]
        filename = 'video_{:04d}_{}.mov'.format(total_step, name)
        imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=20, quality=10)
        filename = 'video_{:04d}_{}.gif'.format(total_step, name)
        imageio.mimwrite(os.path.join(self.vid_dir, filename), stacked_imgs, fps=5, format='GIF')

    def save_neural_points(self, total_steps, xyz, features, data, save_ref=0):
        """Dump neural points (optionally colored by *features*) and, if asked, reference views.

        features None -> positions only; last dim 9 -> three RGB triplets saved as
        three point sets; otherwise the first 3 feature channels are used as RGB.
        """
        if features is None:
            if torch.is_tensor(xyz):
                # xyz = xyz.detach().cpu().numpy()
                xyz = xyz.detach().cpu().numpy()
            save_points(xyz, self.point_dir, total_steps)
        elif features.shape[-1] == 9:
            pnt_lst = []
            for i in range(0, 3):
                # Colors are stored in [0,1]; scale to 0-255 for the text dump.
                points = torch.cat([xyz, features[0, ..., i * 3:i * 3 + 3] * 255], dim=-1)
                if torch.is_tensor(points):
                    # xyz = xyz.detach().cpu().numpy()
                    points = points.detach().cpu().numpy()
                pnt_lst.append(points)
            save_points(np.stack(pnt_lst, axis=0), self.point_dir, total_steps)
        else:
            points = torch.cat([xyz, features[0, ..., :3] * 255], dim=-1)
            if torch.is_tensor(points):
                # xyz = xyz.detach().cpu().numpy()
                points = points.detach().cpu().numpy()
            save_points(points, self.point_dir, total_steps)
        if save_ref and "images" in data:
            self.save_ref_views(data, total_steps)

    def save_ref_views(self, data, total_steps, subdir=None):
        """Save each reference view image from data['images'] (B, V, C, H, W) as a PNG.

        NOTE(review): when more than 3 views exist, view index 3 is additionally
        saved as the target ('trgt') image — presumably the held-out view; confirm
        against the dataset's view ordering.
        """
        dir = self.point_dir if subdir is None else os.path.join(self.point_dir, subdir)
        for i in range(data['images'].shape[1]):
            img = data['images'][0, i].permute(1, 2, 0).cpu().numpy()
            filename = 'step-{}-{}-ref{}.png'.format(total_steps, 0, i)
            filepath = os.path.join(dir, filename)
            save_image(img, filepath)
        if data['images'].shape[1] > 3:
            img = data['images'][0, 3].permute(1, 2, 0).cpu().numpy()
            filename = 'step-{}-{}-trgt.png'.format(total_steps, 0)
            filepath = os.path.join(dir, filename)
            save_image(img, filepath)

    def reset(self):
        """Restart the timing / loss accumulation window (call at the start of a report period)."""
        self.start_time = time.time()
        self.acc_iterations = 0
        self.acc_losses = OrderedDict()

    def accumulate_losses(self, losses):
        """Add one iteration's losses; raycolor losses also accumulate a derived PSNR."""
        self.acc_iterations += 1
        for k, v in losses.items():
            if k not in self.acc_losses:
                self.acc_losses[k] = 0
            self.acc_losses[k] += v
            if k.endswith('raycolor'):
                psnrkey = k + "_psnr"
                if psnrkey not in self.acc_losses:
                    self.acc_losses[psnrkey] = 0
                self.acc_losses[psnrkey] += mse2psnr(v)

    def get_psnr(self, key):
        """Return the mean accumulated PSNR for loss *key* over the current window."""
        return self.acc_losses[key + "_psnr"] / self.acc_iterations

    def print_losses(self, total_steps):
        """Print and append to log.txt the window's average losses and elapsed time."""
        m = 'End of iteration {} \t Number of batches {} \t Time taken: {:.2f}s\n'.format(
            total_steps, self.acc_iterations, (time.time() - self.start_time))
        m += '[Average Loss] '
        for k, v in self.acc_losses.items():
            m += '{}: {:.10f} '.format(k, v / self.acc_iterations)
        filepath = os.path.join(self.log_dir, 'log.txt')
        with open(filepath, 'a') as f:
            f.write(m + '\n')
        print(m)

    def print_details(self, str):
        """Print *str* and append it to log.txt."""
        filepath = os.path.join(self.log_dir, 'log.txt')
        with open(filepath, 'a') as f:
            f.write(str + '\n')
        print(str)

    def plot_current_losses_with_tb(self, step, losses):
        """Push each loss as a tensorboard scalar; no-op unless show_tensorboard > 0."""
        if not self.opt.show_tensorboard > 0:
            return
        for key in losses.keys():
            curr_loss = losses[key]
            self.tb_writer.add_scalar(key, float(curr_loss), step)
7,619
40.639344
101
py
pointnerf
pointnerf-master/utils/util.py
from __future__ import print_function import torch import numpy as np from PIL import Image import os from torchvision.utils import make_grid from os.path import join import torch.nn.functional as F import matplotlib as mpl import matplotlib.cm as cm import matplotlib.pyplot as plt from scipy.spatial.transform import Rotation as R def mkdir(path): if not os.path.exists(path): os.makedirs(path) def add_property2dict(target_dict, object, props): for prop in props: target_dict[prop] = getattr(object, prop) def normalize(v, axis=0): # axis = 0, normalize each col # axis = 1, normalize each row return v / (np.linalg.norm(v, axis=axis, keepdims=True) + 1e-9) def to8b(x): return (255*np.clip(x, 0, 1)).astype(np.uint8) def gen_render_path(c2ws, N_views=30): N = len(c2ws) rotvec, positions = [], [] rotvec_inteplat, positions_inteplat = [], [] weight = np.linspace(1.0, .0, N_views//3, endpoint=False).reshape(-1, 1) for i in range(N): r = R.from_matrix(c2ws[i, :3, :3]) euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3) if i: mask = np.abs(euler_ange - rotvec[0])>180 euler_ange[mask] += 360.0 rotvec.append(euler_ange) positions.append(c2ws[i, :3, 3:].reshape(1, 3)) if i: rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i]) positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i]) rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0]) positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0]) c2ws_render = [] angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat) for rotvec, position in zip(angles_inteplat, positions_inteplat): c2w = np.eye(4) c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix() c2w[:3, 3:] = position.reshape(3, 1) c2ws_render.append(c2w.copy()) c2ws_render = np.stack(c2ws_render) return c2ws_render def unique_lst(list1): x = np.array(list1) return np.unique(x)
2,196
30.84058
109
py
pointnerf
pointnerf-master/utils/spherical.py
import torch
from scipy.special import sph_harm, lpmn, lpmv
from scipy.special import factorial
import numpy as np
import math
import time


class SphericalHarm(object):
    """Runtime real spherical-harmonics evaluation up to degree ``total_deg``.

    Precomputes index permutations between the internal (m-major) ordering of
    the associated Legendre table and the conventional SH basis ordering, plus
    the normalization constants K_lm, so ``sh_all`` is a pure tensor op.
    """

    def __init__(self, total_deg):
        self.total_deg = total_deg
        # Index bookkeeping: positions of each (l, m) entry in both orderings.
        self.orderIds, self.lIds, self.mIds, self.num_at_deg, self.m0inorder, self.restinorder, self.orderinorg = self.genalpids(
            self.total_deg)
        self.sh_ordermIds, self.sh_orderlIds, self.sh_orderIds, self.sh_orderinorg = self.genshids(
            self.total_deg)
        # Double factorials (2m-1)!! and SH normalization constants K_lm.
        self.f2m_1, self.Klm = self.precompff(self.total_deg)
        self.orderKlm = self.Klm[self.orderIds]

    # def sh_all(self, theta, phi):
    #     phi = phi.view(-1, 1)
    #     theta = theta.view(-1, 1)

    def sh_all(self, indirs):
        """Evaluate all SH basis values for unit directions *indirs* (..., 3).

        Returns a (N, total_deg**2) tensor in conventional SH order.
        Assumes *indirs* are unit-length — TODO confirm at the call sites.
        """
        indirs = indirs.view(-1, 3)
        theta = torch.acos(indirs[:, [2]])
        phi = torch.atan2(indirs[:, [1]], indirs[:, [0]])
        # phi = phi.view(-1, 1)
        # theta = theta.view(-1, 1)
        alp = self.associated_lengedre_poly_all(torch.cos(theta))
        # m == 0 terms carry no phi dependence.
        m0 = alp[:, self.m0inorder] * torch.from_numpy(
            self.orderKlm[self.m0inorder]).to(theta.device).type(theta.dtype)
        # print("alp", alp[:, self.m0inorder], self.orderKlm[self.m0inorder])
        ms = torch.from_numpy(self.mIds[self.orderIds][self.restinorder]).to(
            theta.device).type(theta.dtype)
        restKlm = torch.from_numpy(self.orderKlm[self.restinorder]).to(
            theta.device).type(theta.dtype)
        # Positive-m terms use cos(m*phi), negative-m terms sin(m*phi).
        m0p = restKlm * torch.cos(ms * phi) * alp[:, self.restinorder]
        m0n = restKlm * torch.sin(ms * phi) * alp[:, self.restinorder]
        # print(phi.shape, m0p.shape)
        m = torch.cat([m0, m0p, m0n], 1)
        # Permute from internal concat order back to conventional SH order.
        m = m[:, self.sh_orderinorg]
        return m

    def associated_lengedre_poly_all(self, x):
        """Evaluate all associated Legendre polynomials P_l^m at x = cos(theta).

        Uses the standard recurrences: P_m^m from the double factorial closed
        form, P_{m+1}^m from the first-step recurrence, then the three-term
        recurrence for higher l. Returns a (N, l*(l+1)//2) tensor in the
        precomputed m-major order.
        """
        x = x.view(-1, 1)
        l = self.total_deg
        # alp = torch.ones((x.shape[0], l * (l + 1) // 2), device=x.device)
        ms = self.mIds[self.orderIds[:l]]
        somx2 = torch.sqrt((1 - x) * (1 + x))  # sin(theta) for x = cos(theta)
        f2m_1s = torch.from_numpy(self.f2m_1[ms]).to(x.device).type(x.dtype)
        # Seed: P_m^m(x) = (-1)^m (2m-1)!! (1-x^2)^{m/2}
        pmm = torch.pow(-somx2, torch.from_numpy(ms).to(x.device)) * f2m_1s
        alp = [pmm]
        t = l - 1
        if t > 0:
            ms = self.mIds[self.orderIds[self.total_deg:self.total_deg + t]]
            ms = torch.from_numpy(ms).to(x.device)
            # First step up in l: P_{m+1}^m = x (2m+1) P_m^m
            pmp1m = x * (2 * ms + 1) * pmm[:, :t]
            alp.append(pmp1m)
            cur = self.total_deg + t
            for i in range(l - 2):
                t = l - 2 - i
                ms = self.mIds[self.orderIds[cur:cur + t]]
                ms = torch.from_numpy(ms).to(x.device)
                ls = ms + i + 2
                # Three-term recurrence:
                # (l-m) P_l^m = x (2l-1) P_{l-1}^m - (l+m-1) P_{l-2}^m
                plm = (x * (2 * ls - 1) * pmp1m[:, :t] -
                       (ls + ms - 1) * pmm[:, :t]) / (i + 2)
                alp.append(plm)
                pmm = pmp1m
                pmp1m = plm
                cur += t
        alp = torch.cat(alp, 1)
        return alp

    def precompff(self, l):
        """Precompute shifted double factorials (2m-1)!! and SH constants K_lm."""
        f2m_1 = np.arange(l) + 1
        f2m_1 = f2m_1 * 2 - 1
        f2m_1 = np.cumprod(f2m_1)
        # Shift right so index m holds (2m-1)!! (with the m=0 entry kept as 1).
        f2m_1[1:] = f2m_1[:-1]
        # Real-SH normalization; m != 0 entries pick up the sqrt(2) factor.
        Klm = np.sqrt((2 * self.lIds + 1) * factorial(self.lIds - self.mIds) /
                      (4 * np.pi * factorial(self.lIds + self.mIds)))
        m_n0 = np.reshape(np.where(self.mIds), -1)
        Klm[m_n0] *= 2**0.5
        return f2m_1, Klm

    def genalpids(self, l):
        """Build index maps between l-major (l, m) layout and the m-major evaluation order.

        Returns (orderIds, lIds, mIds, num_at_deg, m0inorder, restinorder, orderinorg);
        all are flat integer index arrays of length l*(l+1)//2 (num_at_deg is cumulative counts).
        """
        r_orderIds = np.zeros(l * (l + 1) // 2, dtype=int)
        n_per_deg = np.arange(l + 1)[1:]
        num_deg = np.cumsum(n_per_deg)
        i_order = num_deg - 1
        k = 0
        for i in range(l):
            r_orderIds[k:k + len(i_order)] = i_order
            k += len(i_order)
            i_order = i_order[:-1] + n_per_deg[i:-1]
        # l and m value of every entry in l-major order.
        r_lids = np.zeros(l * (l + 1) // 2, dtype=int)
        r_mids = np.zeros(l * (l + 1) // 2, dtype=int)
        k = 0
        for i in range(l):
            r_lids[k:k + i + 1] = i
            r_mids[k:k + i + 1] = np.arange(i + 1)
            k += i + 1
        # Positions of the m == 0 entries inside the m-major order.
        r_m0inorder = [0] + list(range(l, 0, -1))
        r_m0inorder = np.cumsum(np.asarray(r_m0inorder, dtype=int))[:l]
        tmp = np.ones_like(r_orderIds)
        tmp[r_m0inorder] = 0
        r_restinorder = np.reshape(np.where(tmp), -1)
        # Inverse permutation: m-major position of each l-major entry.
        tmp = np.arange(len(r_orderIds))
        r_orderinorg = tmp.copy()
        r_orderinorg[r_orderIds] = tmp[:]
        return r_orderIds, r_lids, r_mids, num_deg, r_m0inorder, r_restinorder, r_orderinorg

    def genshids(self, l):
        """Build the permutation from [m0 | +m | -m] concat order to conventional SH order.

        Conventional flat SH index is l^2 + l + m; returns (m ids, l ids,
        forward permutation, inverse permutation), each of length l*l.
        """
        sh_ordermIds = np.zeros(l * l, dtype=int)
        sh_orderlIds = np.zeros(l * l, dtype=int)
        sh_orderIds = np.zeros(l * l, dtype=int)
        sh_ordermIds[:len(self.m0inorder)] = self.mIds[self.orderIds][
            self.m0inorder]
        sh_orderlIds[:len(self.m0inorder)] = self.lIds[self.orderIds][
            self.m0inorder]
        k = len(self.m0inorder)
        sh_ordermIds[k:k + len(self.restinorder)] = self.mIds[self.orderIds][
            self.restinorder]
        sh_orderlIds[k:k + len(self.restinorder)] = self.lIds[self.orderIds][
            self.restinorder]
        k += len(self.restinorder)
        # Negative-m block mirrors the positive-m block.
        sh_ordermIds[k:k + len(self.restinorder)] = -self.mIds[self.orderIds][
            self.restinorder]
        sh_orderlIds[k:k + len(self.restinorder)] = self.lIds[self.orderIds][
            self.restinorder]
        print(k + len(self.restinorder))
        # Flat SH index: l^2 + l + m.
        sh_orderIds = sh_orderlIds + sh_ordermIds + sh_orderlIds * sh_orderlIds
        tmp = np.arange(len(sh_orderIds))
        sh_orderinorg = tmp.copy()
        sh_orderinorg[sh_orderIds] = tmp[:]
        return sh_ordermIds, sh_orderlIds, sh_orderIds, sh_orderinorg


class SphericalHarm_table(object):
    """Hard-coded real SH basis tables for degrees 1-5 (faster than the recurrence)."""

    def __init__(self, total_deg):
        self.total_deg = total_deg
        print(self.total_deg * self.total_deg)

    def sh_all(self, indirs, filp_dir=True):
        """Evaluate the SH basis for directions *indirs* (..., 3).

        filp_dir=True negates x and y — presumably to match the runtime
        SphericalHarm convention; confirm against callers.
        """
        indirs = indirs.reshape(-1, 3)
        x = -indirs[:, [0]] if filp_dir else indirs[:, [0]]
        y = -indirs[:, [1]] if filp_dir else indirs[:, [1]]
        z = indirs[:, [2]]
        if self.total_deg == 1:
            return self.SH_l0(x, y, z)
        elif self.total_deg == 2:
            return self.SH_l1(x, y, z)
        elif self.total_deg == 3:
            return self.SH_l2(x, y, z)
        elif self.total_deg == 4:
            return self.SH_l3(x, y, z)
        elif self.total_deg == 5:
            return self.SH_l4(x, y, z)
        else:
            print(
                "Not supporting this order of SH table yet. Please use runtime SH computation."
            )
            exit()

    def SH_l0(self, x, y, z):
        """Degree-0 band: the constant term."""
        l00 = 0.5 * np.sqrt(1 / np.pi) * torch.ones_like(x, device=x.device)
        return l00

    def SH_l1(self, x, y, z):
        """Bands 0-1 concatenated (4 coefficients)."""
        l1_m1 = np.sqrt(3 / 4 / np.pi) * y
        l1_0 = np.sqrt(3 / 4 / np.pi) * z
        l1_1 = np.sqrt(3 / 4 / np.pi) * x
        return torch.cat([self.SH_l0(x, y, z), l1_m1, l1_0, l1_1], 1)

    def SH_l2(self, x, y, z):
        """Bands 0-2 concatenated (9 coefficients)."""
        l2_m2 = 0.5 * np.sqrt(15 / np.pi) * x * y
        l2_m1 = 0.5 * np.sqrt(15 / np.pi) * z * y
        l2_0 = 0.25 * np.sqrt(5 / np.pi) * (-x * x - y * y + 2 * z * z)
        l2_1 = 0.5 * np.sqrt(15 / np.pi) * x * z
        l2_2 = 0.25 * np.sqrt(15 / np.pi) * (x * x - y * y)
        return torch.cat([self.SH_l1(x, y, z), l2_m2, l2_m1, l2_0, l2_1, l2_2],
                         1)

    def SH_l3(self, x, y, z):
        """Bands 0-3 concatenated (16 coefficients)."""
        l3_m3 = 0.25 * np.sqrt(35.0 / 2 / np.pi) * (3 * x * x - y * y) * y
        l3_m2 = 0.5 * np.sqrt(105 / np.pi) * x * y * z
        l3_m1 = 0.25 * np.sqrt(
            21 / 2 / np.pi) * (4 * z * z - x * x - y * y) * y
        l3_0 = 0.25 * np.sqrt(
            7 / np.pi) * (2 * z * z - 3 * x * x - 3 * y * y) * z
        l3_1 = 0.25 * np.sqrt(21 / 2 / np.pi) * (4 * z * z - x * x - y * y) * x
        l3_2 = 0.25 * np.sqrt(105 / np.pi) * (x * x - y * y) * z
        l3_3 = 0.25 * np.sqrt(35.0 / 2 / np.pi) * (x * x - 3 * y * y) * x
        return torch.cat(
            [self.SH_l2(x, y, z), l3_m3, l3_m2, l3_m1, l3_0, l3_1, l3_2, l3_3],
            1)

    def SH_l4(self, x, y, z):
        """Bands 0-4 concatenated (25 coefficients)."""
        l4_m4 = 0.75 * np.sqrt(35.0 / np.pi) * x * y * (x * x - y * y)
        l4_m3 = 0.75 * np.sqrt(35.0 / 2 / np.pi) * (3 * x * x - y * y) * y * z
        l4_m2 = 0.75 * np.sqrt(5 / np.pi) * x * y * (7 * z * z - 1)
        l4_m1 = 0.75 * np.sqrt(5 / 2 / np.pi) * z * y * (7 * z * z - 3)
        l4_0 = 3 / 16 * np.sqrt(
            1 / np.pi) * (35 * z * z * z * z - 30 * z * z + 3)
        l4_1 = 0.75 * np.sqrt(5 / 2 / np.pi) * x * z * (7 * z * z - 3)
        l4_2 = 3 / 8 * np.sqrt(5 / np.pi) * (x * x - y * y) * (7 * z * z - 1)
        l4_3 = 0.75 * np.sqrt(35.0 / 2 / np.pi) * (x * x - 3 * y * y) * x * z
        l4_4 = 3 / 16 * np.sqrt(35.0 / np.pi) * (x * x * (x * x - 3 * y * y) -
                                                 y * y * (3 * x * x - y * y))
        return torch.cat([
            self.SH_l3(x, y, z), l4_m4, l4_m3, l4_m2, l4_m1, l4_0, l4_1, l4_2,
            l4_3, l4_4
        ], 1)
9,061
37.236287
129
py
pointnerf
pointnerf-master/data/llff_ft_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 from PIL import Image import torch from torchvision import transforms as T import torchvision.transforms.functional as F from kornia import create_meshgrid import time import json from . import data_utils import glob from torch.utils.data import Dataset, DataLoader import torch import os from PIL import Image import h5py from data.base_dataset import BaseDataset import configparser import itertools from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir import copy FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def normalize(v): """Normalize a vector.""" return v / np.linalg.norm(v) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img def unique_lst(list1): x = np.array(list1) return np.unique(x) def average_poses(poses): """ Calculate the average pose, which is then used to center all poses using @center_poses. Its computation is as follows: 1. Compute the center: the average of pose centers. 2. Compute the z axis: the normalized average z axis. 3. Compute axis y': the average y axis. 4. Compute x' = y' cross product z, then normalize it as the x axis. 5. Compute the y axis: z cross product x. Note that at step 3, we cannot directly use y' as y axis since it's not necessarily orthogonal to z axis. We need to pass from x to y. Inputs: poses: (N_images, 3, 4) Outputs: pose_avg: (3, 4) the average pose """ # 1. Compute the center center = poses[..., 3].mean(0) # (3) # 2. Compute the z axis z = normalize(poses[..., 2].mean(0)) # (3) # 3. Compute axis y' (no need to normalize as it's not the final output) y_ = poses[..., 1].mean(0) # (3) # 4. 
Compute the x axis x = normalize(np.cross(y_, z)) # (3) # 5. Compute the y axis (as z and x are normalized, y is already of norm 1) y = np.cross(z, x) # (3) pose_avg = np.stack([x, y, z, center], 1) # (3, 4) return pose_avg def get_rays(directions, c2w): """ Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate """ # Rotate ray directions from camera coordinate to the world coordinate c2w = torch.FloatTensor(c2w) rays_d = directions @ c2w[:3, :3].T # (H, W, 3) # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) # The origin of all rays is the camera origin in world coordinate rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) rays_d = rays_d.view(-1, 3) rays_o = rays_o.view(-1, 3) return rays_o, rays_d def get_ray_directions(H, W, focal, center=None): """ Get ray directions for all pixels in camera coordinate. 
Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate """ grid = create_meshgrid(H, W, normalized_coordinates=False)[0] i, j = grid.unbind(-1) # the direction here is without +0.5 pixel centering as calibration is not so accurate # see https://github.com/bmild/nerf/issues/24 cent = center if center is not None else [W / 2, H / 2] directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1) # (H, W, 3) return directions def flip_z(poses): z_flip_matrix = np.eye(4, dtype=np.float32) z_flip_matrix[2, 2] = -1.0 return np.matmul(poses, z_flip_matrix[None,...]) class LlffFtDataset(BaseDataset): @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random sample parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=2.125, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=4.525, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--bg_color', type=str, default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) parser.add_argument( '--scan', 
type=str, default="scan1", help='' ) parser.add_argument( '--full_comb', type=int, default=0, help='' ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument('--normview', type=int, default=0, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.' ) parser.add_argument( '--img_wh', type=int, nargs=2, default=(960, 640), help='resize target of the image' ) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' ) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--holdoff', type=int, default=8, help='normalize the ray_dir to unit length or not, default not') return parser def initialize(self, opt, downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None): self.opt = opt self.data_dir = opt.data_root self.scan = opt.scan self.split = opt.split self.img_wh = (int(opt.img_wh[0] * downSample), int(opt.img_wh[1] * downSample)) self.downSample = downSample self.scale_factor = 1.0 / 1.0 self.max_len = max_len 
self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.opt.bg_color.split(",")] self.define_transforms() self.ori_poses_bounds = np.load(os.path.join(self.data_dir, self.scan, 'poses_bounds.npy')) poses, avg_poses, bounds = self.get_poses(self.ori_poses_bounds) self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32) norm_c2w = None if opt.normview == 1: norm_c2w = avg_poses if opt.normview == 2: self.norm_w2c, self.norm_c2w = torch.as_tensor(np.linalg.inv(avg_poses), device="cuda", dtype=torch.float32), torch.as_tensor(avg_poses, device="cuda", dtype=torch.float32) self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(poses, bounds, norm_c2w=norm_c2w) self.build_init_metas(opt.holdoff) self.load_images() self.total = len(self.id_list) print("dataset total:", self.split, self.total) # # def read_images(self): # image_paths = sorted(glob.glob(os.path.join(self.root_dir, 'images_4/*'))) def load_images(self): imgs = [] image_paths = sorted(glob.glob(os.path.join(self.data_dir, self.scan, 'images_4/*'))) print("id_list", self.id_list, image_paths) for i in self.all_id_list: img = Image.open(image_paths[i]).convert('RGB') img = img.resize(self.img_wh, Image.LANCZOS) img = self.transform(img) # (3, h, w) imgs.append(img) self.imgs = imgs def get_poses(self, poses_bounds): poses = poses_bounds[:, :15].reshape(-1, 3, 5) # (N_images, 3, 5) bounds = poses_bounds[:, -2:] # (N_images, 2) # Step 1: rescale focal length according to training resolution H, W, focal = poses[0, :, -1] # original intrinsics, same for all 
images self.focal = [focal * self.img_wh[0] / W, focal * self.img_wh[1] / H] # Step 2: correct poses poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1) poses, avg_poses = self.center_poses(poses, self.blender2opencv) near_original = bounds.min() far_original = bounds.max() scale_factor = near_original * 0.75 # 0.75 is the default parameter bounds /= scale_factor poses[..., 3] /= scale_factor avg_poses[..., 3] /= scale_factor avg_poses_holder = np.eye(4) avg_poses_holder[:3] = avg_poses # 2.65 / 200 * 192 = 2.544, min 2.1250 # range_original = far_original - near_original # scale_factor = range_original / 2.544 # bounds /= scale_factor # poses[..., 3] /= scale_factor # avg_poses[..., 3] /= scale_factor # avg_poses_holder = np.eye(4) # avg_poses_holder[:3] = avg_poses return poses, avg_poses_holder, bounds def build_proj_mats(self, poses, bounds, norm_c2w=None): w, h = self.img_wh proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], [] self.all_id_list = range(len(poses)) self.near_far = [bounds.min() * 0.8, bounds.max() * 1.2] # self.near_far = np.asarray([bounds.min()*0.9, bounds.max()*1.1]).astype(np.float32) print("dataset near_far", self.near_far) for vid in self.all_id_list: c2w = np.eye(4, dtype=np.float32) c2w[:3] = poses[vid] w2c = np.linalg.inv(c2w) if norm_c2w is not None: w2c = w2c @ norm_c2w c2w = np.linalg.inv(w2c) cam2worlds.append(c2w) world2cams.append(w2c) # build proj mat from source views to ref view proj_mat_l = np.eye(4) intrinsic = np.asarray([[self.focal[0], 0, w / 2], [0, self.focal[1], h / 2], [0, 0, 1]]) intrinsics.append(intrinsic.copy()) intrinsic[:2] = intrinsic[:2] / 4 # 4 times downscale in the feature space proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4] proj_mats += [[proj_mat_l, bounds[vid]]] # proj_mats += [[proj_mat_l, self.near_far]] return proj_mats, np.stack(intrinsics), np.stack(world2cams), np.stack(cam2worlds) def build_init_metas(self, holdoff): self.id_list_test = 
np.arange(len(self.all_id_list))[::holdoff] self.id_list_train = np.array([i for i in np.arange(len(self.all_id_list)) if (i not in self.id_list_test)]) self.id_list = self.id_list_test if self.split == "test" else self.id_list_train self.view_id_list = [] # index is id_list's position e.g., the real image id is id_list[view_id] cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds[self.id_list_train, :, :]] test_cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds[self.id_list_test, :, :]] if self.split=="train": cam_xyz = np.stack(cam_xyz_lst, axis=0) test_cam_xyz = np.stack(test_cam_xyz_lst, axis=0) # if self.opt.full_comb <= 1: triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=test_cam_xyz, full_comb=self.opt.full_comb >=1) print("triangles:", triangles.shape) if self.opt.full_comb <= 1: self.view_id_list = [triangles[i] for i in range(len(triangles))] elif self.opt.full_comb == 2: # all combination triangles = list(itertools.combinations(range(len(cam_xyz)), 3)) self.view_id_list = [triangles[i] for i in range(len(triangles))] elif self.opt.full_comb in [3,4]: # 1 jump triplets = [] first_dict = {} for tris in triangles: if tris[0] not in first_dict.keys(): first_dict[tris[0]] = [] first_dict[tris[0]] += [tris[1], tris[2]] for key, val in first_dict.items(): first_dict[key] = list(unique_lst(val)) if self.opt.full_comb == 3: for key, val in first_dict.items(): pairs = list(itertools.combinations(first_dict[key], 2)) triplets += [[key]+list(pair) for pair in pairs] self.view_id_list = [triplets[i] for i in range(len(triplets))] elif self.opt.full_comb == 4: second_dict = copy.deepcopy(first_dict) for key, val in first_dict.items(): for second in val: second_dict[key] += first_dict[second] second_dict[key] = list(unique_lst(second_dict[key])) second_dict[key] = [val for val in second_dict[key] if val != key and val not in first_dict[key]] # print("key val", key, second_dict[key]) for key, val in second_dict.items(): pairs = 
list(itertools.combinations(second_dict[key], 2)) print("key val", key, pairs) triplets += [[key] + list(pair) for pair in pairs] print("len()", len(triplets)) # exit() self.view_id_list = [triplets[i] for i in range(len(triplets))] # print("&&&&&&&&&&&&&&&&&&&&&&&&&&self.view_id_list", len(self.view_id_list)) # elif self.opt.full_comb == 4: # 1 jump # if self.opt.full_comb<0: # with open(f'../data/nerf_synth_configs/list/lego360_init_pairs.txt') as f: # for line in f: # str_lst = line.rstrip().split(',') # src_views = [int(x) for x in str_lst] # self.view_id_list.append(src_views) def center_poses(self, poses, blender2opencv): """ Center the poses so that we can use NDC. See https://github.com/bmild/nerf/issues/34 Inputs: poses: (N_images, 3, 4) Outputs: poses_centered: (N_images, 3, 4) the centered poses pose_avg: (3, 4) the average pose """ pose_avg = average_poses(poses) # (3, 4) pose_avg_homo = np.eye(4) pose_avg_homo[:3] = pose_avg # convert to homogeneous coordinate for faster computation # by simply adding 0, 0, 0, 1 as the last row last_row = np.tile(np.array([0, 0, 0, 1]), (len(poses), 1, 1)) # (N_images, 1, 4) poses_homo = \ np.concatenate([poses, last_row], 1) # (N_images, 4, 4) homogeneous coordinate poses_centered = np.linalg.inv(pose_avg_homo) @ poses_homo # (N_images, 4, 4) poses_centered = poses_centered @ blender2opencv poses_centered = poses_centered[:, :3] # (N_images, 3, 4) return poses_centered, (np.linalg.inv(pose_avg_homo) @ blender2opencv)[:3, :] def define_transforms(self): self.transform = T.ToTensor() def __len__(self): if self.split == 'train': return len(self.id_list) if self.max_len <= 0 else self.max_len return len(self.id_list) if self.max_len <= 0 else self.max_len def name(self): return 'NerfSynthFtDataset' def __del__(self): print("end loading") def normalize_rgb(self, data): # to unnormalize image for visualization # data C, H, W C, H, W = data.shape mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1) std = 
np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1) return (data - mean) / std def get_init_item(self, idx, crop=False): sample = {} init_view_num = self.opt.init_view_num view_ids = self.view_id_list[idx] if self.split == 'train': view_ids = view_ids[:init_view_num] affine_mat, affine_mat_inv = [], [] mvs_images, imgs, depths_h = [], [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], [] # record proj mats between views for i in view_ids: vid = self.id_list[i] # mvs_images += [self.normalize_rgb(self.blackimgs[vid])] # mvs_images += [self.whiteimgs[vid]] # mvs_images += [self.blackimgs[vid]] imgs += [self.imgs[vid]] proj_mat_ls, near_far = self.proj_mats[vid] intrinsics.append(self.intrinsics[vid]) w2cs.append(self.world2cams[vid]) c2ws.append(self.cam2worlds[vid]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) near_fars.append(near_far) for i in range(len(affine_mat)): view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub] # c2ws_all = self.cam2worlds[self.remap[view_ids_all]] sample['images'] = imgs # (V, 3, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars'] = near_fars.astype(np.float32) sample['near_fars_depth'] = self.near_far sample['proj_mats'] = 
proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) # sample['light_id'] = np.array(light_idx) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv # sample['scan'] = scan # sample['c2ws_all'] = c2ws_all.astype(np.float32) for key, value in sample.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) sample[key] = value.unsqueeze(0) return sample def __getitem__(self, id, crop=False): item = {} vid = self.id_list[id] img = self.imgs[vid] w2c = self.world2cams[vid] c2w = self.cam2worlds[vid] intrinsic = self.intrinsics[vid] proj_mat_ls, near_far = self.proj_mats[vid] gt_image = np.transpose(img, (1,2,0)) # print("gt_image", gt_image.shape) width, height = gt_image.shape[1], gt_image.shape[0] camrot = (c2w[0:3, 0:3]) campos = c2w[0:3, 3] # print("camrot", camrot, campos) item["intrinsic"] = intrinsic # item["intrinsic"] = sample['intrinsics'][0, ...] 
item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([near_far[1] * 1.1]).view(1, 1) item['near'] = torch.FloatTensor([near_far[0] * 0.9]).view(1, 1) item['h'] = height item['w'] = width # item['depths_h'] = self.depths[id] # print("near_far", near_far) # bounding box subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = 
gt_mask[py.astype(np.int32), px.astype(np.int32), :] gt_image = np.reshape(gt_image, (-1, 3)) item['gt_image'] = gt_image item['id'] = vid if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) return item def get_item(self, idx, crop=False): item = self.__getitem__(idx, crop=crop) for key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = (transform_matrix[0:3, 0:3]) campos = transform_matrix[0:3, 3] focal = self.focal item["focal"] = focal item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) # near far if self.opt.near_plane is not None: near = self.opt.near_plane else: near = max(dist - 1.5, 0.02) if self.opt.far_plane is not None: far = self.opt.far_plane # near + else: far = dist + 0.7 middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([far]).view(1, 1) item['near'] = torch.FloatTensor([near]).view(1, 1) item['h'] = self.height item['w'] = self.width subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, 
size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": px, py = self.proportional_select(gt_mask) else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0) item["pixel_idx"] = pixelcoords raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item
30,712
39.358739
184
py
pointnerf
pointnerf-master/data/nerf_synth_ft_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 from PIL import Image import torch from torchvision import transforms as T import torchvision.transforms.functional as F from kornia import create_meshgrid import time import json from torch.utils.data import Dataset, DataLoader import torch import os from PIL import Image import h5py from data.base_dataset import BaseDataset import configparser from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img def get_rays(directions, c2w): """ Get ray origin and normalized directions in world coordinate for all pixels in one image. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate """ # Rotate ray directions from camera coordinate to the world coordinate c2w = torch.FloatTensor(c2w) rays_d = directions @ c2w[:3, :3].T # (H, W, 3) # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) # The origin of all rays is the camera origin in world coordinate rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) rays_d = rays_d.view(-1, 3) rays_o = rays_o.view(-1, 3) return rays_o, rays_d def get_ray_directions(H, W, focal, center=None): """ Get ray directions for all pixels in camera coordinate. 
Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate """ grid = create_meshgrid(H, W, normalized_coordinates=False)[0] i, j = grid.unbind(-1) # the direction here is without +0.5 pixel centering as calibration is not so accurate # see https://github.com/bmild/nerf/issues/24 cent = center if center is not None else [W / 2, H / 2] directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1) # (H, W, 3) return directions class NerfSynthFtDataset(BaseDataset): def initialize(self, opt, img_wh=[800,800], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None): self.opt = opt self.data_dir = opt.data_root self.scan = opt.scan self.split = opt.split self.img_wh = (int(800 * downSample), int(800 * downSample)) self.downSample = downSample self.scale_factor = 1.0 / 1.0 self.max_len = max_len self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.opt.bg_color.split(",")] self.define_transforms() with open(os.path.join(self.data_dir, self.scan, f'transforms_train.json'), 'r') as f: self.meta = json.load(f) self.build_init_metas() self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32) if opt.normview > 0: _, _ , w2cs, c2ws = self.build_proj_mats(list=torch.load('../data/dtu_configs/pairs.th')[f'{self.scan}_test']) norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws) if opt.normview >= 2: 
self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32) norm_w2c, norm_c2w = None, None self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(norm_w2c=norm_w2c, norm_c2w=norm_c2w) self.read_meta() self.total = len(self.id_list) print("dataset total:", self.split, self.total) @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random sample parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=2.125, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=4.525, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--bg_color', type=str, default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) parser.add_argument( '--scan', type=str, default="scan1", help='' ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument('--normview', type=int, default=0, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids 
selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.' ) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' ) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') return parser def normalize_cam(self, w2cs, c2ws): # cam_xyz = c2ws[..., :3, 3] # rtp = self.bcart2sphere(cam_xyz) # print(rtp.shape) # rtp = np.mean(rtp, axis=0) # avg_xyz = self.sphere2cart(rtp) # euler_lst = [] # for i in range(len(c2ws)): # euler_angles = self.matrix2euler(c2ws[i][:3,:3]) # print("euler_angles", euler_angles) # euler_lst += [euler_angles] # euler = np.mean(np.stack(euler_lst, axis=0), axis=0) # print("euler mean ",euler) # M = self.euler2matrix(euler) # norm_c2w = np.eye(4) # norm_c2w[:3,:3] = M # norm_c2w[:3,3] = avg_xyz # norm_w2c = np.linalg.inv(norm_c2w) # return norm_w2c, norm_c2w index = 0 return w2cs[index], c2ws[index] def define_transforms(self): self.transform = T.ToTensor() # # def load_poses_all(self): # c2ws = [] # List = sorted(os.listdir(os.path.join(self.data_dir, f'Cameras/train/'))) # for item in List: # proj_mat_filename = os.path.join(self.data_dir, f'Cameras/train/{item}') # intrinsic, w2c, near_far = self.read_cam_file(proj_mat_filename) # intrinsic[:2] *= 4 # c2ws.append(np.linalg.inv(w2c)) # self.focal = [intrinsic[0, 0], 
intrinsic[1, 1]] # return np.stack(c2ws) def build_init_metas(self): self.view_id_list = [] self.id_list = [] if self.split=="train": with open(f'../data/nerf_synth_configs/list/{self.scan}_finetune_init_pairs_final.txt') as f: num_lst = f.readline().rstrip().split(',') num_viewpoint, num_pairs = int(num_lst[0]), int(num_lst[1]) # viewpoints (20) for _ in range(num_viewpoint): ref_view = int(f.readline().rstrip()) str_lst=f.readline().rstrip().split(',') src_views = [int(x) for x in str_lst] self.view_id_list.append([ref_view] + src_views) self.id_list.append(ref_view) for _ in range(num_viewpoint, num_pairs): ref_view = int(f.readline().rstrip()) str_lst = f.readline().rstrip().split(',') src_views = [int(x) for x in str_lst] self.view_id_list.append([ref_view] + src_views) else: self.id_list = torch.load('../data/dtu_configs/pairs.th')[f'{self.scan}_{self.split}'] def build_proj_mats(self, list=None, norm_w2c=None, norm_c2w=None): proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], [] list = self.id_list if list is None else list focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) # original focal length focal *= self.img_wh[0] / 800 # modify focal length to match size self.img_wh self.focal = focal self.near_far = np.array([2.0, 6.0]) for vid in list: frame = self.meta['frames'][vid] c2w = np.array(frame['transform_matrix']) @ self.blender2opencv if norm_w2c is not None: c2w = norm_w2c @ c2w w2c = np.linalg.inv(c2w) cam2worlds.append(c2w) world2cams.append(w2c) intrinsic = np.array([[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]) intrinsics.append(intrinsic.copy()) # multiply intrinsics and extrinsics to get projection matrix proj_mat_l = np.eye(4) intrinsic[:2] = intrinsic[:2] / 4 proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4] proj_mats += [(proj_mat_l, self.near_far)] proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics) world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds) return proj_mats, 
intrinsics, world2cams, cam2worlds def define_transforms(self): self.transform = T.ToTensor() def read_meta(self): w, h = self.img_wh self.image_paths = [] self.poses = [] self.all_rays = [] self.blackimgs = [] self.whiteimgs = [] self.depths = [] self.alphas = [] self.view_id_dict = {} self.directions = get_ray_directions(h, w, [self.focal, self.focal]) # (h, w, 3) count = 0 for i, idx in enumerate(self.id_list): frame = self.meta['frames'][idx] image_path = os.path.join(self.data_dir, self.scan, f"{frame['file_path']}.png") self.image_paths += [image_path] img = Image.open(image_path) img = img.resize(self.img_wh, Image.LANCZOS) img = self.transform(img) # (4, h, w) self.depths += [(img[-1:, ...] > 0.1).numpy().astype(np.float32)] self.alphas += [img[-1:].numpy().astype(np.float32)] self.blackimgs += [img[:3] * img[-1:]] self.whiteimgs += [img[:3] * img[-1:] + (1 - img[-1:])] # ray directions for all pixels, same for all images (same H, W, focal) # rays_o, rays_d = get_rays(self.directions, self.cam2worlds[i]) # both (h*w, 3) # # self.all_rays += [torch.cat([rays_o, rays_d, # self.near_far[0] * torch.ones_like(rays_o[:, :1]), # self.near_far[1] * torch.ones_like(rays_o[:, :1])], 1)] # (h*w, 8) self.view_id_dict[idx] = i self.poses = self.cam2worlds def __len__(self): if self.split == 'train': return len(self.id_list) if self.max_len <= 0 else self.max_len return len(self.id_list) if self.max_len <= 0 else self.max_len def name(self): return 'NerfSynthFtDataset' def __del__(self): print("end loading") def normalize_rgb(self, data): # to unnormalize image for visualization # data C, H, W C, H, W = data.shape mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1) std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1) return (data - mean) / std def get_init_item(self, idx, crop=False): sample = {} init_view_num = self.opt.init_view_num view_ids = self.view_id_list[idx] if self.split == 'train': view_ids = view_ids[:init_view_num] 
affine_mat, affine_mat_inv = [], [] mvs_images, imgs, depths_h, alphas = [], [], [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], [] # record proj mats between views for i in view_ids: vid = self.view_id_dict[i] # mvs_images += [self.normalize_rgb(self.blackimgs[vid])] # mvs_images += [self.whiteimgs[vid]] mvs_images += [self.blackimgs[vid]] imgs += [self.whiteimgs[vid]] proj_mat_ls, near_far = self.proj_mats[vid] intrinsics.append(self.intrinsics[vid]) w2cs.append(self.world2cams[vid]) c2ws.append(self.cam2worlds[vid]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) depths_h.append(self.depths[vid]) alphas.append(self.alphas[vid]) near_fars.append(near_far) for i in range(len(affine_mat)): view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) mvs_images = np.stack(mvs_images).astype(np.float32) depths_h = np.stack(depths_h) alphas = np.stack(alphas) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub] # c2ws_all = self.cam2worlds[self.remap[view_ids_all]] sample['images'] = imgs # (V, 3, H, W) sample['mvs_images'] = mvs_images # (V, 3, H, W) sample['depths_h'] = depths_h.astype(np.float32) # (V, H, W) sample['alphas'] = alphas.astype(np.float32) # (V, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars'] = near_fars.astype(np.float32) 
sample['proj_mats'] = proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) # sample['light_id'] = np.array(light_idx) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv # sample['scan'] = scan # sample['c2ws_all'] = c2ws_all.astype(np.float32) for key, value in sample.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) sample[key] = value.unsqueeze(0) return sample def __getitem__(self, id, crop=False): item = {} img = self.whiteimgs[id] w2c = self.world2cams[id] c2w = self.cam2worlds[id] intrinsic = self.intrinsics[id] proj_mat_ls, near_far = self.proj_mats[id] gt_image = np.transpose(img, (1,2,0)) # print("gt_image", gt_image.shape) width, height = gt_image.shape[1], gt_image.shape[0] camrot = (c2w[0:3, 0:3]) campos = c2w[0:3, 3] # print("camrot", camrot, campos) item["intrinsic"] = intrinsic # item["intrinsic"] = sample['intrinsics'][0, ...] 
item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1) item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1) item['h'] = height item['w'] = width item['depths_h'] = self.depths[id] # bounding box subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :] 
gt_image = np.reshape(gt_image, (-1, 3)) item['gt_image'] = gt_image item['id'] = id if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) return item def get_item(self, idx, crop=False): item = self.__getitem__(idx, crop=crop) for key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = (transform_matrix[0:3, 0:3]) campos = transform_matrix[0:3, 3] focal = self.focal item["focal"] = focal item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) # near far if self.opt.near_plane is not None: near = self.opt.near_plane else: near = max(dist - 1.5, 0.02) if self.opt.far_plane is not None: far = self.opt.far_plane # near + else: far = dist + 0.7 middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([far]).view(1, 1) item['near'] = torch.FloatTensor([near]).view(1, 1) item['h'] = self.height item['w'] = self.width subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif 
self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": px, py = self.proportional_select(gt_mask) else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0) item["pixel_idx"] = pixelcoords raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item
26,636
39.055639
174
py
pointnerf
pointnerf-master/data/dtu_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 from PIL import Image import torch from torchvision import transforms as T import torchvision.transforms.functional as F from torch.utils.data import Dataset, DataLoader import torch import os from PIL import Image import h5py from data.base_dataset import BaseDataset import configparser from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img class DtuDataset(BaseDataset): def initialize(self, opt, n_views=3, levels=1, img_wh=[640,512], downSample=1.0, max_len=-1): self.opt = opt self.data_dir = opt.data_root if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.bg_color.split()] if len(self.bg_color) != 3: self.bg_color = None self.img_wh = img_wh self.downSample = downSample self.scale_factor = 1.0 / 200 self.max_len = max_len self.n_views = n_views self.levels = levels # FPN levels self.split = opt.split self.build_metas() self.build_proj_mats() self.define_transforms() self.near_far = np.asarray([2.125, 4.525]) if img_wh is not None: assert img_wh[0] % 32 == 0 and img_wh[1] % 32 == 0, \ 'img_wh must both be multiples of 32!' 
self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if os.path.isfile(self.data_dir + "/bb.txt"): self.bb = np.loadtxt(self.data_dir + "/bb.txt") print("boundingbox", self.bb) else: self.bb = np.array([-1, -1, -1, 1, 1, 1]).reshape( (2, 3)).astype(np.float32) self.total = len(self.metas) print("dataset total:", self.split, self.total) return @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random sample parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=2.0, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=6.0, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument( '--bg_color', type=str, default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) # parser.add_argument( # '--z_dir', # type=str, # default="down", # help= # 'z axis up (in nerf json), down (in reflectance ply)' # ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids selected in the original dataset. The default is range(0, 385). 
If the ids cannot be generated by range, use --id_list to specify any ids.' ) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' ) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') return parser def define_transforms(self): self.transform = T.Compose([T.ToTensor(), # T.Normalize(mean=[0.485, 0.456, 0.406], # std=[0.229, 0.224, 0.225]), ]) def build_metas(self): self.metas = [] with open(f'../data/dtu_configs/lists/dtu_{self.split}_all.txt') as f: self.scans = [line.rstrip() for line in f.readlines()] # light conditions 0-6 for training # light condition 3 for testing (the brightest?) 
light_idxs = [3] if 'train' != self.split else range(7) self.id_list = [] for scan in self.scans: with open(f'../data/dtu_configs/dtu_pairs.txt') as f: num_viewpoint = int(f.readline()) # viewpoints (49) for _ in range(num_viewpoint): ref_view = int(f.readline().rstrip()) src_views = [int(x) for x in f.readline().rstrip().split()[1::2]] for light_idx in light_idxs: self.metas += [(scan, light_idx, ref_view, src_views)] self.id_list.append([ref_view] + src_views) self.id_list = np.unique(self.id_list) self.build_remap() def build_proj_mats(self): proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], [] for vid in self.id_list: proj_mat_filename = os.path.join(self.data_dir, f'Cameras/train/{vid:08d}_cam.txt') intrinsic, extrinsic, near_far = self.read_cam_file(proj_mat_filename) intrinsic[:2] *= 4 extrinsic[:3, 3] *= self.scale_factor intrinsic[:2] = intrinsic[:2] * self.downSample intrinsics += [intrinsic.copy()] # multiply intrinsics and extrinsics to get projection matrix proj_mat_l = np.eye(4) intrinsic[:2] = intrinsic[:2] / 4 proj_mat_l[:3, :4] = intrinsic @ extrinsic[:3, :4] proj_mats += [(proj_mat_l, near_far)] world2cams += [extrinsic] cam2worlds += [np.linalg.inv(extrinsic)] self.proj_mats, self.intrinsics = np.stack(proj_mats), np.stack(intrinsics) self.world2cams, self.cam2worlds = np.stack(world2cams), np.stack(cam2worlds) def read_cam_file(self, filename): with open(filename) as f: lines = [line.rstrip() for line in f.readlines()] # extrinsics: line [1,5), 4x4 matrix extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ') extrinsics = extrinsics.reshape((4, 4)) # intrinsics: line [7-10), 3x3 matrix intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ') intrinsics = intrinsics.reshape((3, 3)) # depth_min & depth_interval: line 11 depth_min = float(lines[11].split()[0]) * self.scale_factor depth_max = depth_min + float(lines[11].split()[1]) * 192 * self.scale_factor * 1.06 self.depth_interval = 
float(lines[11].split()[1]) return intrinsics, extrinsics, [depth_min, depth_max] def check_read_depth(self, depth_filename, processed_filename): depth_h = np.array(read_pfm(depth_filename)[0], dtype=np.float32) # (800, 800) ? (1200. 1600) depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST) # (600, 800) depth_h = depth_h[44:556, 80:720] # (512, 640) depth_h = cv2.resize(depth_h, None, fx=self.downSample, fy=self.downSample, interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!! depth = cv2.resize(depth_h, None, fx=1.0 / 4, fy=1.0 / 4, interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!! depth_pro = np.array(read_pfm(processed_filename)[0], dtype=np.float32) # (800, 800) ? (1200. 1600) print("depth", depth.shape, depth_pro.shape, np.sum(np.abs(depth-depth_pro))) def read_depth(self, filename, downSample=None): downSample = self.downSample if downSample is None else downSample depth_h = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800) ? (1200. 1600) depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST) # (600, 800) depth_h = depth_h[44:556, 80:720] # (512, 640) depth_h = cv2.resize(depth_h, None, fx=downSample, fy=downSample, interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!! depth = cv2.resize(depth_h, None, fx=1.0 / 4, fy=1.0 / 4, interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!! 
mask = depth > 0 return depth, mask, depth_h def build_remap(self): self.remap = np.zeros(np.max(self.id_list) + 1).astype('int') for i, item in enumerate(self.id_list): self.remap[item] = i def __len__(self): return len(self.metas) if self.max_len <= 0 else self.max_len def name(self): return 'DtuDataset' def __del__(self): print("end loading") def __getitem__(self, idx, crop=False): sample = {} scan, light_idx, target_view, src_views = self.metas[idx] if self.split=='train': ids = torch.randperm(5)[:3] view_ids = [src_views[i] for i in ids] + [target_view] else: view_ids = [src_views[i] for i in range(3)] + [target_view] affine_mat, affine_mat_inv = [], [] imgs, depths_h = [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], [] # record proj mats between views for i, vid in enumerate(view_ids): # NOTE that the id in image file names is from 1 to 49 (not 0~48) img_filename = os.path.join(self.data_dir, f'Rectified/{scan}_train/rect_{vid + 1:03d}_{light_idx}_r5000.png') # print("img_filename",img_filename) depth_filename = os.path.join(self.data_dir, f'Depths_raw/{scan}/depth_map_{vid:04d}.pfm') img = Image.open(img_filename) # print("img_filename", img_filename, depth_filename) img_wh = np.round(np.array(img.size) * self.downSample).astype('int') img = img.resize(img_wh, Image.BILINEAR) img = self.transform(img) imgs += [img] index_mat = self.remap[vid] proj_mat_ls, near_far = self.proj_mats[index_mat] intrinsics.append(self.intrinsics[index_mat]) w2cs.append(self.world2cams[index_mat]) c2ws.append(self.cam2worlds[index_mat]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) if os.path.exists(depth_filename): depth, mask, depth_h = self.read_depth(depth_filename) # self.check_read_depth(depth_filename, os.path.join(self.data_dir, f'Depths/{scan}_train/depth_map_{vid:04d}.pfm')) depth_h *= self.scale_factor depths_h.append(depth_h) else: depths_h.append(np.zeros((1, 1))) near_fars.append(near_far) for i in 
range(len(affine_mat)): view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) # if self.split == 'train': # imgs = colorjitter(imgs, 1.0+(torch.rand((4,))*2-1.0)*0.5) # imgs = F.normalize(imgs,mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) depths_h = np.stack(depths_h) # print("proj_mats", proj_mats[0].shape) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub] c2ws_all = self.cam2worlds[self.remap[view_ids_all]] sample['images'] = imgs # (V, 3, H, W) sample['mvs_images'] = imgs #self.normalize_rgb(imgs) # (V, 3, H, W) sample['depths_h'] = depths_h.astype(np.float32) # (V, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars_depth'] = near_fars.astype(np.float32)[0] sample['near_fars'] = np.tile(self.near_far.astype(np.float32)[None,...],(len(near_fars),1)) sample['proj_mats'] = proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) sample['light_id'] = np.array(light_idx) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv sample['scan'] = scan sample['c2ws_all'] = c2ws_all.astype(np.float32) item = {} gt_image = np.transpose(imgs[self.opt.trgt_id, ...], (1,2,0)) width, height = gt_image.shape[1], gt_image.shape[0] # gt_mask = (gt_image[..., -1] > 0.1).astype(np.float32) # item['gt_mask'] = 
torch.from_numpy(gt_mask).permute(2, 0, 1).float() # gt_image = gt_image / 255.0 # already / 255 for blender transform_matrix = w2cs[self.opt.ref_vid] @ c2ws[self.opt.trgt_id] # transform_matrix = w2cs[0] @ c2ws[0] camrot = (transform_matrix[0:3, 0:3]) campos = transform_matrix[0:3, 3] item["intrinsic"] = sample['intrinsics'][self.opt.trgt_id, ...] # item["intrinsic"] = sample['intrinsics'][0, ...] item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([near_fars[self.opt.trgt_id][1]]).view(1, 1) item['near'] = torch.FloatTensor([near_fars[self.opt.trgt_id][0]]).view(1, 1) item['h'] = height item['w'] = width # bounding box item['bb'] = torch.from_numpy(self.bb).float() subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = 
get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :] gt_image = np.reshape(gt_image, (-1, 3)) # gt_mask = np.reshape(gt_mask, (-1, 1)) # if self.opt.bg_color is not None: # gt_image = np.clip( # np.power( # np.power(gt_image, 2.2) + # (1 - gt_mask) * self.opt.bg_color, 1.0 / 2.2), 0, 1) # gt_mask[gt_mask > 0] = 1 item['gt_image'] = gt_image # item['gt_image'] = torch.from_numpy(gt_image).float().contiguous() # item["gt_mask"] = torch.from_numpy(gt_mask).float().contiguous() if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) sample.update(item) return sample def get_item(self, idx, crop=False): item = self.__getitem__(idx, crop=crop) for key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = (transform_matrix[0:3, 0:3]) campos = transform_matrix[0:3, 3] focal = self.focal item["focal"] = focal item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) # near far if self.opt.near_plane is not None: near = self.opt.near_plane else: near = max(dist - 1.5, 0.02) if self.opt.far_plane is not None: far = self.opt.far_plane # 
near + else: far = dist + 0.7 middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([far]).view(1, 1) item['near'] = torch.FloatTensor([near]).view(1, 1) item['h'] = self.height item['w'] = self.width # bounding box item['bb'] = torch.from_numpy(self.bb).float() subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": px, py = self.proportional_select(gt_mask) else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0) item["pixel_idx"] = pixelcoords raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = 
torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def check_points_range(self): import glob from os import path data_dir='/home/xharlie/user_space/data/nrData/dtu/' near_far=[2.125, 4.525] W, H = 640, 512 downSample=1 self.scale_factor=1/200 scale_factor=1/200 all_min_lst, all_max_lst = [], [] for idx in range(1, 129): scan = "scan{}".format(idx) obj_min_lst, obj_max_lst = [], [] for vid in range(49): depth_filename = os.path.join(data_dir, f'Depths_raw/{scan}/depth_map_{vid:04d}.pfm') camfilename = os.path.join(data_dir, f'Cameras/train/{vid:08d}_cam.txt') if not path.exists(depth_filename) or not path.exists(camfilename): print("depth_filename:", path.exists(depth_filename), "; camfilename ",path.exists(camfilename)) break _, _, depth_h = self.read_depth(depth_filename, downSample=downSample) depth_h *= scale_factor mask = np.logical_and(depth_h >= near_far[0], depth_h <= near_far[1]).reshape(-1) intrinsic, extrinsic, near_far = self.read_cam_file(camfilename) intrinsic[:2] *= 4 extrinsic[:3, 3] *= scale_factor intrinsic[:2] = intrinsic[:2] * downSample w2c = extrinsic c2w = np.linalg.inv(extrinsic) # mask = torch.logical_and(depth_h >= near_far[0], cam_expected_depth <= near_far[1]) ndc_expected_depth = (depth_h - near_far[0]) / (near_far[1] - near_far[0]) # 512, 640 valid_z = ndc_expected_depth valid_x = np.arange(W, dtype=np.float32) / (W - 1) valid_y = np.arange(H, dtype=np.float32) / (H - 1) valid_y, valid_x = np.meshgrid(valid_y, valid_x, indexing="ij") # 512, 640; 512, 640 # B,N,H,W ndc_xyz = np.stack([valid_x, valid_y, valid_z], axis=-1) # 512, 640, 3 cam_xyz = self.ndc_2_cam(ndc_xyz, near_far, intrinsic, W, H) w_xyz = np.concatenate([cam_xyz, np.ones_like(cam_xyz[...,0:1])], axis=-1) @ c2w.T # (327680, 4) w_xyz = w_xyz[mask,:3] xyz_min_np, xyz_max_np = np.min(w_xyz, axis=-2), np.max(w_xyz, axis=-2) obj_min_lst.append(xyz_min_np) obj_max_lst.append(xyz_max_np) max_edge = max(xyz_max_np-xyz_min_np) # print("xyz_min_np, xyz_max_np edges,", 
xyz_min_np, xyz_max_np, xyz_max_np-xyz_min_np) if len(obj_min_lst) > 0: obj_min = np.min(np.array(obj_min_lst), axis=-2) obj_max= np.max(np.array(obj_max_lst), axis=-2) all_min_lst.append(obj_min) all_max_lst.append(obj_max) print(scan, "min", obj_min, "max", obj_max) obj_min = np.min(np.array(all_min_lst), axis=-2) obj_max = np.max(np.array(all_max_lst), axis=-2) print("xyz_min, xyz_max, edges,", obj_min, obj_max, obj_max-obj_min) def ndc_2_cam(self, ndc_xyz, near_far, intrinsic, W, H): inv_scale = np.array([[W - 1, H - 1]]) cam_z = ndc_xyz[..., 2:3] * (near_far[1] - near_far[0]) + near_far[0] cam_xy = ndc_xyz[..., :2] * inv_scale * cam_z cam_xyz = np.concatenate([cam_xy, cam_z], axis=-1).reshape(-1,3) cam_xyz = cam_xyz @ np.linalg.inv(intrinsic.T) return cam_xyz if __name__ == '__main__': db = DtuDataset() db.check_points_range() # python -m data.dtu_dataset
28,325
41.595489
174
py
pointnerf
pointnerf-master/data/base_dataset.py
import torch.utils.data as data from PIL import Image class BaseDataset(data.Dataset): def __init__(self): super(BaseDataset, self).__init__() def name(self): return self.__class__.__name__ @staticmethod def modify_commandline_options(parser, is_train): return parser def initialize(self, opt): raise NotImplementedError() def __len__(self): raise NotImplementedError()
440
20
53
py
pointnerf
pointnerf-master/data/dtu_ft_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 from PIL import Image import torch from torchvision import transforms as T import torchvision.transforms.functional as F from kornia import create_meshgrid import time import itertools import random from torch.utils.data import Dataset, DataLoader import torch import os from PIL import Image import h5py from . import data_utils from utils import util from data.base_dataset import BaseDataset import configparser from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img def get_rays(directions, c2w): """ Get ray origin and normalized directions in world coordinate for all pixels in one image. 
Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate """ # Rotate ray directions from camera coordinate to the world coordinate c2w = torch.FloatTensor(c2w) rays_d = directions @ c2w[:3, :3].T # (H, W, 3) # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) # The origin of all rays is the camera origin in world coordinate rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) rays_d = rays_d.view(-1, 3) rays_o = rays_o.view(-1, 3) return rays_o, rays_d def get_ray_directions(H, W, focal, center=None): """ Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate """ grid = create_meshgrid(H, W, normalized_coordinates=False)[0] i, j = grid.unbind(-1) # the direction here is without +0.5 pixel centering as calibration is not so accurate # see https://github.com/bmild/nerf/issues/24 cent = center if center is not None else [W / 2, H / 2] directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1) # (H, W, 3) return directions class DtuFtDataset(BaseDataset): def initialize(self, opt, n_views=3, img_wh=[640,512], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None): self.opt = opt self.data_dir = opt.data_root self.scan = opt.scan self.split = opt.split assert int(640 * downSample) % 32 == 0, \ f'image width is {int(640 * downsample)}, it should be divisible by 32, you may need to 
modify the imgScale' self.img_wh = (int(640 * downSample), int(512 * downSample)) self.downSample = downSample self.scale_factor = 1.0 / 200 self.max_len = max_len self.n_views = n_views self.define_transforms() self.pair_idx = torch.load('../data/dtu_configs/pairs.th') self.pair_idx = [self.pair_idx['dtu_train'],self.pair_idx['dtu_test']] print("dtu_ft train id", self.pair_idx[0]) print("dtu_ft test id", self.pair_idx[1]) self.bbox_3d = torch.tensor([[-1.0, -1.0, 2.2], [1.0, 1.0, 4.2]]) # self.near_far = np.asarray([2.125, 4.525]) if img_wh is not None: assert img_wh[0] % 32 == 0 and img_wh[1] % 32 == 0, \ 'img_wh must both be multiples of 32!' self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.opt.bg_color.split(",")] self.build_init_metas() self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32) if opt.normview > 0: _, _ , w2cs, c2ws = self.build_proj_mats(list=self.pair_idx[1]) norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws) if opt.normview == 2: self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32) norm_w2c, norm_c2w = None, None self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(norm_w2c=norm_w2c, norm_c2w=norm_c2w) self.build_view_lst() if opt.split != "render": self.read_meta() self.total = len(self.id_list) else: self.get_render_poses() self.total = len(self.render_poses) print("dataset total:", self.split, self.total) def get_render_poses(self): self.render_poses = util.gen_render_path(self.cam2worlds[:3,...], N_views=60) # cam_xyz_lst = [c2w[:3, 3] for c2w in 
self.cam2worlds] # cam_xyz = np.stack(cam_xyz_lst, axis=0) # triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=None, full_comb=False) # self.render_poses = util.gen_render_path_contour(triangles, self.cam2worlds, N_views=200) # def gen_render_path(c2ws, N_views=30): # N = len(c2ws) # rotvec, positions = [], [] # rotvec_inteplat, positions_inteplat = [], [] # weight = np.linspace(1.0, .0, N_views // 3, endpoint=False).reshape(-1, 1) # for i in range(N): # r = R.from_matrix(c2ws[i, :3, :3]) # euler_ange = r.as_euler('xyz', degrees=True).reshape(1, 3) # if i: # mask = np.abs(euler_ange - rotvec[0]) > 180 # euler_ange[mask] += 360.0 # rotvec.append(euler_ange) # positions.append(c2ws[i, :3, 3:].reshape(1, 3)) # # if i: # rotvec_inteplat.append(weight * rotvec[i - 1] + (1.0 - weight) * rotvec[i]) # positions_inteplat.append(weight * positions[i - 1] + (1.0 - weight) * positions[i]) # # rotvec_inteplat.append(weight * rotvec[-1] + (1.0 - weight) * rotvec[0]) # positions_inteplat.append(weight * positions[-1] + (1.0 - weight) * positions[0]) # # c2ws_render = [] # angles_inteplat, positions_inteplat = np.concatenate(rotvec_inteplat), np.concatenate(positions_inteplat) # for rotvec, position in zip(angles_inteplat, positions_inteplat): # c2w = np.eye(4) # c2w[:3, :3] = R.from_euler('xyz', rotvec, degrees=True).as_matrix() # c2w[:3, 3:] = position.reshape(3, 1) # c2ws_render.append(c2w.copy()) # c2ws_render = np.stack(c2ws_render) # return c2ws_render @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random sample parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', 
type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=2, #2.125, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=6, #4.525, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--bg_color', type=str, default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) parser.add_argument( '--scan', type=str, default="scan1", help='' ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument('--normview', type=int, default=0, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.' ) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' 
) parser.add_argument( '--full_comb', type=int, default=0, help='' ) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--uni_depth', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') return parser def define_transforms(self): self.transform = T.ToTensor() def read_cam_file(self, filename): with open(filename) as f: lines = [line.rstrip() for line in f.readlines()] # extrinsics: line [1,5), 4x4 matrix extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ') extrinsics = extrinsics.reshape((4, 4)) # intrinsics: line [7-10), 3x3 matrix intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ') intrinsics = intrinsics.reshape((3, 3)) # depth_min & depth_interval: line 11 depth_min = self.opt.near_plane if self.opt.uni_depth > 0 else float(lines[11].split()[0]) * self.scale_factor depth_max = self.opt.far_plane if self.opt.uni_depth > 0 else depth_min + float(lines[11].split()[1]) * 192 * 1.06 * self.scale_factor self.depth_interval = float(lines[11].split()[1]) return intrinsics, extrinsics, [depth_min, depth_max] def read_depth(self, filename): depth_h = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800) depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST) # (600, 800) depth_h = depth_h[44:556, 80:720] # (512, 640) depth_h = cv2.resize(depth_h, None, fx=self.downSample, fy=self.downSample, 
interpolation=cv2.INTER_NEAREST) # !!!!!!!!!!!!!!!!!!!!!!!!! return depth_h def load_poses_all(self): c2ws = [] List = sorted(os.listdir(os.path.join(self.data_dir, f'Cameras/train/'))) for item in List: proj_mat_filename = os.path.join(self.data_dir, f'Cameras/train/{item}') intrinsic, w2c, near_far = self.read_cam_file(proj_mat_filename) intrinsic[:2] *= 4 c2ws.append(np.linalg.inv(w2c)) self.focal = [intrinsic[0, 0], intrinsic[1, 1]] return np.stack(c2ws) def build_view_lst(self): cam_xyz_lst = [c2w[:3, 3] for c2w in self.cam2worlds] if self.opt.full_comb == 1: # pass triangles = list(itertools.combinations(self.id_list, 3)) self.view_id_list = [] for tris in triangles: tris = list(tris) random.shuffle(tris) self.view_id_list.append(tris) elif self.opt.full_comb > 1: # pass cam_xyz = np.stack(cam_xyz_lst, axis=0) # test_cam_xyz = np.stack(test_cam_xyz_lst, axis=0) # if self.opt.full_comb <= 1: triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=None, full_comb=True) if self.opt.full_comb == 2: self.view_id_list = [triangles[i] for i in range(len(triangles))] elif self.opt.full_comb in [3, 4]: # 1 jump triplets = [] first_dict = {} for tris in triangles: if tris[0] not in first_dict.keys(): first_dict[tris[0]] = [] first_dict[tris[0]] += [tris[1], tris[2]] for key, val in first_dict.items(): first_dict[key] = list(unique_lst(val)) if self.opt.full_comb == 3: for key, val in first_dict.items(): pairs = list(itertools.combinations(first_dict[key], 2)) triplets += [[key] + list(pair) for pair in pairs] self.view_id_list = [triplets[i] for i in range(len(triplets))] elif self.opt.full_comb == 4: second_dict = copy.deepcopy(first_dict) for key, val in first_dict.items(): for second in val: second_dict[key] += first_dict[second] second_dict[key] = list(unique_lst(second_dict[key])) second_dict[key] = [val for val in second_dict[key] if val != key and val not in first_dict[key]] # print("key val", key, second_dict[key]) for key, val in second_dict.items(): pairs 
= list(itertools.combinations(second_dict[key], 2)) print("key val", key, pairs) triplets += [[key] + list(pair) for pair in pairs] print("len()", len(triplets)) # exit() self.view_id_list = [triplets[i] for i in range(len(triplets))] for i in range(len(self.view_id_list)): triplets = self.view_id_list[i] real_trip = [self.id_list[j] for j in triplets] self.view_id_list[i] = real_trip def build_init_metas(self): self.view_id_list = [] self.id_list = [] if self.split != "test": with open(f'../data/dtu_configs/dtu_finetune_init_pairs.txt') as f: num_viewpoint = int(f.readline()) # viewpoints (16) for _ in range(num_viewpoint): ref_view = int(f.readline().rstrip()) str_lst=f.readline().rstrip().split(',') src_views = [int(x) for x in str_lst] self.view_id_list.append([ref_view] + src_views) self.id_list.append(ref_view) else: self.id_list = self.pair_idx[1] if self.split == "comb": self.id_list += self.pair_idx[1] with open(f'../data/dtu_configs/lists/dtu_test_ground.txt') as f: lines = f.readlines() for line in lines: info = line.strip().split() if self.scan == info[0]: self.plane_ind = int(info[1]) print("self.plane_ind", self.plane_ind) break if self.opt.full_comb < 0: with open(f'../data/nerf_synth_configs/list/lego360_init_pairs.txt') as f: for line in f: str_lst = line.rstrip().split(',') src_views = [int(x) for x in str_lst] self.view_id_list.append(src_views) def build_proj_mats(self, list=None, norm_w2c=None, norm_c2w=None): list = self.id_list if list is None else list proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], [] for vid in list: proj_mat_filename = os.path.join(self.data_dir, f'Cameras/train/{vid:08d}_cam.txt') intrinsic, extrinsic, near_far = self.read_cam_file(proj_mat_filename) intrinsic[:2] *= 4 extrinsic[:3, 3] *= self.scale_factor if norm_c2w is not None: extrinsic = extrinsic @ norm_c2w intrinsic[:2] = intrinsic[:2] * self.downSample intrinsics += [intrinsic.copy()] # multiply intrinsics and extrinsics to get projection matrix 
proj_mat_l = np.eye(4) intrinsic[:2] = intrinsic[:2] / 4 proj_mat_l[:3, :4] = intrinsic @ extrinsic[:3, :4] proj_mats += [(proj_mat_l, near_far)] world2cams += [extrinsic] cam2worlds += [np.linalg.inv(extrinsic)] proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics) world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds) return proj_mats, intrinsics, world2cams, cam2worlds def bcart2sphere(self, xyz): r = np.linalg.norm(xyz, axis=1) xyn = np.linalg.norm(xyz[...,:2], axis=1) th = np.arctan2(xyn, xyz[...,2]) ph = np.arctan2(xyz[...,1], xyz[...,0]) print("r", r.shape, r, xyn.shape, th.shape, ph.shape) return np.stack([r,th,ph], axis=-1) def sphere2cart(self, rtp): r, th, ph = rtp[0], rtp[1], rtp[2] x = r * np.sin(th) * np.cos(ph) y = r * np.sin(th) * np.sin(ph) z = r * np.cos(th) return np.asarray([x,y,z]) def matrix2euler(self, M): x = np.arctan2(-M[1][2], M[2][2]) cosY = np.sqrt(1 - M[0][2]) y = np.arctan2(M[0][2], cosY) sinZ = np.cos(x) * M[1][0] + np.sin(x) * M[2][0] cosZ = np.cos(x) * M[1][1] + np.sin(x) * M[2][1] z = np.arctan2(sinZ, cosZ) return np.asarray([x,y,z]) def euler2matrix(self, xyz): Cxyz = np.cos(xyz) Sxyz = np.sin(xyz) Cx, Cy, Cz = Cxyz[0], Cxyz[1], Cxyz[2] Sx, Sy, Sz = Sxyz[0], Sxyz[1], Sxyz[2] M = [[Cy*Cz, -Cy*Sz, Sy], [Sx*Sy*Cz + Cx*Sz, -Sx*Sy*Sz + Cx*Cz, -Sx*Cy], [-Cx*Sy*Cz + Sx*Sz, Cx*Sy*Sz + Sx*Cz, Cx*Cy] ] return np.array(M) def normalize_cam(self, w2cs, c2ws): # cam_xyz = c2ws[..., :3, 3] # rtp = self.bcart2sphere(cam_xyz) # print(rtp.shape) # rtp = np.mean(rtp, axis=0) # avg_xyz = self.sphere2cart(rtp) # euler_lst = [] # for i in range(len(c2ws)): # euler_angles = self.matrix2euler(c2ws[i][:3,:3]) # print("euler_angles", euler_angles) # euler_lst += [euler_angles] # euler = np.mean(np.stack(euler_lst, axis=0), axis=0) # print("euler mean ",euler) # M = self.euler2matrix(euler) # norm_c2w = np.eye(4) # norm_c2w[:3,:3] = M # norm_c2w[:3,3] = avg_xyz # norm_w2c = np.linalg.inv(norm_c2w) # return norm_w2c, norm_c2w 
index=0 return w2cs[index], c2ws[index] def read_meta(self): # sub select training views from pairing file # if os.path.exists('configs/pairs.th'): # self.img_idx = self.pair_idx[0] if 'train'== self.split else self.pair_idx[1] # print(f'===> {self.split}ing index: {self.img_idx}') # name = os.path.basename(self.data_dir) # test_idx = torch.load('configs/pairs.th')[f'{name}_test'] # self.img_idx = test_idx if self.split!='train' else np.delete(np.arange(0,49),test_idx) w, h = self.img_wh self.image_paths = [] self.poses = [] self.all_rays = [] self.imgs = [] self.depths = [] self.all_rgbs = [] self.all_depth = [] self.view_id_dict = {} count = 0 for i, idx in enumerate(self.id_list): image_path = os.path.join(self.data_dir, f'Rectified/{self.scan}_train/rect_{idx + 1:03d}_3_r5000.png') depth_filename = os.path.join(self.data_dir, f'Depths_raw/{self.scan}/depth_map_{idx:04d}.pfm') self.image_paths += [image_path] img = Image.open(image_path) img = img.resize(self.img_wh, Image.LANCZOS) img = self.transform(img) # (3, h, w) self.imgs += [img] self.all_rgbs += [img.reshape(3, -1).permute(1, 0)] # (h*w, 3) RGBA if os.path.exists(depth_filename): depth = self.read_depth(depth_filename) depth *= self.scale_factor self.depths += [depth] self.all_depth += [torch.from_numpy(depth).float().view(-1,1)] # ray directions for all pixels, same for all images (same H, W, focal) intrinsic = self.intrinsics[count] # center = [intrinsic[0,2], intrinsic[1,2]] self.focal = [intrinsic[0,0], intrinsic[1,1]] # self.directions = get_ray_directions(h, w, self.focal, center) # (h, w, 3) # rays_o, rays_d = get_rays(self.directions, self.cam2worlds[i]) # both (h*w, 3) # # self.all_rays += [torch.cat([rays_o, rays_d, # self.near_far[0] * torch.ones_like(rays_o[:, :1]), # self.near_far[1] * torch.ones_like(rays_o[:, :1])], 1)] # (h*w, 8) self.view_id_dict[idx] = i self.poses = self.cam2worlds # if 'train' == self.split: # self.all_rays = torch.cat(self.all_rays, 0) # 
(len(self.meta['frames])*h*w, 3) # self.all_rgbs = torch.cat(self.all_rgbs, 0) # (len(self.meta['frames])*h*w, 3) # else: # self.all_rays = torch.stack(self.all_rays, 0) # (len(self.meta['frames]),h*w, 3) # self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape(-1,*self.img_wh[::-1], 3) # (len(self.meta['frames]),h,w,3) # self.all_depth = torch.stack(self.all_depth, 0).reshape(-1,*self.img_wh[::-1]) # (len(self.meta['frames]),h,w,3) # count+=1 def __len__(self): return self.total def name(self): return 'DtuDataset' def __del__(self): print("end loading") def get_campos_ray(self): centerpixel = np.asarray(self.img_wh).astype(np.float32)[None, :] // 2 camposes = [] centerdirs = [] for i, idx in enumerate(self.id_list): c2w = self.cam2worlds[i].astype(np.float32) campos = c2w[:3, 3] camrot = c2w[:3, :3] raydir = get_dtu_raydir(centerpixel, self.intrinsics[0].astype(np.float32), camrot, True) camposes.append(campos) centerdirs.append(raydir) camposes = np.stack(camposes, axis=0) # 2091, 3 centerdirs = np.concatenate(centerdirs, axis=0) # 2091, 3 # print("camposes", camposes.shape, centerdirs.shape) return torch.as_tensor(camposes, device="cuda", dtype=torch.float32), torch.as_tensor(centerdirs, device="cuda", dtype=torch.float32) def get_init_item(self, idx, crop=False): sample = {} init_view_num = self.opt.init_view_num view_ids = self.view_id_list[idx] if self.split == 'train': view_ids = view_ids[:init_view_num] affine_mat, affine_mat_inv = [], [] imgs, depths_h = [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], [] # record proj mats between views for i in view_ids: vid = self.view_id_dict[i] imgs += [self.imgs[vid]] proj_mat_ls, near_far = self.proj_mats[vid] intrinsics.append(self.intrinsics[vid]) w2cs.append(self.world2cams[vid]) c2ws.append(self.cam2worlds[vid]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) depths_h.append(self.depths[vid]) near_fars.append(near_far) for i in range(len(affine_mat)): 
view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) depths_h = np.stack(depths_h) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub] # c2ws_all = self.cam2worlds[self.remap[view_ids_all]] sample['images'] = imgs # (V, 3, H, W) sample['mvs_images'] = imgs #self.normalize_rgb(imgs) # (V, 3, H, W) sample['depths_h'] = depths_h.astype(np.float32) # (V, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars_depth'] = near_fars.astype(np.float32)[0] sample['near_fars'] = np.tile(sample['near_fars_depth'][None,...],(len(imgs),1)) #np.tile(self.near_far.astype(np.float32)[None,...],(len(near_fars),1)) sample['proj_mats'] = proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) # sample['light_id'] = np.array(light_idx) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv # sample['scan'] = scan # sample['c2ws_all'] = c2ws_all.astype(np.float32) for key, value in sample.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) sample[key] = value.unsqueeze(0) return sample def normalize_rgb(self, data): # to unnormalize image for visualization # data V, C, H, W V, C, H, W = data.shape mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 3, 1, 1) std 
= np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1) return (data - mean) / std def __getitem__(self, id, crop=False, full_img=False): item = {} img = self.imgs[id] if full_img: item['images'] = img[None,...] w2c = self.world2cams[id] c2w = self.cam2worlds[id] intrinsic = self.intrinsics[id] proj_mat_ls, near_far = self.proj_mats[id] gt_image = np.transpose(img, (1,2,0)) # print("gt_image", gt_image.shape) width, height = gt_image.shape[1], gt_image.shape[0] camrot = (c2w[0:3, 0:3]) campos = c2w[0:3, 3] # print("camrot", camrot, campos) item["c2w"] = torch.from_numpy(c2w).float() item["intrinsic"] = intrinsic # item["intrinsic"] = sample['intrinsics'][0, ...] item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1) item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1) item['h'] = height item['w'] = width plane_pnt, plane_normal, plane_color = self.get_plane_param(self.plane_ind) item['plane_pnt'] = torch.FloatTensor(plane_pnt) item['plane_normal'] = torch.FloatTensor(plane_normal) item['plane_color'] = torch.FloatTensor(plane_color) item['depths_h'] = self.depths[id] # bounding box subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, 
width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :] gt_image = np.reshape(gt_image, (-1, 3)) item['gt_image'] = gt_image item['id'] = id item['vid'] = self.id_list[id] if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) return item def get_item(self, idx, crop=False, full_img=False): item = self.__getitem__(idx, crop=crop, full_img=full_img) for key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = transform_matrix[0:3, 0:3] campos = transform_matrix[0:3, 3] item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] item['intrinsic'] = self.intrinsics[0] # near far item['far'] = 
torch.FloatTensor([self.opt.far_plane]).view(1, 1) item['near'] = torch.FloatTensor([self.opt.near_plane]).view(1, 1) item['h'] = self.height item['w'] = self.width subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, self.intrinsics[0], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() item['id'] = idx plane_pnt, plane_normal, plane_color = self.get_plane_param(self.plane_ind) item['plane_pnt'] = torch.FloatTensor(plane_pnt) item['plane_normal'] = torch.FloatTensor(plane_normal) item['plane_color'] = torch.FloatTensor(plane_color) if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = 
torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_plane_param(self, ind): plane_pnt = [[-0.49666997, 0.52160616, 3.6239593], [0.20770223, -0.74818161, 3.98697683], [-0.04889537, -0.84123057, 4.03164617]][ind] plane_normal = [[-0.11364093, 0.38778102, 0.91471942], [-0.11165793, 0.3806543, 0.91795142], [-0.11154823, 0.3783277, 0.91892608]][ind] plane_color = [[1.0, 1.0, 1.0], [150.72447808/255, 99.68367002/255, 63.40976961/255], [80.28243032/255, 54.3915082/255, 35.07029825/255]][ind] return plane_pnt, plane_normal, plane_color def get_plane_param_points(self): r, amount = 10, int(8e3) plane_pnt, plane_normal, _ = self.get_plane_param(self.plane_ind) a,b,c = plane_normal[0], plane_normal[1], plane_normal[2] x0,y0,z0=plane_pnt[0],plane_pnt[1],plane_pnt[2], x = r * (np.random.rand(amount, 1) - 0.7) y = r * (np.random.rand(amount, 1) - 0.6) xy = np.concatenate([x,y], axis=-1) z = (a*(xy[...,0]-x0) + b*(xy[...,1]-y0))/(-c) + z0 gen_pnts = torch.as_tensor(np.stack([xy[...,0], xy[...,1], z], axis=-1), device="cuda", dtype=torch.float32) featuredim=self.opt.point_features_dim if "0" in list(self.opt.point_dir_mode): featuredim -= 3 if "0" in list(self.opt.point_conf_mode): featuredim -= 1 if "0" in list(self.opt.point_color_mode): featuredim -= 3 gen_embedding = torch.rand(1, len(gen_pnts), featuredim, device="cuda", dtype=torch.float32) gen_dir = torch.rand(1, len(gen_pnts), 3, device="cuda", dtype=torch.float32) gen_dir = gen_dir / torch.clamp(torch.norm(gen_dir, dim=-1, keepdim=True), min=1e-6) gen_color = torch.zeros([1, len(gen_pnts), 3], device="cuda", dtype=torch.float32) gen_conf = torch.full([1, len(gen_pnts), 1], 0.3, device="cuda", dtype=torch.float32) return gen_pnts, gen_embedding, gen_dir, gen_color, gen_conf def filter_plane(self, add_xyz): thresh = 0.2 plane_pnt, plane_normal, _ 
= self.get_plane_param(self.plane_ind) a, b, c = plane_normal[0], plane_normal[1], plane_normal[2] x0, y0, z0 = plane_pnt[0], plane_pnt[1], plane_pnt[2] d = -a * x0 - b * y0 - c * z0 dist = torch.abs(add_xyz[...,0] * a + add_xyz[...,1] * b + add_xyz[...,2] * c + d) return dist < thresh
40,122
41.912299
174
py
pointnerf
pointnerf-master/data/scannet_ft_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 import torch from torchvision import transforms as T import torchvision.transforms.functional as F from kornia import create_meshgrid import time import json from tqdm import tqdm from torch.utils.data import Dataset, DataLoader import torch import os from PIL import Image import h5py import models.mvs.mvs_utils as mvs_utils from data.base_dataset import BaseDataset import configparser from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir from plyfile import PlyData, PlyElement FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img def get_rays(directions, c2w): """ Get ray origin and normalized directions in world coordinate for all pixels in one image. 
Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate """ # Rotate ray directions from camera coordinate to the world coordinate c2w = torch.FloatTensor(c2w) rays_d = directions @ c2w[:3, :3].T # (H, W, 3) # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) # The origin of all rays is the camera origin in world coordinate rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) rays_d = rays_d.view(-1, 3) rays_o = rays_o.view(-1, 3) return rays_o, rays_d def get_ray_directions(H, W, focal, center=None): """ Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate """ grid = create_meshgrid(H, W, normalized_coordinates=False)[0] i, j = grid.unbind(-1) # the direction here is without +0.5 pixel centering as calibration is not so accurate # see https://github.com/bmild/nerf/issues/24 cent = center if center is not None else [W / 2, H / 2] directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1) # (H, W, 3) return directions class ScannetFtDataset(BaseDataset): def initialize(self, opt, img_wh=[800,800], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None): self.opt = opt self.data_dir = opt.data_root self.scan = opt.scan self.split = opt.split self.img_wh = (int(opt.img_wh[0] * downSample), int(opt.img_wh[1] * downSample)) self.downSample = downSample self.scale_factor = 1.0 / 
1.0 self.max_len = max_len self.near_far = [opt.near_plane, opt.far_plane] self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'red': self.bg_color = (1, 0, 0) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.opt.bg_color.split(",")] self.define_transforms() self.build_init_metas() self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32) # if opt.normview > 0: # _, _ , w2cs, c2ws = self.build_proj_mats(list=torch.load('../data/dtu_configs/pairs.th')[f'{self.scan}_test']) # norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws) # if opt.normview >= 2: # self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32) # norm_w2c, norm_c2w = None, None # self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats() self.intrinsic = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/intrinsic/intrinsic_color.txt")).astype(np.float32)[:3,:3] self.depth_intrinsic = np.loadtxt( os.path.join(self.data_dir, self.scan, "exported/intrinsic/intrinsic_depth.txt")).astype(np.float32)[:3, :3] img = Image.open(self.image_paths[0]) ori_img_shape = list(self.transform(img).shape) # (4, h, w) self.intrinsic[0, :] *= (self.width / ori_img_shape[2]) self.intrinsic[1, :] *= (self.height / ori_img_shape[1]) # print(self.intrinsic) self.total = len(self.id_list) print("dataset total:", self.split, self.total) @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random sample 
parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument('--edge_filter', type=int, default=3, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=0.5, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=5.0, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--bg_color', type=str, default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) parser.add_argument( '--scan', type=str, default="scan1", help='' ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument('--normview', type=int, default=0, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.' ) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' 
) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument( '--img_wh', type=int, nargs=2, default=(640, 480), help='resize target of the image' ) return parser def normalize_cam(self, w2cs, c2ws): index = 0 return w2cs[index], c2ws[index] def define_transforms(self): self.transform = T.ToTensor() def variance_of_laplacian(self, image): # compute the Laplacian of the image and then return the focus # measure, which is simply the variance of the Laplacian return cv2.Laplacian(image, cv2.CV_64F).var() def detect_blurry(self, list): blur_score = [] for id in list: image_path = os.path.join(self.data_dir, self.scan, "exported/color/{}.jpg".format(id)) image = cv2.imread(image_path) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) fm = self.variance_of_laplacian(gray) blur_score.append(fm) blur_score = np.asarray(blur_score) ids = blur_score.argsort()[:150] allind = np.asarray(list) print("most blurry images", allind[ids]) def remove_blurry(self, list): blur_path = os.path.join(self.data_dir, self.scan, "exported/blur_list.txt") if os.path.exists(blur_path): blur_lst = [] with open(blur_path) as f: lines = f.readlines() print("blur files", len(lines)) for line in lines: info = line.strip() blur_lst.append(int(info)) return [i for i in list if i not in blur_lst] else: print("no blur list detected, use all training frames!") return list def build_init_metas(self): colordir = os.path.join(self.data_dir, self.scan, 
"exported/color") self.image_paths = [f for f in os.listdir(colordir) if os.path.isfile(os.path.join(colordir, f))] self.image_paths = [os.path.join(self.data_dir, self.scan, "exported/color/{}.jpg".format(i)) for i in range(len(self.image_paths))] self.all_id_list = self.filter_valid_id(list(range(len(self.image_paths)))) if len(self.all_id_list) > 2900: # neural point-based graphics' configuration self.test_id_list = self.all_id_list[::100] self.train_id_list = [self.all_id_list[i] for i in range(len(self.all_id_list)) if (((i % 100) > 19) and ((i % 100) < 81 or (i//100+1)*100>=len(self.all_id_list)))] else: # nsvf configuration step=5 self.train_id_list = self.all_id_list[::step] self.test_id_list = [self.all_id_list[i] for i in range(len(self.all_id_list)) if (i % step) !=0] if self.opt.test_num_step != 1 else self.all_id_list print("all_id_list",len(self.all_id_list)) print("test_id_list",len(self.test_id_list), self.test_id_list) print("train_id_list",len(self.train_id_list)) self.train_id_list = self.remove_blurry(self.train_id_list) self.id_list = self.train_id_list if self.split=="train" else self.test_id_list self.view_id_list=[] def filter_valid_id(self, id_list): empty_lst=[] for id in id_list: c2w = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(id))).astype(np.float32) if np.max(np.abs(c2w)) < 30: empty_lst.append(id) return empty_lst def get_campos_ray(self): centerpixel=np.asarray(self.img_wh).astype(np.float32)[None,:] // 2 camposes=[] centerdirs=[] for id in self.id_list: c2w = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(id))).astype(np.float32) #@ self.blender2opencv campos = c2w[:3, 3] camrot = c2w[:3,:3] raydir = get_dtu_raydir(centerpixel, self.intrinsic, camrot, True) camposes.append(campos) centerdirs.append(raydir) camposes=np.stack(camposes, axis=0) # 2091, 3 centerdirs=np.concatenate(centerdirs, axis=0) # 2091, 3 # print("camposes", camposes.shape, 
centerdirs.shape) return torch.as_tensor(camposes, device="cuda", dtype=torch.float32), torch.as_tensor(centerdirs, device="cuda", dtype=torch.float32) def build_proj_mats(self, list=None, norm_w2c=None, norm_c2w=None): proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], [] list = self.id_list if list is None else list focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) # original focal length focal *= self.img_wh[0] / 800 # modify focal length to match size self.img_wh self.focal = focal self.near_far = np.array([2.0, 6.0]) for vid in list: frame = self.meta['frames'][vid] c2w = np.array(frame['transform_matrix']) # @ self.blender2opencv if norm_w2c is not None: c2w = norm_w2c @ c2w w2c = np.linalg.inv(c2w) cam2worlds.append(c2w) world2cams.append(w2c) intrinsic = np.array([[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]) intrinsics.append(intrinsic.copy()) # multiply intrinsics and extrinsics to get projection matrix proj_mat_l = np.eye(4) intrinsic[:2] = intrinsic[:2] / 4 proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4] proj_mats += [(proj_mat_l, self.near_far)] proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics) world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds) return proj_mats, intrinsics, world2cams, cam2worlds def define_transforms(self): self.transform = T.ToTensor() def parse_mesh(self): points_path = os.path.join(self.data_dir, self.scan, "exported/pcd.ply") mesh_path = os.path.join(self.data_dir, self.scan, self.scan + "_vh_clean.ply") plydata = PlyData.read(mesh_path) print("plydata 0", plydata.elements[0], plydata.elements[0].data["blue"].dtype) vertices = np.empty(len( plydata.elements[0].data["blue"]), dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]) vertices['x'] = plydata.elements[0].data["x"].astype('f4') vertices['y'] = plydata.elements[0].data["y"].astype('f4') vertices['z'] = plydata.elements[0].data["z"].astype('f4') vertices['red'] 
= plydata.elements[0].data["red"].astype('u1') vertices['green'] = plydata.elements[0].data["green"].astype('u1') vertices['blue'] = plydata.elements[0].data["blue"].astype('u1') # save as ply ply = PlyData([PlyElement.describe(vertices, 'vertex')], text=False) ply.write(points_path) def load_init_points(self): points_path = os.path.join(self.data_dir, self.scan, "exported/pcd.ply") # points_path = os.path.join(self.data_dir, self.scan, "exported/pcd_te_1_vs_0.01_jit.ply") if not os.path.exists(points_path): if not os.path.exists(points_path): self.parse_mesh() plydata = PlyData.read(points_path) # plydata (PlyProperty('x', 'double'), PlyProperty('y', 'double'), PlyProperty('z', 'double'), PlyProperty('nx', 'double'), PlyProperty('ny', 'double'), PlyProperty('nz', 'double'), PlyProperty('red', 'uchar'), PlyProperty('green', 'uchar'), PlyProperty('blue', 'uchar')) x,y,z=torch.as_tensor(plydata.elements[0].data["x"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["y"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["z"].astype(np.float32), device="cuda", dtype=torch.float32) points_xyz = torch.stack([x,y,z], dim=-1) if self.opt.ranges[0] > -99.0: ranges = torch.as_tensor(self.opt.ranges, device=points_xyz.device, dtype=torch.float32) mask = torch.prod(torch.logical_and(points_xyz >= ranges[None, :3], points_xyz <= ranges[None, 3:]), dim=-1) > 0 points_xyz = points_xyz[mask] # np.savetxt(os.path.join(self.data_dir, self.scan, "exported/pcd.txt"), points_xyz.cpu().numpy(), delimiter=";") return points_xyz def read_depth(self, filepath): depth_im = cv2.imread(filepath, -1).astype(np.float32) depth_im /= 1000 depth_im[depth_im > 8.0] = 0 depth_im[depth_im < 0.3] = 0 return depth_im def load_init_depth_points(self, device="cuda", vox_res=0): py, px = torch.meshgrid( torch.arange(0, 480, dtype=torch.float32, device=device), torch.arange(0, 640, dtype=torch.float32, 
device=device)) # print("max py, px", torch.max(py), torch.max(px)) # print("min py, px", torch.min(py), torch.min(px)) img_xy = torch.stack([px, py], dim=-1) # [480, 640, 2] # print(img_xy.shape, img_xy[:10]) reverse_intrin = torch.inverse(torch.as_tensor(self.depth_intrinsic)).t().to(device) world_xyz_all = torch.zeros([0,3], device=device, dtype=torch.float32) for i in tqdm(range(len(self.all_id_list))): id = self.all_id_list[i] c2w = torch.as_tensor(np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(id))).astype(np.float32), device=device, dtype=torch.float32) #@ self.blender2opencv # 480, 640, 1 depth = torch.as_tensor(self.read_depth(os.path.join(self.data_dir, self.scan, "exported/depth/{}.png".format(id))), device=device)[..., None] cam_xy = img_xy * depth cam_xyz = torch.cat([cam_xy, depth], dim=-1) cam_xyz = cam_xyz @ reverse_intrin cam_xyz = cam_xyz[cam_xyz[...,2] > 0,:] cam_xyz = torch.cat([cam_xyz, torch.ones_like(cam_xyz[...,:1])], dim=-1) world_xyz = (cam_xyz.view(-1,4) @ c2w.t())[...,:3] # print("cam_xyz", torch.min(cam_xyz, dim=-2)[0], torch.max(cam_xyz, dim=-2)[0]) # print("world_xyz", world_xyz.shape) #, torch.min(world_xyz.view(-1,3), dim=-2)[0], torch.max(world_xyz.view(-1,3), dim=-2)[0]) if vox_res > 0: world_xyz = mvs_utils.construct_vox_points_xyz(world_xyz, vox_res) # print("world_xyz", world_xyz.shape) world_xyz_all = torch.cat([world_xyz_all, world_xyz], dim=0) if self.opt.ranges[0] > -99.0: ranges = torch.as_tensor(self.opt.ranges, device=world_xyz_all.device, dtype=torch.float32) mask = torch.prod(torch.logical_and(world_xyz_all >= ranges[None, :3], world_xyz_all <= ranges[None, 3:]), dim=-1) > 0 world_xyz_all = world_xyz_all[mask] return world_xyz_all def __len__(self): if self.split == 'train': return len(self.id_list) if self.max_len <= 0 else self.max_len return len(self.id_list) if self.max_len <= 0 else self.max_len def name(self): return 'NerfSynthFtDataset' def __del__(self): print("end loading") 
def normalize_rgb(self, data): # to unnormalize image for visualization # data C, H, W C, H, W = data.shape mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1) std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1) return (data - mean) / std def get_init_item(self, idx, crop=False): sample = {} init_view_num = self.opt.init_view_num view_ids = self.view_id_list[idx] if self.split == 'train': view_ids = view_ids[:init_view_num] affine_mat, affine_mat_inv = [], [] mvs_images, imgs, depths_h, alphas = [], [], [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], [] # record proj mats between views for i in view_ids: vid = self.view_id_dict[i] # mvs_images += [self.normalize_rgb(self.blackimgs[vid])] # mvs_images += [self.whiteimgs[vid]] mvs_images += [self.blackimgs[vid]] imgs += [self.whiteimgs[vid]] proj_mat_ls, near_far = self.proj_mats[vid] intrinsics.append(self.intrinsics[vid]) w2cs.append(self.world2cams[vid]) c2ws.append(self.cam2worlds[vid]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) depths_h.append(self.depths[vid]) alphas.append(self.alphas[vid]) near_fars.append(near_far) for i in range(len(affine_mat)): view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) mvs_images = np.stack(mvs_images).astype(np.float32) depths_h = np.stack(depths_h) alphas = np.stack(alphas) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) sample['images'] = imgs # (V, 3, H, W) sample['mvs_images'] = mvs_images # (V, 3, H, 
W) sample['depths_h'] = depths_h.astype(np.float32) # (V, H, W) sample['alphas'] = alphas.astype(np.float32) # (V, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars'] = near_fars.astype(np.float32) sample['proj_mats'] = proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv for key, value in sample.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) sample[key] = value.unsqueeze(0) return sample def __getitem__(self, id, crop=False, full_img=False): item = {} vid = self.id_list[id] image_path = os.path.join(self.data_dir, self.scan, "exported/color/{}.jpg".format(vid)) # print("vid",vid) img = Image.open(image_path) img = img.resize(self.img_wh, Image.LANCZOS) img = self.transform(img) # (4, h, w) c2w = np.loadtxt(os.path.join(self.data_dir, self.scan, "exported/pose", "{}.txt".format(vid))).astype(np.float32) # w2c = np.linalg.inv(c2w) intrinsic = self.intrinsic # print("gt_image", gt_image.shape) width, height = img.shape[2], img.shape[1] camrot = (c2w[0:3, 0:3]) campos = c2w[0:3, 3] # print("camrot", camrot, campos) item["intrinsic"] = intrinsic # item["intrinsic"] = sample['intrinsics'][0, ...] 
item["campos"] = torch.from_numpy(campos).float() item["c2w"] = torch.from_numpy(c2w).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([self.near_far[1]]).view(1, 1) item['near'] = torch.FloatTensor([self.near_far[0]]).view(1, 1) item['h'] = height item['w'] = width item['id'] = id item['vid'] = vid # bounding box margin = self.opt.edge_filter if full_img: item['images'] = img[None,...].clone() gt_image = np.transpose(img, (1, 2, 0)) subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(margin, width - margin - subsamplesize + 1) indy = np.random.randint(margin, height - margin - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(margin, width-margin, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(margin, height-margin, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(margin, width - margin - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(margin, height - margin - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(margin, width - margin).astype(np.float32), np.arange(margin, height- margin).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = 
get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :] gt_image = np.reshape(gt_image, (-1, 3)) item['gt_image'] = gt_image if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) return item def get_item(self, idx, crop=False, full_img=False): item = self.__getitem__(idx, crop=crop, full_img=full_img) for key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = (transform_matrix[0:3, 0:3]) campos = transform_matrix[0:3, 3] focal = self.focal item["focal"] = focal item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) # near far if self.opt.near_plane is not None: near = self.opt.near_plane else: near = max(dist - 1.5, 0.02) if self.opt.far_plane is not None: far = self.opt.far_plane # near + else: far = dist + 0.7 middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([far]).view(1, 1) item['near'] = torch.FloatTensor([near]).view(1, 1) item['h'] = self.height item['w'] = self.width subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + 
subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": px, py = self.proportional_select(gt_mask) else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) raydir = get_blender_raydir(pixelcoords, self.height, self.width, focal, camrot, self.opt.dir_norm > 0) item["pixel_idx"] = pixelcoords raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item
32,944
43.162198
321
py
pointnerf
pointnerf-master/data/nerf_synth360_ft_dataset.py
from models.mvs.mvs_utils import read_pfm import os import numpy as np import cv2 from PIL import Image import torch from torchvision import transforms as T import torchvision.transforms.functional as F from kornia import create_meshgrid import time import json from . import data_utils from plyfile import PlyData, PlyElement from torch.utils.data import Dataset, DataLoader import torch import h5py from data.base_dataset import BaseDataset import configparser from os.path import join import cv2 # import torch.nn.functional as F from .data_utils import get_dtu_raydir FLIP_Z = np.asarray([ [1,0,0], [0,1,0], [0,0,-1], ], dtype=np.float32) def colorjitter(img, factor): # brightness_factor,contrast_factor,saturation_factor,hue_factor # img = F.adjust_brightness(img, factor[0]) # img = F.adjust_contrast(img, factor[1]) img = F.adjust_saturation(img, factor[2]) img = F.adjust_hue(img, factor[3]-1.0) return img def pose_spherical(theta, phi, radius): c2w = trans_t(radius) c2w = rot_phi(phi/180.*np.pi) @ c2w c2w = rot_theta(theta/180.*np.pi) @ c2w c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w c2w = c2w #@ np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) return c2w trans_t = lambda t : np.asarray([ [1,0,0,0], [0,1,0,0], [0,0,1,t], [0,0,0,1], ], dtype=np.float32) rot_phi = lambda phi : np.asarray([ [1,0,0,0], [0,np.cos(phi),-np.sin(phi),0], [0,np.sin(phi), np.cos(phi),0], [0,0,0,1], ], dtype=np.float32) rot_theta = lambda th : np.asarray([ [np.cos(th),0,-np.sin(th),0], [0,1,0,0], [np.sin(th),0, np.cos(th),0], [0,0,0,1], ], dtype=np.float32) def get_rays(directions, c2w): """ Get ray origin and normalized directions in world coordinate for all pixels in one image. 
Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: directions: (H, W, 3) precomputed ray directions in camera coordinate c2w: (3, 4) transformation matrix from camera coordinate to world coordinate Outputs: rays_o: (H*W, 3), the origin of the rays in world coordinate rays_d: (H*W, 3), the normalized direction of the rays in world coordinate """ # Rotate ray directions from camera coordinate to the world coordinate c2w = torch.FloatTensor(c2w) rays_d = directions @ c2w[:3, :3].T # (H, W, 3) # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) # The origin of all rays is the camera origin in world coordinate rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) rays_d = rays_d.view(-1, 3) rays_o = rays_o.view(-1, 3) return rays_o, rays_d def get_ray_directions(H, W, focal, center=None): """ Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal: image height, width and focal length Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate """ grid = create_meshgrid(H, W, normalized_coordinates=False)[0] i, j = grid.unbind(-1) # the direction here is without +0.5 pixel centering as calibration is not so accurate # see https://github.com/bmild/nerf/issues/24 cent = center if center is not None else [W / 2, H / 2] directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1) # (H, W, 3) return directions class NerfSynth360FtDataset(BaseDataset): def initialize(self, opt, img_wh=[800,800], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None): self.opt = opt self.data_dir = opt.data_root self.scan = opt.scan self.split = opt.split self.img_wh = (int(800 * downSample), int(800 * downSample)) self.downSample = downSample self.scale_factor = 1.0 / 1.0 self.max_len 
= max_len self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0]) if not self.opt.bg_color or self.opt.bg_color == 'black': self.bg_color = (0, 0, 0) elif self.opt.bg_color == 'white': self.bg_color = (1, 1, 1) elif self.opt.bg_color == 'random': self.bg_color = 'random' else: self.bg_color = [float(one) for one in self.opt.bg_color.split(",")] self.define_transforms() meta_split = "train" if self.split == "render" else self.split with open(os.path.join(self.data_dir, self.scan, f'transforms_{meta_split}.json'), 'r') as f: self.meta = json.load(f) with open(os.path.join(self.data_dir, self.scan, f'transforms_test.json'), 'r') as f: self.testmeta = json.load(f) self.id_list = [i for i in range(len(self.meta["frames"]))] self.test_id_list = [i for i in range(len(self.testmeta["frames"]))] self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32) if opt.normview > 0: _, _ , w2cs, c2ws = self.build_proj_mats(list=self.test_id_list) norm_w2c, norm_c2w = self.normalize_cam(w2cs, c2ws) if opt.normview >= 2: self.norm_w2c, self.norm_c2w = torch.as_tensor(norm_w2c, device="cuda", dtype=torch.float32), torch.as_tensor(norm_c2w, device="cuda", dtype=torch.float32) norm_w2c, norm_c2w = None, None self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats(norm_w2c=norm_w2c, norm_c2w=norm_c2w) if self.split != "render": self.build_init_metas() self.read_meta() self.total = len(self.id_list) print("dataset total:", self.split, self.total) else: self.get_render_poses() print("render only, pose total:", self.total) def get_render_poses(self): stride = 20 #self.opt.render_stride radius = 4 #self.opt.render_radius self.render_poses = np.stack([pose_spherical(angle, -30.0, radius) @ self.blender2opencv for angle in np.linspace(-180, 180, stride + 1)[:-1]], 0) self.total = 
len(self.render_poses) @staticmethod def modify_commandline_options(parser, is_train): # ['random', 'random2', 'patch'], default: no random sample parser.add_argument('--random_sample', type=str, default='none', help='random sample pixels') parser.add_argument('--random_sample_size', type=int, default=1024, help='number of random samples') parser.add_argument('--init_view_num', type=int, default=3, help='number of random samples') parser.add_argument('--shape_id', type=int, default=0, help='shape id') parser.add_argument('--trgt_id', type=int, default=0, help='shape id') parser.add_argument('--num_nn', type=int, default=1, help='number of nearest views in a batch') parser.add_argument( '--near_plane', type=float, default=2.125, help= 'Near clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--far_plane', type=float, default=4.525, help= 'Far clipping plane, by default it is computed according to the distance of the camera ' ) parser.add_argument( '--bg_color', type=str, default="white", help= 'background color, white|black(None)|random|rgb (float, float, float)' ) parser.add_argument( '--bg_filtering', type=int, default=0, help= '0 for alpha channel filtering, 1 for background color filtering' ) parser.add_argument( '--scan', type=str, default="scan1", help='' ) parser.add_argument( '--full_comb', type=int, default=0, help='' ) parser.add_argument('--inverse_gamma_image', type=int, default=-1, help='de-gamma correct the input image') parser.add_argument('--pin_data_in_memory', type=int, default=-1, help='load whole data in memory') parser.add_argument('--normview', type=int, default=0, help='load whole data in memory') parser.add_argument( '--id_range', type=int, nargs=3, default=(0, 385, 1), help= 'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.' 
) parser.add_argument( '--id_list', type=int, nargs='+', default=None, help= 'the list of data ids selected in the original dataset. The default is range(0, 385).' ) parser.add_argument( '--split', type=str, default="train", help= 'train, val, test' ) parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800') parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels') parser.add_argument('--dir_norm', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') parser.add_argument('--train_load_num', type=int, default=0, help='normalize the ray_dir to unit length or not, default not') return parser def normalize_cam(self, w2cs, c2ws): # cam_xyz = c2ws[..., :3, 3] # rtp = self.bcart2sphere(cam_xyz) # print(rtp.shape) # rtp = np.mean(rtp, axis=0) # avg_xyz = self.sphere2cart(rtp) # euler_lst = [] # for i in range(len(c2ws)): # euler_angles = self.matrix2euler(c2ws[i][:3,:3]) # print("euler_angles", euler_angles) # euler_lst += [euler_angles] # euler = np.mean(np.stack(euler_lst, axis=0), axis=0) # print("euler mean ",euler) # M = self.euler2matrix(euler) # norm_c2w = np.eye(4) # norm_c2w[:3,:3] = M # norm_c2w[:3,3] = avg_xyz # norm_w2c = np.linalg.inv(norm_c2w) # return norm_w2c, norm_c2w index = 0 return w2cs[index], c2ws[index] def define_transforms(self): self.transform = T.ToTensor() def get_campos_ray(self): centerpixel = np.asarray(self.img_wh).astype(np.float32)[None, :] // 2 camposes = [] centerdirs = [] for i, idx in enumerate(self.id_list): c2w = self.cam2worlds[i].astype(np.float32) campos = c2w[:3, 3] camrot = c2w[:3, :3] raydir = get_dtu_raydir(centerpixel, self.intrinsics[0].astype(np.float32), camrot, True) camposes.append(campos) centerdirs.append(raydir) camposes = np.stack(camposes, axis=0) # 2091, 3 centerdirs = np.concatenate(centerdirs, axis=0) # 2091, 3 # print("camposes", 
camposes.shape, centerdirs.shape) return torch.as_tensor(camposes, device="cuda", dtype=torch.float32), torch.as_tensor(centerdirs, device="cuda", dtype=torch.float32) def build_init_metas(self): self.view_id_list = [] cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds] _, _, w2cs, c2ws = self.build_proj_mats(meta=self.testmeta, list=self.test_id_list) test_cam_xyz_lst = [c2w[:3,3] for c2w in c2ws] if self.split=="train": cam_xyz = np.stack(cam_xyz_lst, axis=0) test_cam_xyz = np.stack(test_cam_xyz_lst, axis=0) triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=test_cam_xyz, full_comb=self.opt.full_comb>0) self.view_id_list = [triangles[i] for i in range(len(triangles))] if self.opt.full_comb<0: with open(f'../data/nerf_synth_configs/list/lego360_init_pairs.txt') as f: for line in f: str_lst = line.rstrip().split(',') src_views = [int(x) for x in str_lst] self.view_id_list.append(src_views) def load_init_points(self): points_path = os.path.join(self.data_dir, self.scan, "colmap_results/dense/fused.ply") # points_path = os.path.join(self.data_dir, self.scan, "exported/pcd_te_1_vs_0.01_jit.ply") assert os.path.exists(points_path) plydata = PlyData.read(points_path) # plydata (PlyProperty('x', 'double'), PlyProperty('y', 'double'), PlyProperty('z', 'double'), PlyProperty('nx', 'double'), PlyProperty('ny', 'double'), PlyProperty('nz', 'double'), PlyProperty('red', 'uchar'), PlyProperty('green', 'uchar'), PlyProperty('blue', 'uchar')) print("plydata", plydata.elements[0]) x,y,z=torch.as_tensor(plydata.elements[0].data["x"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["y"].astype(np.float32), device="cuda", dtype=torch.float32), torch.as_tensor(plydata.elements[0].data["z"].astype(np.float32), device="cuda", dtype=torch.float32) points_xyz = torch.stack([x,y,z], dim=-1).to(torch.float32) # np.savetxt(os.path.join(self.data_dir, self.scan, "exported/pcd.txt"), points_xyz.cpu().numpy(), delimiter=";") if 
self.opt.comb_file is not None: file_points = np.loadtxt(self.opt.comb_file, delimiter=";") print("file_points", file_points.shape) comb_xyz = torch.as_tensor(file_points[...,:3].astype(np.float32), device=points_xyz.device, dtype=points_xyz.dtype) points_xyz = torch.cat([points_xyz, comb_xyz], dim=0) # np.savetxt("/home/xharlie/user_space/codes/testNr/checkpoints/pcolallship360_load_confcolordir_KNN8_LRelu_grid320_333_agg2_prl2e3_prune1e4/points/save.txt", points_xyz.cpu().numpy(), delimiter=";") return points_xyz def build_proj_mats(self, meta=None, list=None, norm_w2c=None, norm_c2w=None): proj_mats, intrinsics, world2cams, cam2worlds = [], [], [], [] list = self.id_list if list is None else list meta = self.meta if meta is None else meta focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) # original focal length focal *= self.img_wh[0] / 800 # modify focal length to match size self.img_wh self.focal = focal self.near_far = np.array([2.0, 6.0]) for vid in list: frame = meta['frames'][vid] c2w = np.array(frame['transform_matrix']) @ self.blender2opencv if norm_w2c is not None: c2w = norm_w2c @ c2w w2c = np.linalg.inv(c2w) cam2worlds.append(c2w) world2cams.append(w2c) intrinsic = np.array([[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]) intrinsics.append(intrinsic.copy().astype(np.float32)) # multiply intrinsics and extrinsics to get projection matrix proj_mat_l = np.eye(4) intrinsic[:2] = intrinsic[:2] / 4 proj_mat_l[:3, :4] = intrinsic @ w2c[:3, :4] proj_mats += [(proj_mat_l, self.near_far)] proj_mats, intrinsics = np.stack(proj_mats), np.stack(intrinsics) world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds) return proj_mats, intrinsics, world2cams, cam2worlds def define_transforms(self): self.transform = T.ToTensor() def read_meta(self): w, h = self.img_wh self.image_paths = [] self.poses = [] self.all_rays = [] self.mvsimgs = [] self.render_gtimgs = [] self.depths = [] self.alphas = [] self.view_id_dict = {} 
self.directions = get_ray_directions(h, w, [self.focal, self.focal]) # (h, w, 3) count = 0 for i, idx in enumerate(self.id_list): frame = self.meta['frames'][idx] image_path = os.path.join(self.data_dir, self.scan, f"{frame['file_path']}.png") self.image_paths += [image_path] img = Image.open(image_path) img = img.resize(self.img_wh, Image.Resampling.LANCZOS) img = self.transform(img) # (4, h, w) self.depths += [(img[-1:, ...] > 0.1).numpy().astype(np.float32)] self.mvsimgs += [img[:3] * img[-1:]] self.render_gtimgs += [img[:3] * img[-1:] + (1 - img[-1:])] if self.opt.bg_filtering: self.alphas += [ (torch.norm(self.mvsimgs[-1][:3], dim=0, keepdim=True) > 1e-6).numpy().astype(np.float32)] else: self.alphas += [img[-1:].numpy().astype(np.float32)] # ray directions for all pixels, same for all images (same H, W, focal) # rays_o, rays_d = get_rays(self.directions, self.cam2worlds[i]) # both (h*w, 3) # # self.all_rays += [torch.cat([rays_o, rays_d, # self.near_far[0] * torch.ones_like(rays_o[:, :1]), # self.near_far[1] * torch.ones_like(rays_o[:, :1])], 1)] # (h*w, 8) self.view_id_dict[idx] = i self.poses = self.cam2worlds def __len__(self): if self.split == 'train': return len(self.id_list) if self.max_len <= 0 else self.max_len return len(self.id_list) if self.max_len <= 0 else self.max_len def name(self): return 'NerfSynthFtDataset' def __del__(self): print("end loading") def normalize_rgb(self, data): # to unnormalize image for visualization # data C, H, W C, H, W = data.shape mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1) std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1) return (data - mean) / std def get_init_item(self, idx, crop=False): sample = {} init_view_num = self.opt.init_view_num view_ids = self.view_id_list[idx] if self.split == 'train': view_ids = view_ids[:init_view_num] affine_mat, affine_mat_inv = [], [] mvs_images, imgs, depths_h, alphas = [], [], [], [] proj_mats, intrinsics, w2cs, c2ws, near_fars = 
[], [], [], [], [] # record proj mats between views for i in view_ids: vid = self.view_id_dict[i] # mvs_images += [self.normalize_rgb(self.mvsimgs[vid])] # mvs_images += [self.render_gtimgs[vid]] mvs_images += [self.mvsimgs[vid]] imgs += [self.render_gtimgs[vid]] proj_mat_ls, near_far = self.proj_mats[vid] intrinsics.append(self.intrinsics[vid]) w2cs.append(self.world2cams[vid]) c2ws.append(self.cam2worlds[vid]) affine_mat.append(proj_mat_ls) affine_mat_inv.append(np.linalg.inv(proj_mat_ls)) depths_h.append(self.depths[vid]) alphas.append(self.alphas[vid]) near_fars.append(near_far) for i in range(len(affine_mat)): view_proj_mats = [] ref_proj_inv = affine_mat_inv[i] for j in range(len(affine_mat)): if i == j: # reference view view_proj_mats += [np.eye(4)] else: view_proj_mats += [affine_mat[j] @ ref_proj_inv] # view_proj_mats: 4, 4, 4 view_proj_mats = np.stack(view_proj_mats) proj_mats.append(view_proj_mats[:, :3]) # (4, 4, 3, 4) proj_mats = np.stack(proj_mats) imgs = np.stack(imgs).astype(np.float32) mvs_images = np.stack(mvs_images).astype(np.float32) depths_h = np.stack(depths_h) alphas = np.stack(alphas) affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv) intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars) # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub] # c2ws_all = self.cam2worlds[self.remap[view_ids_all]] sample['images'] = imgs # (V, 3, H, W) sample['mvs_images'] = mvs_images # (V, 3, H, W) sample['depths_h'] = depths_h.astype(np.float32) # (V, H, W) sample['alphas'] = alphas.astype(np.float32) # (V, H, W) sample['w2cs'] = w2cs.astype(np.float32) # (V, 4, 4) sample['c2ws'] = c2ws.astype(np.float32) # (V, 4, 4) sample['near_fars_depth'] = near_fars.astype(np.float32)[0] sample['near_fars'] = np.tile(self.near_far.astype(np.float32)[None,...],(len(near_fars),1)) sample['proj_mats'] = 
proj_mats.astype(np.float32) sample['intrinsics'] = intrinsics.astype(np.float32) # (V, 3, 3) sample['view_ids'] = np.array(view_ids) # sample['light_id'] = np.array(light_idx) sample['affine_mat'] = affine_mat sample['affine_mat_inv'] = affine_mat_inv # sample['scan'] = scan # sample['c2ws_all'] = c2ws_all.astype(np.float32) for key, value in sample.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) sample[key] = value.unsqueeze(0) return sample def __getitem__(self, id, crop=False, full_img=False): item = {} img = self.render_gtimgs[id] w2c = self.world2cams[id] c2w = self.cam2worlds[id] intrinsic = self.intrinsics[id] proj_mat_ls, near_far = self.proj_mats[id] gt_image = np.transpose(img, (1,2,0)) # print("gt_image", gt_image.shape) width, height = gt_image.shape[1], gt_image.shape[0] camrot = (c2w[0:3, 0:3]) campos = c2w[0:3, 3] # print("camrot", camrot, campos) item["intrinsic"] = intrinsic # item["intrinsic"] = sample['intrinsics'][0, ...] item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z item["c2w"] = torch.from_numpy(c2w).float() item['lightpos'] = item["campos"] dist = np.linalg.norm(campos) middle = dist + 0.7 item['middle'] = torch.FloatTensor([middle]).view(1, 1) item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1) item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1) item['h'] = height item['w'] = width item['depths_h'] = self.depths[id] # bounding box if full_img: item['images'] = img[None,...] 
subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)] # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :] gt_image = np.reshape(gt_image, (-1, 3)) item['gt_image'] = gt_image item['id'] = id if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) return item def get_item(self, idx, crop=False, full_img=False): item = self.__getitem__(idx, crop=crop, full_img=full_img) for 
key, value in item.items(): if not isinstance(value, str): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item def get_dummyrot_item(self, idx, crop=False): item = {} width, height = self.width, self.height transform_matrix = self.render_poses[idx] camrot = transform_matrix[0:3, 0:3] campos = transform_matrix[0:3, 3] focal = self.focal item["focal"] = focal item["campos"] = torch.from_numpy(campos).float() item["camrotc2w"] = torch.from_numpy(camrot).float() item['lightpos'] = item["campos"] item['intrinsic'] = self.intrinsics[0] # near far item['far'] = torch.FloatTensor([self.opt.far_plane]).view(1, 1) item['near'] = torch.FloatTensor([self.opt.near_plane]).view(1, 1) item['h'] = self.height item['w'] = self.width subsamplesize = self.opt.random_sample_size if self.opt.random_sample == "patch": indx = np.random.randint(0, width - subsamplesize + 1) indy = np.random.randint(0, height - subsamplesize + 1) px, py = np.meshgrid( np.arange(indx, indx + subsamplesize).astype(np.float32), np.arange(indy, indy + subsamplesize).astype(np.float32)) elif self.opt.random_sample == "random": px = np.random.randint(0, width, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.randint(0, height, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "random2": px = np.random.uniform(0, width - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) py = np.random.uniform(0, height - 1e-5, size=(subsamplesize, subsamplesize)).astype(np.float32) elif self.opt.random_sample == "proportional_random": raise Exception("no gt_mask, no proportional_random !!!") else: px, py = np.meshgrid( np.arange(width).astype(np.float32), np.arange(height).astype(np.float32)) pixelcoords = np.stack((px, py), axis=-1).astype(np.float32) # H x W x 2 # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot) item["pixel_idx"] = pixelcoords # print("pixelcoords", 
pixelcoords.reshape(-1,2)[:10,:]) raydir = get_dtu_raydir(pixelcoords, self.intrinsics[0], camrot, self.opt.dir_norm > 0) raydir = np.reshape(raydir, (-1, 3)) item['raydir'] = torch.from_numpy(raydir).float() item['id'] = idx if self.bg_color: if self.bg_color == 'random': val = np.random.rand() if val > 0.5: item['bg_color'] = torch.FloatTensor([1, 1, 1]) else: item['bg_color'] = torch.FloatTensor([0, 0, 0]) else: item['bg_color'] = torch.FloatTensor(self.bg_color) for key, value in item.items(): if not torch.is_tensor(value): value = torch.as_tensor(value) item[key] = value.unsqueeze(0) return item
30,908
40.488591
321
py
pointnerf
pointnerf-master/data/__init__.py
import importlib
import torch.utils.data
import sys
sys.path.append("../")  # must precede the utils import below
from utils.ncg_string import underscore2camelcase
from .base_dataset import BaseDataset
import numpy as np
import time


def find_dataset_class_by_name(name):
    '''
    Input
        name: string with underscore representation

    Output
        dataset: a dataset class with class name {camelcase(name)}Dataset

    Searches for a dataset module with name {name}_dataset in the current
    directory and returns the class named {camelcase(name)}Dataset found in
    that module. Raises AssertionError when the class is missing or does not
    inherit from BaseDataset.
    '''
    cls_name = underscore2camelcase(name) + 'Dataset'
    filename = "data.{}_dataset".format(name)
    module = importlib.import_module(filename)

    assert cls_name in module.__dict__, 'Cannot find dataset class name "{}" in "{}"'.format(
        cls_name, filename)
    cls = module.__dict__[cls_name]
    assert issubclass(cls, BaseDataset), 'Dataset class "{}" must inherit from BaseDataset'.format(cls_name)

    return cls


def get_option_setter(dataset_name):
    """Return the command-line-option hook of the dataset class ``dataset_name``."""
    dataset_class = find_dataset_class_by_name(dataset_name)
    return dataset_class.modify_commandline_options


def create_dataset(opt):
    """Instantiate and initialize the dataset selected by ``opt.dataset_name``."""
    dataset = find_dataset_class_by_name(opt.dataset_name)
    instance = dataset()
    instance.initialize(opt)
    print("dataset [{}] was created".format(instance.name()))
    return instance


def create_data_loader(opt, dataset=None):
    """Build a DefaultDataLoader, optionally wrapping an already-built dataset."""
    data_loader = DefaultDataLoader()
    data_loader.initialize(opt, dataset=dataset)
    return data_loader


def worker_init_fn(worker_id):
    """Seed numpy differently in every DataLoader worker and on every launch.

    Mixing in the wall-clock time avoids identical augmentation streams
    across epochs when workers are re-forked.
    """
    # np.random.seed(np.random.get_state()[1][0] + worker_id)
    np.random.seed((worker_id + torch.initial_seed() + np.floor(time.time()).astype(np.int64)) % np.iinfo(np.int32).max)


class DefaultDataLoader:
    """Thin wrapper around ``torch.utils.data.DataLoader`` that caps the
    epoch length at ``opt.max_dataset_size``."""

    def name(self):
        # BUG FIX: the original returned ``self.__class__.name`` — i.e. this
        # very method object — instead of the class-name string.
        return self.__class__.__name__

    def initialize(self, opt, dataset=None):
        """Create (or adopt) the dataset and build the underlying DataLoader."""
        assert opt.batch_size >= 1
        assert opt.n_threads >= 0
        assert opt.max_dataset_size >= 1

        self.opt = opt
        self.dataset = create_dataset(opt) if dataset is None else dataset
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.n_threads),
            worker_init_fn=worker_init_fn)

    def load_data(self):
        return self.dataset

    def __len__(self):
        # Epoch length never exceeds the configured cap.
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        for i, data in enumerate(self.dataloader):
            if i * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield data

    def get_item(self, index):
        return self.dataset.get_item(index)
2,880
31.738636
120
py
pointnerf
pointnerf-master/data/load_blender.py
import os
import numpy as np
import json
import torch
import pickle, random


# Homogeneous 4x4 pose helpers (float32): translate along z, rotate about x,
# rotate about y.
trans_t = lambda t : np.asarray([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, t],
    [0, 0, 0, 1],
], dtype=np.float32)

rot_phi = lambda phi : np.asarray([
    [1, 0, 0, 0],
    [0, np.cos(phi), -np.sin(phi), 0],
    [0, np.sin(phi), np.cos(phi), 0],
    [0, 0, 0, 1],
], dtype=np.float32)

rot_theta = lambda th : np.asarray([
    [np.cos(th), 0, -np.sin(th), 0],
    [0, 1, 0, 0],
    [np.sin(th), 0, np.cos(th), 0],
    [0, 0, 0, 1],
], dtype=np.float32)


def pose_spherical(theta, phi, radius):
    """Return a 4x4 camera-to-world pose on a sphere around the origin.

    theta, phi: angles in degrees; radius: camera distance from the origin.
    The final left-multiplication is the axis permutation used by the
    original NeRF synthetic render path.
    """
    c2w = trans_t(radius)
    c2w = rot_phi(phi / 180. * np.pi) @ c2w
    c2w = rot_theta(theta / 180. * np.pi) @ c2w
    c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w
    return c2w


# Converts Blender camera axes to the OpenCV convention (flip y and z).
blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])


def load_blender_data(basedir, splits, half_res=False, testskip=1):
    """Load a Blender-synthetic NeRF scene.

    Args:
        basedir: scene directory containing ``transforms_{split}.json``.
        splits: list of split names; ``None`` means ['train', 'val', 'test'].
        half_res: downsample images (and intrinsics) by a factor of 2.
        testskip: keep every ``testskip``-th frame of non-train splits
            (0 keeps all frames).

    Returns:
        imgs (N, H, W, 4) float32 RGBA in [0, 1], poses (N, 4, 4),
        render_poses (20, 4, 4), [H, W, focal], per-split index arrays,
        and the 3x3 intrinsic matrix.
    """
    # Lazy import: imageio is only needed here, so the pose helpers in this
    # module stay usable without it installed.
    import imageio

    splits = ['train', 'val', 'test'] if splits is None else splits
    metas = {}
    for s in splits:
        with open(os.path.join(basedir, 'transforms_{}.json'.format(s)), 'r') as fp:
            metas[s] = json.load(fp)

    all_imgs = []
    all_poses = []
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        # Training frames are always all kept; val/test are subsampled.
        if s == 'train' or testskip == 0:
            skip = 1
        else:
            skip = testskip

        for frame in meta['frames'][::skip]:
            fname = os.path.join(basedir, frame['file_path'] + '.png')
            imgs.append(imageio.imread(fname))
            poses.append(np.array(frame['transform_matrix']) @ blender2opencv)
        imgs = (np.array(imgs) / 255.).astype(np.float32)  # keep all 4 channels (RGBA)
        poses = np.array(poses).astype(np.float32)
        counts.append(counts[-1] + imgs.shape[0])
        all_imgs.append(imgs)
        all_poses.append(poses)

    i_split = [np.arange(counts[i], counts[i + 1]) for i in range(len(splits))]

    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)

    H, W = imgs[0].shape[:2]
    # NOTE(review): uses camera_angle_x of the *last* split read, exactly as
    # the original code did — assumes all splits share one intrinsic setting.
    camera_angle_x = float(meta['camera_angle_x'])
    focal = .5 * W / np.tan(.5 * camera_angle_x)

    stride = 20
    render_poses = np.stack([pose_spherical(angle, -30.0, 4.0)
                             for angle in np.linspace(-180, 180, stride + 1)[:-1]], 0)

    if half_res:
        # BUG FIX: the original called ``tf.image.resize_area`` but TensorFlow
        # is never imported, so half_res=True raised NameError. Exact 2x2
        # average pooling reproduces resize_area semantics for an integer
        # downscale factor of 2, and generalizes the hard-coded [400, 400].
        n, h0, w0, c = imgs.shape
        imgs = imgs.reshape(n, h0 // 2, 2, w0 // 2, 2, c).mean(axis=(2, 4))
        H = H // 2
        W = W // 2
        focal = focal / 2.

    intrinsic = np.asarray([[focal, 0, W / 2], [0, focal, H / 2], [0, 0, 1]])
    return imgs, poses, render_poses, [H, W, focal], i_split, intrinsic


def load_blender_cloud(point_path, point_num):
    """Load a pickled surface point cloud, randomly subsampled to point_num.

    Returns (xyz, normals); normals is None when the pickle carries no
    ``point_face_normal`` entry. Sampling uses random.choices, i.e. WITH
    replacement, matching the original behavior.
    """
    point_norms = None
    with open(point_path, 'rb') as f:
        print("point_file_path ################", point_path)
        all_infos = pickle.load(f)
        point_xyz = all_infos["point_xyz"]
        if "point_face_normal" in all_infos:
            point_norms = all_infos["point_face_normal"]
    print("surface point cloud ", len(point_xyz), "mean pos:", np.mean(point_xyz, axis=0),
          "min pos:", np.min(point_xyz, axis=0), "mean max:", np.max(point_xyz, axis=0))
    if point_num < len(point_xyz):
        inds = np.asarray(random.choices(range(len(point_xyz)), k=point_num))
        point_norms = point_norms[inds, :] if point_norms is not None else None
        return point_xyz[inds, :], point_norms
    else:
        return point_xyz, point_norms
3,956
29.206107
166
py
pointnerf
pointnerf-master/data/tt_ft_dataset.py
from models.mvs.mvs_utils import read_pfm
import os
import numpy as np
import cv2
from PIL import Image
import torch
from torchvision import transforms as T
import torchvision.transforms.functional as F
from kornia import create_meshgrid
import time
import json
from . import data_utils
from plyfile import PlyData, PlyElement
import copy
from torch.utils.data import Dataset, DataLoader
import torch
import os
from PIL import Image
import h5py
from data.base_dataset import BaseDataset
import configparser
from os.path import join
import cv2
# import torch.nn.functional as F
from .data_utils import get_dtu_raydir

# Flip of the z axis (OpenGL <-> OpenCV camera handedness); currently unused
# except for a commented-out reference below.
FLIP_Z = np.asarray([
    [1,0,0],
    [0,1,0],
    [0,0,-1],
], dtype=np.float32)

def colorjitter(img, factor):
    """Apply saturation and hue jitter to a PIL image.

    `factor` holds (brightness, contrast, saturation, hue+1.0); only the last
    two are applied — brightness/contrast jitter is deliberately commented out.
    """
    # brightness_factor,contrast_factor,saturation_factor,hue_factor
    # img = F.adjust_brightness(img, factor[0])
    # img = F.adjust_contrast(img, factor[1])
    img = F.adjust_saturation(img, factor[2])
    img = F.adjust_hue(img, factor[3]-1.0)
    return img

def pose_spherical(theta, phi, radius):
    """Build a 4x4 camera-to-world pose on a sphere (angles in degrees).

    Note: unlike the usual NeRF helper, azimuth uses rot_beta (rotation about
    the z axis) instead of rot_theta.  The trans_t/rot_* lambdas are defined
    below this function; that is fine because they are resolved at call time.
    """
    c2w = trans_t(radius)
    c2w = rot_phi(phi/180.*np.pi) @ c2w
    # c2w = rot_theta(theta/180.*np.pi) @ c2w
    c2w = rot_beta(theta/180.*np.pi) @ c2w
    # c2w = rot_beta(90/180.*np.pi) @ c2w
    c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w
    c2w = c2w #@ np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
    return c2w

# 4x4 homogeneous building blocks for pose_spherical.
trans_t = lambda t : np.asarray([
    [1,0,0,0],
    [0,1,0,0],
    [0,0,1,t],
    [0,0,0,1],
], dtype=np.float32)

rot_phi = lambda phi : np.asarray([
    [1,0,0,0],
    [0,np.cos(phi),-np.sin(phi),0],
    [0,np.sin(phi), np.cos(phi),0],
    [0,0,0,1],
], dtype=np.float32)

rot_theta = lambda th : np.asarray([
    [np.cos(th),0,-np.sin(th),0],
    [0,1,0,0],
    [np.sin(th),0, np.cos(th),0],
    [0,0,0,1],
], dtype=np.float32)

rot_beta = lambda th : np.asarray([
    [np.cos(th),-np.sin(th), 0, 0],
    [np.sin(th),np.cos(th), 0, 0],
    [0,0,1,0],
    [0,0,0,1],
], dtype=np.float32)


def get_rays(directions, c2w):
    """
    Get ray origin and normalized directions in world coordinate for all pixels in one image.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems

    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinate
        c2w: (3, 4) transformation matrix from camera coordinate to world coordinate

    Outputs:
        rays_o: (H*W, 3), the origin of the rays in world coordinate
        rays_d: (H*W, 3), the normalized direction of the rays in world coordinate
    """
    # Rotate ray directions from camera coordinate to the world coordinate
    c2w = torch.FloatTensor(c2w)
    rays_d = directions @ c2w[:3, :3].T # (H, W, 3)
    # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
    # The origin of all rays is the camera origin in world coordinate
    rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3)

    rays_d = rays_d.view(-1, 3)
    rays_o = rays_o.view(-1, 3)

    return rays_o, rays_d


def get_ray_directions(H, W, focal, center=None):
    """
    Get ray directions for all pixels in camera coordinate.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W, focal: image height, width and focal length
    Outputs:
        directions: (H, W, 3), the direction of the rays in camera coordinate
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    i, j = grid.unbind(-1)
    # the direction here is without +0.5 pixel centering as calibration is not so accurate
    # see https://github.com/bmild/nerf/issues/24
    cent = center if center is not None else [W / 2, H / 2]
    directions = torch.stack([(i - cent[0]) / focal[0], (j - cent[1]) / focal[1], torch.ones_like(i)], -1)  # (H, W, 3)
    return directions


class TtFtDataset(BaseDataset):
    """Tanks-and-Temples fine-tuning dataset for Point-NeRF.

    Loads per-view RGB images, per-view pose text files and a shared
    intrinsics.txt for one scan, and serves either training/test batches
    (ray samples + ground-truth colors) or a synthetic spherical camera
    path when split == "render".
    """

    def initialize(self, opt, img_wh=[1920,1080], downSample=1.0, max_len=-1, norm_w2c=None, norm_c2w=None):
        # One-time setup (BaseDataset-style initialize instead of __init__).
        # NOTE(review): norm_w2c/norm_c2w parameters are accepted but ignored;
        # identity normalization matrices are always used below.
        self.opt = opt
        self.data_dir = opt.data_root
        self.scan = opt.scan
        self.split = opt.split

        self.img_wh = (int(opt.img_wh[0] * downSample), int(opt.img_wh[1] * downSample))
        self.downSample = downSample
        self.alphas=None  # lazily filled by get_init_alpha()
        self.scale_factor = 1.0 / 1.0
        self.max_len = max_len
        # self.cam_trans = np.diag(np.array([1, -1, -1, 1], dtype=np.float32))
        self.cam_trans = np.diag(np.array([-1, 1, 1, 1], dtype=np.float32))
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        self.height, self.width = int(self.img_wh[1]), int(self.img_wh[0])

        # Background color: named presets, 'random', or a "r,g,b" float triple.
        if not self.opt.bg_color or self.opt.bg_color == 'black':
            self.bg_color = (0, 0, 0)
        elif self.opt.bg_color == 'white':
            self.bg_color = (1, 1, 1)
        elif self.opt.bg_color == 'random':
            self.bg_color = 'random'
        else:
            self.bg_color = [float(one) for one in self.opt.bg_color.split(",")]
        self.define_transforms()
        self.build_init_metas()
        # NOTE(review): hard-codes device="cuda" — this dataset cannot be
        # constructed on a CPU-only machine; verify against the runner.
        self.norm_w2c, self.norm_c2w = torch.eye(4, device="cuda", dtype=torch.float32), torch.eye(4, device="cuda", dtype=torch.float32)
        self.near_far = np.array([opt.near_plane, opt.far_plane])
        self.intrinsic = self.get_instrinsic()
        img = Image.open(self.image_paths[0])
        self.ori_img_shape = list(self.transform(img).shape)  # (4, h, w)
        # Rescale intrinsics from the original image resolution to img_wh.
        self.intrinsic[0, :] *= (self.width / self.ori_img_shape[2])
        self.intrinsic[1, :] *= (self.height / self.ori_img_shape[1])
        self.proj_mats, self.intrinsics, self.world2cams, self.cam2worlds = self.build_proj_mats()
        if self.split != "render":
            self.build_init_view_lst()
            self.total = len(self.id_list)
            print("dataset total:", self.split, self.total)
        else:
            self.get_render_poses()
            print("render only, pose total:", self.total)

    def get_render_poses(self):
        """Build the spherical fly-around poses for split == "render"."""
        # print("pose file", os.path.join(self.data_dir, self.scan, "test_traj.txt"))
        # self.render_poses = np.loadtxt(os.path.join(self.data_dir, self.scan, "test_traj.txt")).reshape(-1,4,4)
        # print("self.render_poses", self.render_poses)
        # self.total = len(self.render_poses)
        stride = 100 # self.opt.render_stride
        # radius = 1.6 # self.opt.render_radius @ self.blender2opencv
        # Per-scene ellipse semi-axes (a, b) and elevation phi, hand-tuned.
        parameters = {"Ignatius": [1.7, 1.7, -87.0], "Truck": [2.5, 1.5, 91.0], "Caterpillar": [2.2, 2.2, -89.0], "Family": [0.9, 0.9, -91.0] , "Barn": [2.5, 2.5, 88.0]}
        a, b, phi = parameters[self.opt.scan] # self.opt.render_radius @ self.blender2opencv
        self.render_poses = np.stack([pose_spherical(angle, phi, self.radius_func(angle, a, b)) @ self.blender2opencv for angle in np.linspace(-180, 180, stride + 1)[:-1]], 0)
        # print("self.render_poses", self.render_poses[0])
        self.total = len(self.render_poses)

    def radius_func(self, angle, a, b):
        """Radius of an a-by-b ellipse at the given azimuth angle (degrees)."""
        # return 1.2 + abs(np.cos((180 + angle - 36) * np.pi / 180) * radius)
        theta = (angle - (36-180)) * np.pi / 180
        return a * b / np.sqrt(a*a*np.sin(theta)**2 + b*b*np.cos(theta)**2)

    def get_instrinsic(self):
        """Read the 3x3 intrinsic matrix from intrinsics.txt.

        Supports two on-disk layouts: a full matrix loadable by np.loadtxt, or
        a single "f cx cy _" line (ValueError fallback).
        """
        filepath = os.path.join(self.data_dir, self.scan, "intrinsics.txt")
        try:
            intrinsic = np.loadtxt(filepath).astype(np.float32)[:3, :3]
            return intrinsic
        except ValueError:
            pass
        # Get camera intrinsics
        with open(filepath, 'r') as file:
            f, cx, cy, _ = map(float, file.readline().split())
            fy=fx = f
        # Build the intrinsic matrices
        intrinsic = np.array([[fx, 0., cx], [0., fy, cy], [0., 0, 1]])
        return intrinsic

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register this dataset's command-line options on `parser`."""
        # ['random', 'random2', 'patch'], default: no random samplec
        parser.add_argument('--random_sample',
                            type=str,
                            default='none',
                            help='random sample pixels')
        parser.add_argument('--random_sample_size',
                            type=int,
                            default=1024,
                            help='number of random samples')
        parser.add_argument('--init_view_num',
                            type=int,
                            default=3,
                            help='number of random samples')
        parser.add_argument('--shape_id', type=int, default=0, help='shape id')
        parser.add_argument('--trgt_id', type=int, default=0, help='shape id')
        parser.add_argument('--num_nn',
                            type=int,
                            default=1,
                            help='number of nearest views in a batch')
        parser.add_argument(
            '--near_plane',
            type=float,
            default=2.125,
            help=
            'Near clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--far_plane',
            type=float,
            default=4.525,
            help=
            'Far clipping plane, by default it is computed according to the distance of the camera '
        )
        parser.add_argument(
            '--bg_color',
            type=str,
            default="white",
            help=
            'background color, white|black(None)|random|rgb (float, float, float)'
        )
        parser.add_argument(
            '--scan',
            type=str,
            default="scan1",
            help=''
        )
        parser.add_argument(
            '--full_comb',
            type=int,
            default=0,
            help=''
        )
        parser.add_argument('--inverse_gamma_image',
                            type=int,
                            default=-1,
                            help='de-gamma correct the input image')
        parser.add_argument('--pin_data_in_memory',
                            type=int,
                            default=-1,
                            help='load whole data in memory')
        parser.add_argument('--normview',
                            type=int,
                            default=0,
                            help='load whole data in memory')
        parser.add_argument(
            '--id_range',
            type=int,
            nargs=3,
            default=(0, 385, 1),
            help=
            'the range of data ids selected in the original dataset. The default is range(0, 385). If the ids cannot be generated by range, use --id_list to specify any ids.'
        )
        parser.add_argument(
            '--id_list',
            type=int,
            nargs='+',
            default=None,
            help=
            'the list of data ids selected in the original dataset. The default is range(0, 385).'
        )
        parser.add_argument(
            '--split',
            type=str,
            default="train",
            help=
            'train, val, test'
        )
        parser.add_argument("--half_res", action='store_true',
                            help='load blender synthetic data at 400x400 instead of 800x800')
        parser.add_argument("--testskip", type=int, default=8,
                            help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
        parser.add_argument('--dir_norm',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument('--train_load_num',
                            type=int,
                            default=0,
                            help='normalize the ray_dir to unit length or not, default not')
        parser.add_argument(
            '--img_wh',
            type=int,
            nargs=2,
            default=(1920, 1080),
            # default=(1088, 640),
            help='resize target of the image'
        )
        parser.add_argument(
            '--mvs_img_wh',
            type=int,
            nargs=2,
            # default=(1920, 1080), 1590, 960
            default=(1088, 640),
            help='resize target of the image'
        )
        return parser

    def build_init_metas(self):
        """Discover image/pose file paths and the space bounding box.

        File-name convention: images starting with "0" are training views,
        "1" are test views; the integer after the first "_" is the view id.
        """
        colordir = os.path.join(self.data_dir, self.scan, "rgb")
        train_image_paths = [f for f in os.listdir(colordir) if os.path.isfile(os.path.join(colordir, f)) and f.startswith("0")]
        test_image_paths = [f for f in os.listdir(colordir) if os.path.isfile(os.path.join(colordir, f)) and f.startswith("1")]
        self.train_id_list = list(range(len(train_image_paths)))
        self.test_id_list = list(range(len(test_image_paths)))
        self.train_image_paths = ["" for i in self.train_id_list]
        self.test_image_paths = ["" for i in self.test_id_list]
        self.train_pos_paths = ["" for i in self.train_id_list]
        self.test_pos_paths = ["" for i in self.test_id_list]
        for train_path in train_image_paths:
            id = int(train_path.split("_")[1])
            self.train_image_paths[id] = os.path.join(self.data_dir, self.scan, "rgb/{}".format(train_path))
            self.train_pos_paths[id] = os.path.join(self.data_dir, self.scan, "pose/{}.txt".format(train_path[:-4]))
        for test_path in test_image_paths:
            id = int(test_path.split("_")[1])
            self.test_image_paths[id] = os.path.join(self.data_dir, self.scan, "rgb/{}".format(test_path))
            self.test_pos_paths[id] = os.path.join(self.data_dir, self.scan, "pose/{}.txt".format(test_path[:-4]))
        self.id_list = self.train_id_list if self.split=="train" else self.test_id_list
        self.pos_paths = self.train_pos_paths if self.split=="train" else self.test_pos_paths
        self.image_paths = self.train_image_paths if self.split=="train" else self.test_image_paths
        # Scene bounds: either user-supplied --ranges or the scan's bbox.txt.
        if self.opt.ranges[0] > -90.0:
            self.spacemin, self.spacemax = torch.as_tensor(self.opt.ranges[:3]), torch.as_tensor(self.opt.ranges[3:6])
        else:
            minmax = np.loadtxt(os.path.join(self.data_dir, self.scan, "bbox.txt")).astype(np.float32)[:6]
            self.spacemin, self.spacemax = torch.as_tensor(minmax[:3]), torch.as_tensor(minmax[3:6])

    def build_init_view_lst(self):
        """Group training cameras into view triplets via ball-pivoting
        triangulation of the camera positions (train split only)."""
        self.view_id_list = []
        cam_xyz_lst = [c2w[:3,3] for c2w in self.cam2worlds]
        # _, _, w2cs, c2ws = self.build_proj_mats(meta=self.testmeta, list=self.test_id_list)
        # test_cam_xyz_lst = [c2w[:3,3] for c2w in c2ws]
        # A point 0.1 units along each camera's viewing axis, used by the
        # triangulation helper to orient normals.
        cam_points = [np.array([[0, 0, 0.1]], dtype=np.float32) @ c2w[:3, :3].T for c2w in self.cam2worlds]
        if self.split=="train":
            cam_xyz = np.stack(cam_xyz_lst, axis=0)
            cam_points = np.concatenate(cam_points, axis=0) + cam_xyz
            # test_cam_xyz = np.stack(test_cam_xyz_lst, axis=0)
            print("cam_points", cam_points.shape, cam_xyz.shape, np.linalg.norm(cam_xyz, axis=-1))
            triangles = data_utils.triangluation_bpa(cam_xyz, test_pnts=cam_points, full_comb=self.opt.full_comb>0)
            self.view_id_list = [triangles[i] for i in range(len(triangles))]

    def define_transforms(self):
        # PIL -> float tensor in [0, 1], channel-first.
        self.transform = T.ToTensor()

    def build_proj_mats(self):
        """Load all poses and build per-view projection matrices.

        Returns (proj_mats, intrinsics, world2cams, cam2worlds) stacked over
        self.id_list.  Each proj_mats entry pairs a 1/4-resolution intrinsic @
        w2c projection with the global near/far, matching the MVS branch's
        expected input scale.
        NOTE(review): np.stack over (matrix, near_far) tuples yields a ragged/
        object array — relies on legacy NumPy behavior; confirm the pinned
        NumPy version accepts it.
        """
        proj_mats, world2cams, cam2worlds, intrinsics = [], [], [], []
        list = self.id_list
        dintrinsic = self.get_instrinsic()
        # Rescale intrinsics to the MVS working resolution.
        dintrinsic[0, :] *= (self.opt.mvs_img_wh[0] / self.ori_img_shape[2])
        dintrinsic[1, :] *= (self.opt.mvs_img_wh[1] / self.ori_img_shape[1])
        for vid in list:
            c2w = np.loadtxt(self.pos_paths[vid]) # @ self.cam_trans
            w2c = np.linalg.inv(c2w)
            cam2worlds.append(c2w)
            world2cams.append(w2c)
            intrinsics.append(dintrinsic)
            proj_mat_l = np.eye(4)
            downintrinsic = copy.deepcopy(dintrinsic)
            downintrinsic[:2] = downintrinsic[:2] / 4
            proj_mat_l[:3, :4] = downintrinsic @ w2c[:3, :4]
            proj_mats += [(proj_mat_l, self.near_far)]
        proj_mats = np.stack(proj_mats)
        intrinsics = np.stack(intrinsics)
        world2cams, cam2worlds = np.stack(world2cams), np.stack(cam2worlds)
        return proj_mats, intrinsics, world2cams, cam2worlds

    # NOTE(review): duplicate definition — shadows the identical
    # define_transforms above; harmless but should probably be removed.
    def define_transforms(self):
        self.transform = T.ToTensor()

    def read_meta(self):
        """Preload all RGBA images and derived buffers for this split.

        NOTE(review): references self.focal and self.meta, which are never set
        anywhere in this class — this method looks like dead code carried over
        from the Blender-synthetic dataset; confirm it is unused.
        """
        w, h = self.img_wh
        self.image_paths = []
        self.poses = []
        self.all_rays = []
        self.blackimgs = []
        self.whiteimgs = []
        self.depths = []
        self.alphas = []

        self.view_id_dict = {}
        self.directions = get_ray_directions(h, w, [self.focal, self.focal])  # (h, w, 3)

        count = 0
        for i, idx in enumerate(self.id_list):
            frame = self.meta['frames'][idx]
            image_path = os.path.join(self.data_dir, self.scan, f"{frame['file_path']}.png")
            self.image_paths += [image_path]
            img = Image.open(image_path)
            img = img.resize(self.img_wh, Image.LANCZOS)
            img = self.transform(img)  # (4, h, w)
            self.depths += [(img[-1:, ...] > 0.1).numpy().astype(np.float32)]
            self.alphas += [img[-1:].numpy().astype(np.float32)]
            self.blackimgs += [img[:3] * img[-1:]]
            self.whiteimgs += [img[:3] * img[-1:] + (1 - img[-1:])]

            # ray directions for all pixels, same for all images (same H, W, focal)
            # rays_o, rays_d = get_rays(self.directions, self.cam2worlds[i])  # both (h*w, 3)
            # # self.all_rays += [torch.cat([rays_o, rays_d,
            # #                              self.near_far[0] * torch.ones_like(rays_o[:, :1]),
            # #                              self.near_far[1] * torch.ones_like(rays_o[:, :1])], 1)]  # (h*w, 8)
            self.view_id_dict[idx] = i
        self.poses = self.cam2worlds

    def __len__(self):
        # Both branches are identical; max_len caps the epoch length when > 0.
        if self.split == 'train':
            return len(self.id_list) if self.max_len <= 0 else self.max_len
        return len(self.id_list) if self.max_len <= 0 else self.max_len

    def name(self):
        return 'NerfSynthFtDataset'

    def __del__(self):
        print("end loading")

    def normalize_rgb(self, data):
        """Apply ImageNet mean/std normalization to a (C, H, W) array."""
        # to unnormalize image for visualization
        # data C, H, W
        C, H, W = data.shape
        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(3, 1, 1)
        std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(3, 1, 1)
        return (data - mean) / std

    def read_img_path(self, image_path, img_wh, black=False):
        """Load one image and return (black-bg image, main image, alpha mask).

        RGBA inputs use the alpha channel; RGB inputs derive the mask from
        non-white pixels, and the black-composited image is only built when
        `black` is True (it is None otherwise).
        """
        img = Image.open(image_path)
        img = img.resize(img_wh, Image.LANCZOS)
        img = self.transform(img)  # (4, h, w)
        if img.shape[0] == 4:
            alpha = img[-1:].numpy().astype(np.float32)
            blackimg = img[:3] * img[-1:]
            whiteimg = img[:3] * img[-1:] + (1 - img[-1:])
            return blackimg, whiteimg, alpha[0,...] > 0
        # print("img",img)
        alpha = torch.norm(1.0 - img, dim=0) > 0.0001
        blackimg = None
        if black:
            blackimg = img[:3] * alpha[None, ...]
        # print("alpha", torch.sum(alpha))
        return blackimg, img, alpha

    def get_init_alpha(self):
        """Cache per-view alpha masks at the MVS resolution."""
        self.alphas = []
        for i in self.id_list:
            vid = i
            _, _, alpha = self.read_img_path(self.image_paths[vid], self.opt.mvs_img_wh)
            self.alphas += [alpha[None, ...]]
        # self.alphas = np.stack(self.alphas).astype(np.float32)  # (V, H, W)

    def get_init_item(self, idx, crop=False):
        """Assemble one multi-view sample (images, poses, cross-view
        projection matrices) for the MVS initialization stage.

        All values come back with a leading batch dimension of 1.
        """
        if self.alphas is None:
            self.get_init_alpha()
        sample = {}
        init_view_num = self.opt.init_view_num
        view_ids = self.view_id_list[idx]
        if self.split == 'train':
            view_ids = view_ids[:init_view_num]

        affine_mat, affine_mat_inv = [], []
        mvs_images, imgs, depths_h, alphas = [], [], [], []
        proj_mats, intrinsics, w2cs, c2ws, near_fars = [], [], [], [], []  # record proj mats between views
        for i in view_ids:
            vid = i
            blackimg, img, alpha = self.read_img_path(self.image_paths[vid], self.opt.mvs_img_wh, black=True)
            mvs_images += [blackimg]
            alphas+= [alpha]
            imgs += [img]
            proj_mat_ls, near_far = self.proj_mats[vid]
            intrinsics.append(self.intrinsics[vid])
            w2cs.append(self.world2cams[vid])
            c2ws.append(self.cam2worlds[vid])

            affine_mat.append(proj_mat_ls)
            affine_mat_inv.append(np.linalg.inv(proj_mat_ls))
            near_fars.append(near_far)
            # print("idx",idx, vid, self.image_paths[vid])

        # Relative projections: map from each reference view i to every view j.
        for i in range(len(affine_mat)):
            view_proj_mats = []
            ref_proj_inv = affine_mat_inv[i]
            for j in range(len(affine_mat)):
                if i == j:  # reference view
                    view_proj_mats += [np.eye(4)]
                else:
                    view_proj_mats += [affine_mat[j] @ ref_proj_inv]
            # view_proj_mats: 4, 4, 4
            view_proj_mats = np.stack(view_proj_mats)
            proj_mats.append(view_proj_mats[:, :3])
        # (4, 4, 3, 4)
        proj_mats = np.stack(proj_mats)
        imgs = np.stack(imgs).astype(np.float32)
        mvs_images = np.stack(mvs_images).astype(np.float32)

        affine_mat, affine_mat_inv = np.stack(affine_mat), np.stack(affine_mat_inv)
        intrinsics, w2cs, c2ws, near_fars = np.stack(intrinsics), np.stack(w2cs), np.stack(c2ws), np.stack(near_fars)
        # view_ids_all = [target_view] + list(src_views) if type(src_views[0]) is not list else [j for sub in src_views for j in sub]
        # c2ws_all = self.cam2worlds[self.remap[view_ids_all]]

        sample['images'] = imgs  # (V, 3, H, W)
        sample['mvs_images'] = mvs_images  # (V, 3, H, W)
        # sample['depths_h'] = depths_h.astype(np.float32)  # (V, H, W)
        sample['alphas'] = np.stack(alphas).astype(np.float32)  # (V, H, W)
        sample['w2cs'] = w2cs.astype(np.float32)  # (V, 4, 4)
        sample['c2ws'] = c2ws.astype(np.float32)  # (V, 4, 4)
        sample['near_fars_depth'] = near_fars.astype(np.float32)[0]
        sample['near_fars'] = np.tile(self.near_far.astype(np.float32)[None,...],(len(near_fars),1))
        sample['proj_mats'] = proj_mats.astype(np.float32)
        sample['intrinsics'] = intrinsics.astype(np.float32)  # (V, 3, 3)
        sample['view_ids'] = np.array(view_ids)
        # sample['light_id'] = np.array(light_idx)
        sample['affine_mat'] = affine_mat
        sample['affine_mat_inv'] = affine_mat_inv
        # sample['scan'] = scan
        # sample['c2ws_all'] = c2ws_all.astype(np.float32)
        for key, value in sample.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                sample[key] = value.unsqueeze(0)
        return sample

    def __getitem__(self, id, crop=False, full_img=False):
        """Return one view's camera data plus sampled rays and GT colors.

        Pixel selection follows opt.random_sample: 'patch', 'random',
        'random2' (continuous), or the full image grid otherwise.
        """
        item = {}
        _, img, _ = self.read_img_path(self.image_paths[id], self.img_wh)
        w2c = self.world2cams[id]
        c2w = self.cam2worlds[id]
        intrinsic = self.intrinsic
        _, near_far = self.proj_mats[id]
        gt_image = np.transpose(img, (1,2,0))
        # print("gt_image", gt_image.shape)
        width, height = gt_image.shape[1], gt_image.shape[0]
        camrot = (c2w[0:3, 0:3])
        campos = c2w[0:3, 3]
        # print("camrot", camrot, campos)

        item["intrinsic"] = intrinsic
        # item["intrinsic"] = sample['intrinsics'][0, ...]
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float() # @ FLIP_Z
        item["c2w"] = torch.from_numpy(c2w).float()
        item['lightpos'] = item["campos"]
        dist = np.linalg.norm(campos)

        middle = dist + 0.7
        item['middle'] = torch.FloatTensor([middle]).view(1, 1)
        item['far'] = torch.FloatTensor([near_far[1]]).view(1, 1)
        item['near'] = torch.FloatTensor([near_far[0]]).view(1, 1)
        item['h'] = height
        item['w'] = width
        # item['depths_h'] = self.depths[id]
        # bounding box
        if full_img:
            item['images'] = img[None,...]
        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))

        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        raydir = get_dtu_raydir(pixelcoords, item["intrinsic"], camrot, self.opt.dir_norm > 0)
        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        gt_image = gt_image[py.astype(np.int32), px.astype(np.int32)]
        # gt_mask = gt_mask[py.astype(np.int32), px.astype(np.int32), :]
        gt_image = np.reshape(gt_image, (-1, 3))
        item['gt_image'] = gt_image
        item['id'] = id

        if self.bg_color:
            if self.bg_color == 'random':
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)

        return item

    def get_item(self, idx, crop=False, full_img=False):
        """__getitem__ plus a leading batch dimension on every tensor value."""
        item = self.__getitem__(idx, crop=crop, full_img=full_img)

        for key, value in item.items():
            if not isinstance(value, str):
                if not torch.is_tensor(value):
                    value = torch.as_tensor(value)
                item[key] = value.unsqueeze(0)
        return item

    def get_dummyrot_item(self, idx, crop=False):
        """Like get_item, but for a synthetic render pose (no GT image)."""
        item = {}
        width, height = self.width, self.height

        transform_matrix = self.render_poses[idx]
        camrot = transform_matrix[0:3, 0:3]
        campos = transform_matrix[0:3, 3]
        # focal = self.focal

        # item["focal"] = focal
        item["campos"] = torch.from_numpy(campos).float()
        item["camrotc2w"] = torch.from_numpy(camrot).float()
        item['lightpos'] = item["campos"]
        item['intrinsic'] = self.intrinsic

        # near far
        item['far'] = torch.FloatTensor([self.opt.far_plane]).view(1, 1)
        item['near'] = torch.FloatTensor([self.opt.near_plane]).view(1, 1)
        item['h'] = self.height
        item['w'] = self.width

        subsamplesize = self.opt.random_sample_size
        if self.opt.random_sample == "patch":
            indx = np.random.randint(0, width - subsamplesize + 1)
            indy = np.random.randint(0, height - subsamplesize + 1)
            px, py = np.meshgrid(
                np.arange(indx, indx + subsamplesize).astype(np.float32),
                np.arange(indy, indy + subsamplesize).astype(np.float32))
        elif self.opt.random_sample == "random":
            px = np.random.randint(0,
                                   width,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.randint(0,
                                   height,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "random2":
            px = np.random.uniform(0,
                                   width - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
            py = np.random.uniform(0,
                                   height - 1e-5,
                                   size=(subsamplesize,
                                         subsamplesize)).astype(np.float32)
        elif self.opt.random_sample == "proportional_random":
            raise Exception("no gt_mask, no proportional_random !!!")
        else:
            px, py = np.meshgrid(
                np.arange(width).astype(np.float32),
                np.arange(height).astype(np.float32))

        pixelcoords = np.stack((px, py), axis=-1).astype(np.float32)  # H x W x 2
        # raydir = get_cv_raydir(pixelcoords, self.height, self.width, focal, camrot)
        item["pixel_idx"] = pixelcoords
        # print("pixelcoords", pixelcoords.reshape(-1,2)[:10,:])
        raydir = get_dtu_raydir(pixelcoords, self.intrinsic, camrot, self.opt.dir_norm > 0)

        raydir = np.reshape(raydir, (-1, 3))
        item['raydir'] = torch.from_numpy(raydir).float()
        item['id'] = idx

        if self.bg_color:
            if self.bg_color == 'random':
                val = np.random.rand()
                if val > 0.5:
                    item['bg_color'] = torch.FloatTensor([1, 1, 1])
                else:
                    item['bg_color'] = torch.FloatTensor([0, 0, 0])
            else:
                item['bg_color'] = torch.FloatTensor(self.bg_color)

        for key, value in item.items():
            if not torch.is_tensor(value):
                value = torch.as_tensor(value)
            item[key] = value.unsqueeze(0)
        return item
31,438
39.82987
175
py
variational_dropout
variational_dropout-master/train.py
import argparse

import torch as t
import torch.nn as nn
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim import Adam
from torchvision import datasets

from models import *

if __name__ == "__main__":

    # Command line: epochs, batch size, device, lr and model variant.
    parser = argparse.ArgumentParser(description='train')
    parser.add_argument('--num-epochs', type=int, default=60, metavar='NI',
                        help='num epochs (default: 10)')
    parser.add_argument('--batch-size', type=int, default=70, metavar='BS',
                        help='batch size (default: 70)')
    # NOTE(review): type=bool makes any non-empty value (even "False") truthy;
    # action='store_true' would be the conventional fix but changes the CLI.
    parser.add_argument('--use-cuda', type=bool, default=False, metavar='CUDA',
                        help='use cuda (default: False)')
    parser.add_argument('--learning-rate', type=float, default=0.0005, metavar='LR',
                        help='learning rate (default: 0.0005)')
    parser.add_argument('--mode', type=str, default='vardropout', metavar='M',
                        help='training mode (default: simple)')
    args = parser.parse_args()

    writer = SummaryWriter(args.mode)

    assert args.mode in ['simple', 'dropout', 'vardropout'], 'Invalid mode, should be in [simple, dropout, vardropout]'
    Model = {
        'simple': SimpleModel,
        'dropout': DropoutModel,
        'vardropout': VariationalDropoutModel
    }
    Model = Model[args.mode]

    # MNIST train/test loaders; test loader drops the last partial batch so
    # the averaged validation loss below is exact.
    dataset = datasets.MNIST(root='data/',
                             transform=transforms.Compose([transforms.ToTensor()]),
                             download=True,
                             train=True)
    train_dataloader = t.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    dataset = datasets.MNIST(root='data/',
                             transform=transforms.Compose([transforms.ToTensor()]),
                             download=True,
                             train=False)
    test_dataloader = t.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, drop_last=True)

    model = Model()
    if args.use_cuda:
        model.cuda()

    optimizer = Adam(model.parameters(), args.learning_rate, eps=1e-6)
    # (removed: an unused nn.CrossEntropyLoss instance — every model variant
    # computes its own loss via model.loss)

    for epoch in range(args.num_epochs):
        for iteration, (input, target) in enumerate(train_dataloader):

            input = Variable(input).view(-1, 784)
            target = Variable(target)

            if args.use_cuda:
                input, target = input.cuda(), target.cuda()

            optimizer.zero_grad()

            if args.mode == 'simple':
                loss = model.loss(input=input, target=target, average=True)
            elif args.mode == 'dropout':
                loss = model.loss(input=input, target=target, p=0.4, average=True)
            else:
                likelihood, kld = model.loss(input=input, target=target, train=True, average=True)
                # Linear KL warm-up over the first 40 epochs, then full weight.
                coef = min(epoch / 40., 1.)
                loss = likelihood + kld * coef

            loss.backward()
            optimizer.step()

            if iteration % 50 == 0:
                # BUG FIX: .cpu().data.numpy()[0] fails on 0-dim loss tensors
                # (torch >= 0.4 returns scalars); .item() works in both cases.
                print('train epoch {}, iteration {}, loss {}'.format(epoch, iteration, loss.item()))

            if iteration % 100 == 0:
                # Validation pass.  FIX: run it in eval mode and without grad
                # tracking — the original kept building graphs over the whole
                # test set, wasting memory for no benefit.
                model.eval()
                loss = 0
                with t.no_grad():
                    for input, target in test_dataloader:
                        input = Variable(input).view(-1, 784)
                        target = Variable(target)

                        if args.use_cuda:
                            input, target = input.cuda(), target.cuda()

                        if args.mode == 'simple':
                            loss += model.loss(input=input, target=target, average=False).item()
                        elif args.mode == 'dropout':
                            loss += model.loss(input=input, target=target, p=0., average=False).item()
                        else:
                            loss += model.loss(input=input, target=target, train=False, average=False).item()
                model.train()

                # Per-example average (drop_last=True makes every batch full).
                loss = loss / (args.batch_size * len(test_dataloader))

                print('_____________')
                print('valid epoch {}, iteration {}'.format(epoch, iteration))
                print('_____________')
                print(loss)
                print('_____________')

                writer.add_scalar('data/loss', loss, epoch * len(train_dataloader) + iteration)

    writer.close()
4,518
39.348214
120
py
variational_dropout
variational_dropout-master/models/dropout_model.py
import torch.nn as nn import torch.nn.functional as F class DropoutModel(nn.Module): def __init__(self): super(DropoutModel, self).__init__() self.fc = nn.ModuleList([ nn.Linear(784, 500), nn.Linear(500, 50), nn.Linear(50, 10) ]) def forward(self, input, p=0): """ :param input: An float tensor with shape of [batch_size, 784] :param p: An float value in [0, 1.] with probability of elements to be zeroed :return: An float tensor with shape of [batch_size, 10] filled with logits of likelihood """ result = input for i, layer in enumerate(self.fc): result = F.elu(layer(result)) if i < len(self.fc) - 1: result = F.dropout(result, p, training=True) return result def loss(self, **kwargs): out = self(kwargs['input'], kwargs['p']) return F.cross_entropy(out, kwargs['target'], size_average=kwargs['average'])
1,009
27.857143
96
py
variational_dropout
variational_dropout-master/models/simple_model.py
import torch.nn as nn import torch.nn.functional as F class SimpleModel(nn.Module): def __init__(self): super(SimpleModel, self).__init__() self.fc = nn.Sequential( nn.Linear(784, 500), nn.ELU(), nn.Linear(500, 50), nn.ELU(), nn.Linear(50, 10) ) def forward(self, input): """ :param input: An float tensor with shape of [batch_size, 784] :return: An float tensor with shape of [batch_size, 10] filled with logits of likelihood """ return self.fc(input) def loss(self, **kwargs): out = self(kwargs['input']) return F.cross_entropy(out, kwargs['target'], size_average=kwargs['average'])
744
24.689655
96
py
variational_dropout
variational_dropout-master/models/variational_dropout_model.py
import torch.nn as nn import torch.nn.functional as F from variational_dropout.variational_dropout import VariationalDropout class VariationalDropoutModel(nn.Module): def __init__(self): super(VariationalDropoutModel, self).__init__() self.fc = nn.ModuleList([ VariationalDropout(784, 500), VariationalDropout(500, 50), nn.Linear(50, 10) ]) def forward(self, input, train=False): """ :param input: An float tensor with shape of [batch_size, 784] :param train: An boolean value indicating whether forward propagation called when training is performed :return: An float tensor with shape of [batch_size, 10] filled with logits of likelihood and kld estimation """ result = input if train: kld = 0 for i, layer in enumerate(self.fc): if i != len(self.fc) - 1: result, kld = layer(result, train) result = F.elu(result) kld += kld return self.fc[-1](result), kld for i, layer in enumerate(self.fc): if i != len(self.fc) - 1: result = F.elu(layer(result, train)) return self.fc[-1](result) def loss(self, **kwargs): if kwargs['train']: out, kld = self(kwargs['input'], kwargs['train']) return F.cross_entropy(out, kwargs['target'], size_average=kwargs['average']), kld out = self(kwargs['input'], kwargs['train']) return F.cross_entropy(out, kwargs['target'], size_average=kwargs['average'])
1,648
31.333333
111
py
variational_dropout
variational_dropout-master/variational_dropout/variational_dropout.py
import math

import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter


class VariationalDropout(nn.Module):
    """Linear layer with variational dropout (Molchanov et al., 2017).

    In training mode, `forward` samples the pre-activation via the local
    reparameterization trick and returns `(output, kld)`; in eval mode it
    returns only the output, with high-dropout weights zeroed out.
    """

    def __init__(self, input_size, out_size, log_sigma2=-10, threshold=3):
        """
        :param input_size: An int of input size
        :param log_sigma2: Initial value of log sigma ^ 2.
               It is crusial for training since it determines initial value of alpha
        :param threshold: Value for thresholding of validation. If log_alpha > threshold, then weight is zeroed
        :param out_size: An int of output size
        """
        super(VariationalDropout, self).__init__()

        self.input_size = input_size
        self.out_size = out_size

        self.theta = Parameter(t.FloatTensor(input_size, out_size))
        self.bias = Parameter(t.Tensor(out_size))

        self.log_sigma2 = Parameter(t.FloatTensor(input_size, out_size).fill_(log_sigma2))

        self.reset_parameters()

        # Coefficients of the KLD approximation from the paper.
        self.k = [0.63576, 1.87320, 1.48695]

        self.threshold = threshold

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.out_size)

        self.theta.data.uniform_(-stdv, stdv)
        self.bias.data.uniform_(-stdv, stdv)

    @staticmethod
    def clip(input, to=8):
        # Clamp to [-to, to] for numerical stability of exp/log below.
        input = input.masked_fill(input < -to, -to)
        input = input.masked_fill(input > to, to)

        return input

    def kld(self, log_alpha):
        # Approximation of -KL(q||p), averaged over the weight matrix.
        # (t.sigmoid replaces the deprecated F.sigmoid; identical values.)
        first_term = self.k[0] * t.sigmoid(self.k[1] + self.k[2] * log_alpha)
        second_term = 0.5 * t.log(1 + t.exp(-log_alpha))

        return -(first_term - second_term - self.k[0]).sum() / (self.input_size * self.out_size)

    def forward(self, input):
        """
        :param input: An float tensor with shape of [batch_size, input_size]
        :return: An float tensor with shape of [batch_size, out_size] and negative layer-kld estimation
                 (the kld is only returned in training mode)
        """

        # alpha = sigma^2 / theta^2  =>  log alpha = log_sigma2 - log(theta^2)
        log_alpha = self.clip(self.log_sigma2 - t.log(self.theta ** 2))
        kld = self.kld(log_alpha)

        if not self.training:
            # At inference, weights whose dropout rate exceeds the threshold
            # are zeroed out (sparsification).
            mask = log_alpha > self.threshold
            return t.addmm(self.bias, input, self.theta.masked_fill(mask, 0))

        # Local reparameterization trick: sample the pre-activation directly.
        mu = t.mm(input, self.theta)
        std = t.sqrt(t.mm(input ** 2, self.log_sigma2.exp()) + 1e-6)

        # randn_like replaces the deprecated Variable(t.randn(...)) and already
        # matches mu's device, so no explicit .cuda() branch is needed.
        eps = t.randn_like(mu)

        return std * eps + mu + self.bias, kld

    def max_alpha(self):
        # Bug fix: alpha = sigma^2 / theta^2, so log alpha must be
        # log_sigma2 - log(theta^2); the original omitted the log (it computed
        # log_sigma2 - theta^2), which is inconsistent with `forward`.
        log_alpha = self.log_sigma2 - t.log(self.theta ** 2)
        return t.max(log_alpha.exp())
2,575
31.2
111
py
P3O
P3O-main/baselines/run.py
"""Entry point for training/playing baselines agents (P3O fork).

Parses command-line args, builds a (possibly vectorized) gym environment,
dispatches to the chosen algorithm's `learn`, optionally saves the model and
replays it for evaluation.
"""
import sys
import re
import multiprocessing
import os.path as osp
import gym
from collections import defaultdict
import tensorflow as tf
import numpy as np

from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env
from baselines.common.tf_util import get_session
from baselines import logger
from importlib import import_module
from termcolor import colored  # NOTE(review): imported but unused in this file

# tf.keras.backend.set_floatx('float64')

# Optional dependencies: importing them registers extra gym environments.
try:
    from mpi4py import MPI
except ImportError:
    MPI = None

try:
    import pybullet_envs
except ImportError:
    pybullet_envs = None

try:
    import roboschool
except ImportError:
    roboschool = None

# Map env_type (e.g. 'atari', 'mujoco') -> set of registered env ids.
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
    # TODO: solve this with regexes
    env_type = env.entry_point.split(':')[0].split('.')[-1]
    _game_envs[env_type].add(env.id)

# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
    'BubbleBobble-Nes',
    'SuperMarioBros-Nes',
    'TwinBee3PokoPokoDaimaou-Nes',
    'SpaceHarrier-Nes',
    'SonicTheHedgehog-Genesis',
    'Vectorman-Genesis',
    'FinalFight-Snes',
    'SpaceInvaders-Snes',
}


def train(args, extra_args):
    """Build the environment and run the selected algorithm's learn() on it.

    Returns (trained model, env) so the caller can save/replay.
    """
    env_type, env_id = get_env_type(args)
    print('env_type: {}'.format(env_type))

    total_timesteps = int(args.num_timesteps)
    seed = args.seed

    learn = get_learn_function(args.alg)
    alg_kwargs = get_learn_function_defaults(args.alg, env_type)
    # CLI-provided kwargs override the algorithm's defaults.
    alg_kwargs.update(extra_args)

    env = build_env(args)
    if args.save_video_interval != 0:
        env = VecVideoRecorder(env, osp.join(logger.get_dir(), "videos"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)

    if args.network:
        alg_kwargs['network'] = args.network
    else:
        if alg_kwargs.get('network') is None:
            alg_kwargs['network'] = get_default_network(env_type)

    print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))
    # eval_env = build_env(args)
    model = learn(
        env=env,
        seed=seed,
        total_timesteps=total_timesteps,
        # eval_env=eval_env,
        **alg_kwargs
    )

    return model, env


def build_env(args):
    """Construct the (vectorized) environment appropriate for env type and algorithm."""
    ncpu = multiprocessing.cpu_count()
    if sys.platform == 'darwin': ncpu //= 2
    nenv = args.num_env or ncpu
    alg = args.alg
    seed = args.seed

    env_type, env_id = get_env_type(args)

    if env_type in {'atari', 'retro'}:
        if alg == 'deepq':
            env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True})
        elif alg == 'trpo_mpi':
            env = make_env(env_id, env_type, seed=seed)
        else:
            frame_stack_size = 4
            env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)
            env = VecFrameStack(env, frame_stack_size)

    else:
        # Non-pixel envs: single-threaded TF session, optional obs normalization.
        config = tf.ConfigProto(allow_soft_placement=True,
                                intra_op_parallelism_threads=1,
                                inter_op_parallelism_threads=1)
        config.gpu_options.allow_growth = True
        get_session(config=config)

        flatten_dict_observations = alg not in {'her'}
        env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale, flatten_dict_observations=flatten_dict_observations)

        if env_type == 'mujoco':
            env = VecNormalize(env, use_tf=True)

    return env


def get_env_type(args):
    """Resolve (env_type, env_id) from args, consulting the gym registry."""
    env_id = args.env

    if args.env_type is not None:
        return args.env_type, env_id

    # Re-parse the gym registry, since we could have new envs since last time.
    for env in gym.envs.registry.all():
        env_type = env.entry_point.split(':')[0].split('.')[-1]
        _game_envs[env_type].add(env.id)  # This is a set so add is idempotent

    if env_id in _game_envs.keys():
        # The user passed a type name (e.g. 'atari'); pick an arbitrary env of it.
        env_type = env_id
        env_id = [g for g in _game_envs[env_type]][0]
    else:
        env_type = None
        for g, e in _game_envs.items():
            if env_id in e:
                env_type = g
                break
        if ':' in env_id:
            env_type = re.sub(r':.*', '', env_id)
        assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys())

    return env_type, env_id


def get_default_network(env_type):
    """Default policy network: CNN for pixel envs, MLP otherwise."""
    if env_type in {'atari', 'retro'}:
        return 'cnn'
    else:
        return 'mlp'


def get_alg_module(alg, submodule=None):
    """Import `baselines.<alg>.<submodule>` (falling back to rl_algs)."""
    submodule = submodule or alg
    try:
        # first try to import the alg module from baselines
        alg_module = import_module('.'.join(['baselines', alg, submodule]))
    except ImportError:
        # then from rl_algs
        alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))

    return alg_module


def get_learn_function(alg):
    """Return the algorithm's `learn` entry point."""
    return get_alg_module(alg).learn


def get_learn_function_defaults(alg, env_type):
    """Return the algorithm's default kwargs for this env type ({} when absent)."""
    try:
        alg_defaults = get_alg_module(alg, 'defaults')
        kwargs = getattr(alg_defaults, env_type)()
    except (ImportError, AttributeError):
        kwargs = {}
    return kwargs


def parse_cmdline_kwargs(args):
    '''
    convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
    '''
    def parse(v):
        assert isinstance(v, str)
        try:
            # NOTE(review): eval on CLI values is acceptable for a research
            # script, but unsafe if these args could ever come from an
            # untrusted source.
            return eval(v)
        except (NameError, SyntaxError):
            return v

    return {k: parse(v) for k, v in parse_unknown_args(args).items()}


def configure_logger(log_path, **kwargs):
    """Configure the baselines logger, honoring an explicit log path."""
    if log_path is not None:
        logger.configure(log_path)
    else:
        logger.configure(**kwargs)


def main(args):
    # configure logger, disable logging in child MPI processes (with rank > 0)
    arg_parser = common_arg_parser()
    args, unknown_args = arg_parser.parse_known_args(args)
    extra_args = parse_cmdline_kwargs(unknown_args)

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        configure_logger(args.log_path)
    else:
        rank = MPI.COMM_WORLD.Get_rank()
        configure_logger(args.log_path, format_strs=[])

    model, env = train(args, extra_args)

    if args.save_path is not None and rank == 0:
        save_path = osp.expanduser(args.save_path)
        model.save(save_path)

    if args.play:
        # Replay the trained model for 10 finished episodes and report rewards.
        # replay = []
        logger.log("Running trained model")
        obs = env.reset()

        state = model.initial_state if hasattr(model, 'initial_state') else None
        dones = np.zeros((1,))

        # episode_rew = np.zeros(env.num_envs) if isinstance(env, VecEnv) else np.zeros(1)
        episode_num = 0  # NOTE(review): never incremented/used
        re = []
        episode_rewards = []
        test_run = 10
        while test_run>0:
            if state is not None:
                actions, _, state, _ = model.step(obs,S=state, M=dones)
            else:
                actions, _, _, _ = model.step(obs)
            # replay.append([obs.tolist(),actions.tolist()])
            obs, rew, done, info = env.step(actions) # this rew is modified by RunningMeanStd in the vec_normalize.py , use info.get('episode')['r'] to get episode reward
            # env.render()
            re.append(rew)
            done_any = done.any() if isinstance(done, np.ndarray) else done
            if done_any:
                for i in np.nonzero(done)[0]:
                    # The Monitor wrapper puts the unnormalized episode return
                    # in info[i]['episode']['r'].
                    episode_reward = info[i].get('episode')['r']
                    # print('episode_rew={}'.format(episode_reward))
                    episode_rewards.append(episode_reward)
                    test_run -= 1
        print(sum(re), len(re))
        print("episode rewards",episode_rewards)
        print("average rewards",sum(episode_rewards) / len(episode_rewards))
        # from os.path import expanduser
        # np.savez('/home/dachuang/workspace/extra_test/models/log_p3o',*replay)
        # with open(expanduser("~")+'/workspace/extra_test/models/first_log','w') as f:
        #     f.write(str(replay))
    env.close()

    return model


if __name__ == '__main__':
    main(sys.argv)
8,357
29.50365
176
py
DeepForcedAligner
DeepForcedAligner-main/scratch_pred.py
"""Ad-hoc debugging script: load a trained aligner checkpoint, run it on the
first dataset item and print the decoded/duration-expanded transcriptions for
visual comparison of the Dijkstra and beam duration extractors."""
import argparse

import numpy as np
import torch

from dfa.audio import Audio
from dfa.duration_extraction import extract_durations_with_dijkstra, extract_durations_beam
from dfa.model import Aligner
from dfa.text import Tokenizer
from dfa.utils import read_metafile
from dfa.utils import read_config
from dfa.paths import Paths

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Preprocessing for DeepForcedAligner.')
    parser.add_argument('--config', '-c', default='config.yaml', help='Points to the config file.')
    args = parser.parse_args()
    config = read_config(args.config)
    paths = Paths.from_config(config['paths'])

    # NOTE(review): hard-coded local checkpoint path — adjust before reuse.
    checkpoint = torch.load('/Volumes/data/logs/dfa/latest_model.pt', map_location=torch.device('cpu'))

    # The checkpoint's own config/symbols override the file config from here on.
    config = checkpoint['config']
    symbols = checkpoint['symbols']
    audio = Audio.from_config(config['audio'])
    tokenizer = Tokenizer(symbols)
    model = Aligner.from_checkpoint(checkpoint).eval()
    print(f'model step {model.get_step()}')

    main_dir = paths.dataset_dir
    # NOTE(review): dfa/utils.py defines read_metafile(path, folder, dur_path)
    # — this single-argument call looks out of date; confirm against utils.
    text_dict = read_metafile(paths.metadata_path)
    file_id = list(text_dict.keys())[0]
    text = text_dict[file_id]
    target = np.array(tokenizer(text))
    mel = np.load((paths.mel_dir / file_id).with_suffix('.npy'))
    mel = torch.tensor(mel).float().unsqueeze(0)

    pred = model(mel)
    # Greedy per-frame argmax decoding for a quick qualitative check.
    pred_max = pred[0].max(1)[1].detach().cpu().numpy().tolist()
    pred_text = tokenizer.decode(pred_max)

    pred = torch.softmax(pred, dim=-1)
    pred = pred.detach()[0].numpy()
    target_len = target.shape[0]
    pred_len = pred.shape[0]

    durations = extract_durations_with_dijkstra(target, pred)
    durations_beam, sequences = extract_durations_beam(target, pred, 5)

    # Repeat each target character by its extracted duration to eyeball alignment.
    expanded_string = ''.join([text[i] * dur for i, dur in enumerate(list(durations))])

    print(text)
    print(pred_text)
    print(expanded_string)
    print(tokenizer.decode(target[sequences[0][0]]))
    print(tokenizer.decode(target[sequences[-1][0]]))
    print(durations)
    print(durations_beam[0])
    print(durations_beam[-1])
2,103
34.661017
103
py
DeepForcedAligner
DeepForcedAligner-main/extract_durations.py
"""Run aligner inference over the whole dataset, then extract per-token
durations from the frame-level predictions (Dijkstra or beam search) using a
multiprocessing pool."""
import argparse
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
from pathlib import Path
from typing import Tuple

import numpy as np
import torch
import tqdm

from dfa.dataset import new_dataloader
from dfa.duration_extraction import extract_durations_with_dijkstra, extract_durations_beam
from dfa.model import Aligner
from dfa.paths import Paths
from dfa.text import Tokenizer
from dfa.utils import read_config, to_device, unpickle_binary


class Extractor:
    """Picklable callable so duration extraction can run in a worker Pool."""

    def __init__(self, method):
        # method: 'beam' for beam search, anything else -> Dijkstra.
        self.method = method

    def extract_durations_for_item(self, item_file: Tuple[dict, Path, Path]) -> Tuple[dict, np.ndarray]:
        """Extract per-token durations for one dataset item.

        :param item_file: (dataset item dict, token .npy path, prediction .npy path)
        :return: (item, durations array aligned with the item's tokens)
        """
        item, token_file, pred_file = item_file
        tokens_len, mel_len = item['tokens_len'], item['mel_len']
        # Bug fix: np.int was removed in NumPy 1.24 — use the explicit dtype.
        tokens = np.load(str(token_file), allow_pickle=False).astype(np.int64)
        # Trim padding added by the batched dataloader.
        tokens = tokens[:tokens_len]
        pred = np.load(str(pred_file), allow_pickle=False)
        pred = pred[:mel_len, :]
        if self.method == 'beam':
            durations, _ = extract_durations_beam(tokens, pred, 10)
            durations = durations[0]
        else:
            durations = extract_durations_with_dijkstra(tokens, pred)
        return item, durations


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Duration extraction for DeepForcedAligner.')
    parser.add_argument('--config', '-c', default='config2.yaml', type=str, help='Points to the config file.')
    parser.add_argument('--model', '-m', default=None, type=str, help='Points to the a model file to restore.')
    parser.add_argument('--target', '-t', default='outputs', type=str, help='Target path')
    parser.add_argument('--batch_size', '-b', default=8, type=int, help='Batch size for inference.')
    parser.add_argument('--num_workers', '-w', metavar='N', type=int, default=cpu_count() - 1,
                        help='The number of worker threads to use for preprocessing')
    args = parser.parse_args()
    config = read_config(args.config)
    paths = Paths.from_config(config['paths'])
    model_path = args.model if args.model else paths.checkpoint_dir / 'latest_model.pt'

    # Output dirs are namespaced by the data dir to avoid collisions.
    args.target = '_'.join([str(paths.data_dir), str(args.target)])
    print(f'Target dir: {args.target}')
    dur_target_dir, pred_target_dir = Path(args.target) / 'durations', Path(args.target) / 'predictions'
    dur_target_dir.mkdir(parents=True, exist_ok=True)
    pred_target_dir.mkdir(parents=True, exist_ok=True)

    print(f'Loading model from {model_path}')
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
    model = Aligner.from_checkpoint(checkpoint).eval().to(device)
    print(f'Loaded model with step {model.get_step()} on device: {device}')
    symbols = unpickle_binary(paths.data_dir / 'symbols.pkl')
    assert symbols == checkpoint['symbols'], 'Symbols from dataset do not match symbols from model checkpoint!'
    tokenizer = Tokenizer(symbols)
    dataloader = new_dataloader(dataset_path=paths.data_dir / 'dataset.pkl', mel_dir=paths.mel_dir,
                                token_dir=paths.token_dir, batch_size=args.batch_size)

    # Pass 1: dump per-item softmaxed frame predictions to disk.
    print(f'Performing STT model inference...')
    for i, batch in tqdm.tqdm(enumerate(dataloader), total=len(dataloader)):
        tokens, mel, tokens_len, mel_len = to_device(batch, device)
        pred_batch = model(mel)
        for b in range(tokens.size(0)):
            this_mel_len = mel_len[b]
            pred = pred_batch[b, :this_mel_len, :]
            pred = torch.softmax(pred, dim=-1)
            pred = pred.detach().cpu().numpy()
            item_id = batch['item_id'][b]
            np.save(pred_target_dir / f'{item_id}.npy', pred, allow_pickle=False)

    # Pass 2: extract durations from the dumped predictions in parallel.
    print(f'Extracting durations...')
    dataset = unpickle_binary(paths.data_dir / 'dataset.pkl')
    item_files = []
    for item in dataset:
        file_name = item['item_id'] + '.npy'
        token_file, pred_file = paths.token_dir / file_name, pred_target_dir / file_name
        item_files.append((item, token_file, pred_file))

    pool = Pool(processes=args.num_workers)
    extr_fx = Extractor(method=config['durations']['method']).extract_durations_for_item
    mapper = pool.imap_unordered(extr_fx, item_files)
    for i, (item, durations) in tqdm.tqdm(enumerate(mapper), total=len(item_files)):
        item_id = item['item_id']
        np.save(dur_target_dir / f'{item_id}.npy', durations, allow_pickle=False)
4,546
45.397959
111
py
DeepForcedAligner
DeepForcedAligner-main/train.py
"""Training entry point: restore (or initialize) an Aligner model and hand it
to the Trainer together with a checkpoint dict."""
import argparse

import torch
from torch import optim

from dfa.model import Aligner
from dfa.paths import Paths
from dfa.utils import read_config, unpickle_binary
from trainer import Trainer

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Preprocessing for DeepForcedAligner.')
    parser.add_argument('--config', '-c', default='config.yaml', help='Points to the config file.')
    parser.add_argument('--checkpoint', '-cp', default=None, help='Points to the a model file to restore.')
    args = parser.parse_args()
    config = read_config(args.config)
    paths = Paths.from_config(config['paths'])
    symbols = unpickle_binary(paths.data_dir / 'symbols.pkl')

    # Resolve the checkpoint to restore from: explicit CLI path first, then
    # the latest checkpoint in the run dir, else start a fresh model.
    # (Deduplicates the two previously copy-pasted restore branches.)
    checkpoint_path = args.checkpoint
    if checkpoint_path is None:
        default_path = paths.checkpoint_dir / 'latest_model.pt'
        if default_path.exists():
            checkpoint_path = default_path

    if checkpoint_path is not None:
        print(f'Restoring model from checkpoint: {checkpoint_path}')
        checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
        model = Aligner.from_checkpoint(checkpoint)
        assert checkpoint['symbols'] == symbols, 'Symbols from data do not match symbols from model!'
        print(f'Restored model with step {model.get_step()}')
    else:
        print(f'Initializing new model from config {args.config}')
        # +1 symbol for the CTC blank.
        model = Aligner(n_mels=config['audio']['n_mels'],
                        num_symbols=len(symbols) + 1,
                        **config['model'])

    # Renamed from `optim` — the original shadowed the imported torch.optim module.
    # NOTE(review): as in the original, a *fresh* Adam state is saved even when
    # restoring from a checkpoint, discarding the stored optimizer state —
    # confirm this is intentional.
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    checkpoint = {'model': model.state_dict(), 'optim': optimizer.state_dict(),
                  'config': config, 'symbols': symbols}

    trainer = Trainer(paths=paths)
    trainer.train(checkpoint, train_params=config['training'])
2,128
45.282609
107
py
DeepForcedAligner
DeepForcedAligner-main/preprocess.py
"""Dataset preprocessing: extract (or load precomputed) mel spectrograms and
tokenized transcripts for every audio file, in parallel, and pickle the
resulting index plus the symbol inventory."""
import argparse
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
from pathlib import Path
from typing import Dict, Union

import numpy as np
import tqdm

from dfa.audio import Audio
from dfa.paths import Paths
from dfa.text import Tokenizer
from dfa.utils import get_files, read_config, pickle_binary, read_metafile


class Preprocessor:
    """Performs mel extraction and tokenization and stores the resulting torch tensors."""

    def __init__(self,
                 audio: Audio,
                 tokenizer: Tokenizer,
                 paths: Paths,
                 text_dict: Dict[str, str],
                 mel_dim_last=True) -> None:
        # audio: mel extraction config/helpers; text_dict: item_id -> transcript.
        # mel_dim_last: whether precomputed mels are stored as (frames, n_mels).
        self.audio = audio
        self.paths = paths
        self.tokenizer = tokenizer
        self.text_dict = text_dict
        self.mel_dim_last = mel_dim_last

    def __call__(self, file_path: Path) -> Dict[str, Union[str, int]]:
        """Process one wav file; returns {'item_id', 'tokens_len', 'mel_len'}.

        Callable (rather than a plain function) so it can be pickled into a
        multiprocessing Pool with its configuration attached.
        """
        item_id = file_path.stem
        if self.paths.precomputed_mels:
            # Reuse externally computed mels, transposing to (frames, n_mels)
            # if they were stored with the mel dimension first.
            mel = np.load(self.paths.precomputed_mels / f'{item_id}.npy')
            if not self.mel_dim_last:
                mel = mel.T
            assert mel.shape[1] == self.audio.n_mels, \
                f'Expected mel shape to be of (None, {self.audio.n_mels}), but was: {mel.shape}! ' \
                f'Consider setting config/audio/mel_dim_last: {not self.mel_dim_last}'
        else:
            wav = self.audio.load_wav(file_path)
            mel = self.audio.wav_to_mel(wav)
        np.save(self.paths.mel_dir / f'{item_id}.npy', mel, allow_pickle=False)
        text = self.text_dict[item_id]
        tokens = np.array(self.tokenizer(text)).astype(np.int32)
        np.save(self.paths.token_dir / f'{item_id}.npy', tokens, allow_pickle=False)
        return {'item_id': item_id, 'tokens_len': tokens.shape[0], 'mel_len': mel.shape[0]}


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Preprocessing for DeepForcedAligner.')
    parser.add_argument('--config', '-c', help='Points to the config file.', default='config.yaml')
    parser.add_argument('--num_workers', '-w', metavar='N', type=int, default=cpu_count() - 1,
                        help='The number of worker threads to use for preprocessing')
    args = parser.parse_args()
    config = read_config(args.config)
    paths = Paths.from_config(config['paths'])
    audio = Audio.from_config(config['audio'])
    mel_dim_last = config['preprocessing']['mel_dim_last']
    print(f'Config: {args.config}\n'
          f'Target data directory: {paths.data_dir}')

    text_dict, audio_files = read_metafile(paths.metadata_path, paths.dataset_dir, paths.actual_dur_path)

    # Build the symbol inventory from every character seen in the transcripts.
    symbols = set()
    for text in text_dict.values():
        symbols.update(set(text))
    symbols = sorted(list(symbols))

    # Drop audio files that have no transcript.
    audio_files = [x for x in audio_files if x.stem in text_dict]

    tokenizer = Tokenizer(symbols)
    preprocessor = Preprocessor(audio=audio, tokenizer=tokenizer,
                                paths=paths, text_dict=text_dict,
                                mel_dim_last=mel_dim_last)
    pool = Pool(processes=args.num_workers)
    mapper = pool.imap_unordered(preprocessor, audio_files)
    dataset = []
    for i, item in tqdm.tqdm(enumerate(mapper), total=len(audio_files)):
        dataset.append(item)
    pickle_binary(dataset, paths.data_dir / 'dataset.pkl')
    pickle_binary(symbols, paths.data_dir / 'symbols.pkl')
    print('Preprocessing done.')
3,446
39.081395
105
py
DeepForcedAligner
DeepForcedAligner-main/trainer.py
import numpy as np
import torch
import tqdm
from torch.nn import CTCLoss
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter

from dfa.dataset import new_dataloader, get_longest_mel_id
from dfa.duration_extraction import extract_durations_with_dijkstra
from dfa.model import Aligner
from dfa.paths import Paths
from dfa.text import Tokenizer
from dfa.utils import to_device


class Trainer:
    """Runs the CTC training loop for the Aligner, handling checkpointing and
    TensorBoard logging."""

    def __init__(self, paths: Paths) -> None:
        self.paths = paths
        self.writer = SummaryWriter(log_dir=paths.checkpoint_dir / 'tensorboard')
        self.ctc_loss = CTCLoss()

        # Used for generating plots
        # (the single longest sample gives the most informative alignment text).
        longest_id = get_longest_mel_id(dataset_path=self.paths.data_dir / 'dataset.pkl')
        self.longest_mel = np.load(str(paths.mel_dir / f'{longest_id}.npy'), allow_pickle=False)
        self.longest_tokens = np.load(str(paths.token_dir / f'{longest_id}.npy'), allow_pickle=False)

    def train(self, checkpoint: dict, train_params: dict) -> None:
        """Train from the given checkpoint dict.

        :param checkpoint: dict with 'model', 'optim', 'config', 'symbols'
            entries (as produced by train.py / earlier checkpoints).
        :param train_params: config['training'] — learning_rate, epochs,
            batch_size, checkpoint_steps, plot_steps.
        """
        lr = train_params['learning_rate']
        epochs = train_params['epochs']
        batch_size = train_params['batch_size']
        ckpt_steps = train_params['checkpoint_steps']
        plot_steps = train_params['plot_steps']
        config = checkpoint['config']
        symbols = checkpoint['symbols']
        tokenizer = Tokenizer(symbols)
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        model = Aligner.from_checkpoint(checkpoint).to(device)
        optim = Adam(model.parameters())
        optim.load_state_dict(checkpoint['optim'])
        # Override the restored learning rate with the configured one.
        for g in optim.param_groups:
            g['lr'] = lr

        dataloader = new_dataloader(dataset_path=self.paths.data_dir / 'dataset.pkl', mel_dir=self.paths.mel_dir,
                                    token_dir=self.paths.token_dir, batch_size=batch_size)
        # Resume epoch counting from the model's persisted step counter.
        start_epoch = model.get_step() // len(dataloader)

        for epoch in range(start_epoch + 1, epochs + 1):
            pbar = tqdm.tqdm(enumerate(dataloader, 1), total=len(dataloader))
            for i, batch in pbar:
                tokens, mel, tokens_len, mel_len = to_device(batch, device)

                pred = model(mel)
                # CTCLoss expects (T, N, C) log-probabilities.
                pred = pred.transpose(0, 1).log_softmax(2)
                loss = self.ctc_loss(pred, tokens, mel_len, tokens_len)

                pbar.set_description(desc=f'Epoch: {epoch} | Step {model.get_step()} '
                                          f'| Loss: {loss:#.4}', refresh=True)
                # Skip the update on NaN/inf losses (degenerate CTC alignments).
                if not torch.isnan(loss) and not torch.isinf(loss):
                    optim.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                    optim.step()

                self.writer.add_scalar('CTC_Loss', loss.item(), global_step=model.get_step())
                self.writer.add_scalar('Params/batch_size', batch_size, global_step=model.get_step())
                self.writer.add_scalar('Params/learning_rate', lr, global_step=model.get_step())

                if model.get_step() % ckpt_steps == 0:
                    torch.save({'model': model.state_dict(), 'optim': optim.state_dict(),
                                'config': config, 'symbols': symbols},
                               self.paths.checkpoint_dir / f'model_step_{model.get_step() // 1000}k.pt')

                if model.get_step() % plot_steps == 0:
                    self.generate_plots(model, tokenizer)

            # Always refresh the rolling 'latest' checkpoint at epoch end.
            latest_checkpoint = self.paths.checkpoint_dir / 'latest_model.pt'
            torch.save({'model': model.state_dict(), 'optim': optim.state_dict(),
                        'config': config, 'symbols': symbols}, latest_checkpoint)

    def generate_plots(self, model: Aligner, tokenizer: Tokenizer) -> None:
        """Log decoded/duration-expanded texts for the longest sample to TensorBoard."""
        model.eval()
        device = next(model.parameters()).device
        longest_mel = torch.tensor(self.longest_mel).unsqueeze(0).float().to(device)
        pred = model(longest_mel)[0].detach().cpu().softmax(dim=-1)
        durations = extract_durations_with_dijkstra(self.longest_tokens, pred.numpy())
        # Greedy per-frame argmax decoding for a qualitative check.
        pred_max = pred.max(1)[1].numpy().tolist()
        pred_text = tokenizer.decode(pred_max)
        target_text = tokenizer.decode(self.longest_tokens)
        # Repeat each target char by its extracted duration to visualize alignment.
        target_duration_rep = ''.join(c * durations[i] for i, c in enumerate(target_text))
        self.writer.add_text('Text/Prediction', ' ' + pred_text, global_step=model.get_step())
        self.writer.add_text('Text/Target_Duration_Repeated', ' ' + target_duration_rep, global_step=model.get_step())
        self.writer.add_text('Text/Target', ' ' + target_text, global_step=model.get_step())
        model.train()
4,759
45.666667
113
py
DeepForcedAligner
DeepForcedAligner-main/dfa/utils.py
"""Misc helpers for the DFA pipeline: metadata reading, config I/O, file
globbing, pickling and batch-to-device transfer."""
import pickle
import os
from pathlib import Path
from typing import Dict, List, Any, Tuple, Union


import torch


def read_metafile(path: Path, folder: Union[str, Path], dur_path: Union[str, Path]) -> Tuple[Dict[str, str], List[Path]]:
    """Collect transcripts and wav paths for one speaker subset.

    Bug fix: the return annotation previously claimed ``Dict[str, str]``, but
    the function returns a (text_dict, audio_files) tuple. The ``path``
    parameter is also annotated as ``Path`` now, since ``path.stem`` is used.

    :param path: Path whose stem (e.g. 'Hindi_M') selects the speaker subset
        and prefixes the sub-folders of `folder` that get scanned.
    :param folder: Dataset root containing per-speaker sub-folders.
    :param dur_path: Folder with tab-separated per-item duration files; only
        items with duration > 2 (seconds, presumably — confirm) are kept.
    :return: ({item_id: transcript}, [wav file paths]).
    """
    text_dict = {}
    txt_files = []
    audio_files = []
    print(path)
    for filename in os.listdir(folder):
        if filename.startswith(str(path)):
            txt_files.extend(get_files(os.path.join(folder, filename), '.txt'))
            audio_files.extend(get_files(os.path.join(folder, filename), '.wav'))
    for textfile in txt_files:
        with open(str(textfile), 'r') as f:
            line = f.read()
            text_dict[textfile.stem] = line
    # Speaker subset -> name of its duration file.
    mapping = {
        'Hindi_M': 'dur_hi_m',
        'Hindi_F': 'dur_hi_f',
        'Telugu_M': 'dur_te_m',
        'Telugu_F': 'dur_te_f',
        'Marathi_M': 'dur_mr_m',
        'Marathi_F': 'dur_mr_f',
    }
    with open(os.path.join(dur_path, mapping[path.stem]), 'r') as f:
        lines = f.read().split('\n')[:-1]
    # Each line: "<file path>\t...\t<duration>"; keep only items longer than 2.
    lines = set([Path(l.split('\t')[0]).stem for l in lines if float(l.split('\t')[-1]) > 2])
    text_dict = {t: text_dict[t] for t in text_dict if t in lines}
    return text_dict, audio_files


def read_config(path: str) -> Dict[str, Any]:
    """Load a YAML config file into a dict."""
    # Local import keeps PyYAML optional for consumers that never touch configs.
    import yaml
    with open(path, 'r') as stream:
        config = yaml.load(stream, Loader=yaml.FullLoader)
    return config


def save_config(config: Dict[str, Any], path: str) -> None:
    """Write a config dict to a YAML file."""
    import yaml
    with open(path, 'w+', encoding='utf-8') as stream:
        yaml.dump(config, stream, default_flow_style=False)


def get_files(path: str, extension='.wav') -> List[Path]:
    """Recursively collect files with the given extension under `path`."""
    return list(Path(path).expanduser().resolve().rglob(f'*{extension}'))


def pickle_binary(data: object, file: Union[str, Path]) -> None:
    """Pickle `data` to the given file path."""
    with open(str(file), 'wb') as f:
        pickle.dump(data, f)


def unpickle_binary(file: Union[str, Path]) -> Any:
    """Load a pickled object from the given file path."""
    with open(str(file), 'rb') as f:
        return pickle.load(f)


def to_device(batch: dict, device: torch.device) -> tuple:
    """Move the four tensor fields of a dataloader batch to `device`."""
    tokens, mel, tokens_len, mel_len = batch['tokens'], batch['mel'], \
                                       batch['tokens_len'], batch['mel_len']
    tokens, mel, tokens_len, mel_len = tokens.to(device), mel.to(device), \
                                       tokens_len.to(device), mel_len.to(device)
    return tokens, mel, tokens_len, mel_len
2,358
33.188406
93
py
DeepForcedAligner
DeepForcedAligner-main/dfa/model.py
import torch
import torch.nn as nn


class BatchNormConv(nn.Module):
    """Conv1d -> ReLU -> BatchNorm block operating on (batch, time, channels) tensors."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int):
        super().__init__()
        self.conv = nn.Conv1d(
            in_channels, out_channels, kernel_size,
            stride=1, padding=kernel_size // 2, bias=False)
        self.bnorm = nn.BatchNorm1d(out_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Conv1d/BatchNorm1d expect (batch, channels, time).
        x = x.transpose(1, 2)
        x = self.conv(x)
        x = self.relu(x)
        x = self.bnorm(x)
        x = x.transpose(1, 2)
        return x


class Aligner(torch.nn.Module):
    """CTC-style aligner: conv stack + BiLSTM + linear projection to symbol logits."""

    def __init__(self,
                 n_mels: int,
                 num_symbols: int,
                 lstm_dim: int,
                 conv_dim: int) -> None:
        super().__init__()
        # Persistent global step counter, saved/restored with the state dict.
        self.register_buffer('step', torch.tensor(1, dtype=torch.int))
        self.convs = nn.ModuleList([
            BatchNormConv(n_mels, conv_dim, 5),
            BatchNormConv(conv_dim, conv_dim, 5),
            BatchNormConv(conv_dim, conv_dim, 5),
        ])
        self.rnn = torch.nn.LSTM(conv_dim, lstm_dim, batch_first=True, bidirectional=True)
        self.lin = torch.nn.Linear(2 * lstm_dim, num_symbols)

    def forward(self, x):
        # Bug fix: the original tested `if self.train:` — `train` is a bound
        # method and therefore always truthy, so the step counter also
        # advanced during evaluation. `self.training` is the actual
        # train/eval flag toggled by .train()/.eval().
        if self.training:
            self.step += 1
        for conv in self.convs:
            x = conv(x)
        x, _ = self.rnn(x)
        x = self.lin(x)
        return x

    def get_step(self):
        """Return the current global step as a plain int."""
        return self.step.data.item()

    @classmethod
    def from_checkpoint(cls, checkpoint: dict) -> 'Aligner':
        """Rebuild an Aligner from a checkpoint dict ('config', 'symbols', 'model')."""
        config = checkpoint['config']
        symbols = checkpoint['symbols']
        # +1 output symbol for the CTC blank.
        model = Aligner(n_mels=config['audio']['n_mels'],
                        num_symbols=len(symbols) + 1,
                        **config['model'])
        model.load_state_dict(checkpoint['model'])
        return model
1,868
29.639344
90
py
DeepForcedAligner
DeepForcedAligner-main/dfa/dataset.py
from pathlib import Path
from random import Random
from typing import List

import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import Sampler

from dfa.utils import unpickle_binary


class AlignerDataset(Dataset):
    """Loads per-item mel spectrograms and token arrays from .npy files on demand."""

    def __init__(self, item_ids: List[str], mel_dir: Path, token_dir: Path):
        super().__init__()
        self.item_ids = item_ids
        self.mel_dir = mel_dir
        self.token_dir = token_dir

    def __getitem__(self, index):
        """Return one item as a dict of tensors plus its id and unpadded lengths."""
        item_id = self.item_ids[index]
        mel = np.load(str(self.mel_dir / f'{item_id}.npy'), allow_pickle=False)
        tokens = np.load(str(self.token_dir / f'{item_id}.npy'), allow_pickle=False)
        mel = torch.tensor(mel).float()
        tokens = torch.tensor(tokens).long()
        return {'item_id': item_id, 'tokens': tokens, 'mel': mel,
                'tokens_len': tokens.size(0), 'mel_len': mel.size(0)}

    def __len__(self):
        return len(self.item_ids)


# From https://github.com/fatchord/WaveRNN/blob/master/utils/dataset.py
class BinnedLengthSampler(Sampler):
    """Yields indices grouped into shuffled bins of similar mel length, so each
    batch contains similarly long items (less padding waste), while bin order
    and in-bin order stay randomized."""

    def __init__(self, mel_lens: torch.tensor, batch_size: int, bin_size: int, seed=42):
        # Indices sorted ascending by mel length; bins are slices of this order.
        _, self.idx = torch.sort(torch.tensor(mel_lens))
        self.batch_size = batch_size
        self.bin_size = bin_size
        self.random = Random(seed)
        assert self.bin_size % self.batch_size == 0

    def __iter__(self):
        idx = self.idx.numpy()
        bins = []
        for i in range(len(idx) // self.bin_size):
            this_bin = idx[i * self.bin_size:(i + 1) * self.bin_size]
            self.random.shuffle(this_bin)
            bins += [this_bin]
        # Shuffle bin order, then flatten; leftover items (< one bin) are
        # shuffled and appended at the end.
        self.random.shuffle(bins)
        binned_idx = np.stack(bins).reshape(-1)
        if len(binned_idx) < len(idx):
            last_bin = idx[len(binned_idx):]
            self.random.shuffle(last_bin)
            binned_idx = np.concatenate([binned_idx, last_bin])
        return iter(torch.tensor(binned_idx).long())

    def __len__(self):
        return len(self.idx)


def collate_dataset(batch: List[dict]) -> torch.tensor:
    """Zero-pad tokens and mels to the batch max length and stack them,
    keeping the original (unpadded) lengths alongside."""
    tokens = [b['tokens'] for b in batch]
    tokens = pad_sequence(tokens, batch_first=True, padding_value=0)
    mels = [b['mel'] for b in batch]
    mels = pad_sequence(mels, batch_first=True, padding_value=0)
    tokens_len = torch.tensor([b['tokens_len'] for b in batch]).long()
    mel_len = torch.tensor([b['mel_len'] for b in batch]).long()
    item_ids = [b['item_id'] for b in batch]
    return {'tokens': tokens, 'mel': mels, 'tokens_len': tokens_len,
            'mel_len': mel_len, 'item_id': item_ids}


def new_dataloader(dataset_path: Path, mel_dir: Path, token_dir: Path, batch_size=32) -> DataLoader:
    """Build a DataLoader over the pickled dataset index with length-binned sampling."""
    dataset = unpickle_binary(dataset_path)
    item_ids = [d['item_id'] for d in dataset]
    mel_lens = [d['mel_len'] for d in dataset]
    aligner_dataset = AlignerDataset(item_ids=item_ids, mel_dir=mel_dir, token_dir=token_dir)
    return DataLoader(aligner_dataset,
                      collate_fn=collate_dataset,
                      batch_size=batch_size,
                      sampler=BinnedLengthSampler(mel_lens=mel_lens, batch_size=batch_size, bin_size=batch_size*3),
                      num_workers=0,
                      pin_memory=True)


def get_longest_mel_id(dataset_path: Path) -> str:
    """Return the item_id with the longest mel (ties broken by item_id)."""
    dataset = unpickle_binary(dataset_path)
    dataset.sort(key=lambda item: (item['mel_len'], item['item_id']))
    return dataset[-1]['item_id']
36.670103
93
py
trx
trx-main/video_reader.py
import torch
from torchvision import datasets, transforms
from PIL import Image
import os
import zipfile
import io
import numpy as np
import random
import re
import pickle
from glob import glob
# NOTE(review): videotransforms is a third-party video augmentation package;
# several of these imports (datasets, re, pickle, glob, RandomRotation,
# ColorJitter, TenCrop, ClipToTensor) appear unused in this module.
from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter, RandomHorizontalFlip, CenterCrop, TenCrop
from videotransforms.volume_transforms import ClipToTensor


"""Contains video frame paths and ground truth labels for a single split (e.g. train videos). """
class Split():
    def __init__(self):
        # parallel lists: gt_a_list[i] is the class label of videos[i]
        self.gt_a_list = []
        self.videos = []

    def add_vid(self, paths, gt_a):
        """Registers one video (list of frame paths) with its class label."""
        self.videos.append(paths)
        self.gt_a_list.append(gt_a)

    def get_rand_vid(self, label, idx=-1):
        """Returns (frame paths, global index) of a video with this label.

        If idx >= 0 it selects the idx-th video of that class deterministically;
        otherwise a random one (via np.random, not the seeded stdlib RNG).
        """
        match_idxs = []
        for i in range(len(self.gt_a_list)):
            if label == self.gt_a_list[i]:
                match_idxs.append(i)
        if idx != -1:
            return self.videos[match_idxs[idx]], match_idxs[idx]
        random_idx = np.random.choice(match_idxs)
        return self.videos[random_idx], random_idx

    def get_num_videos_for_class(self, label):
        """Number of videos carrying the given class label."""
        return len([gt for gt in self.gt_a_list if gt == label])

    def get_unique_classes(self):
        """Distinct class labels present in this split (unordered)."""
        return list(set(self.gt_a_list))

    def get_max_video_len(self):
        """Largest frame count over all videos in the split."""
        max_len = 0
        for v in self.videos:
            l = len(v)
            if l > max_len:
                max_len = l
        return max_len

    def __len__(self):
        return len(self.gt_a_list)


"""Dataset for few-shot videos, which returns few-shot tasks. """
class VideoDataset(torch.utils.data.Dataset):
    def __init__(self, args):
        # args is the parsed command-line namespace from run.py; the fields
        # read here (path, seq_len, img_size, traintestlist, way, shot,
        # query_per_class, split, debug_loader) must all be present
        self.args = args
        self.get_item_counter = 0

        self.data_dir = args.path
        self.seq_len = args.seq_len
        self.train = True  # toggled externally to switch between splits
        self.tensor_transform = transforms.ToTensor()
        self.img_size = args.img_size

        self.annotation_path = args.traintestlist

        self.way=args.way
        self.shot=args.shot
        self.query_per_class=args.query_per_class

        self.train_split = Split()
        self.test_split = Split()

        self.setup_transforms()
        self._select_fold()  # must run before read_dir: read_dir consults train_test_lists
        self.read_dir()

    """Setup crop sizes/flips for augmentation during training and centre crop for testing"""
    def setup_transforms(self):
        video_transform_list = []
        video_test_list = []
        # only the two image sizes used by the paper are supported
        if self.img_size == 84:
            video_transform_list.append(Resize(96))
            video_test_list.append(Resize(96))
        elif self.img_size == 224:
            video_transform_list.append(Resize(256))
            video_test_list.append(Resize(256))
        else:
            print("img size transforms not setup")
            exit(1)
        # train: random flip + random crop; test: deterministic centre crop
        video_transform_list.append(RandomHorizontalFlip())
        video_transform_list.append(RandomCrop(self.img_size))
        video_test_list.append(CenterCrop(self.img_size))
        self.transform = {}
        self.transform["train"] = Compose(video_transform_list)
        self.transform["test"] = Compose(video_test_list)

    """Loads all videos into RAM from an uncompressed zip. Necessary as the filesystem has a large block size, which is unsuitable for lots of images. """
    """Contains some legacy code for loading images directly, but this has not been used/tested for a while so might not work with the current codebase. """
    def read_dir(self):
        # load zipfile into memory
        if self.data_dir.endswith('.zip'):
            self.zip = True
            zip_fn = os.path.join(self.data_dir)
            # whole archive kept in RAM; file handle is intentionally not
            # closed as self.mem owns the bytes for the dataset's lifetime
            self.mem = open(zip_fn, 'rb').read()
            self.zfile = zipfile.ZipFile(io.BytesIO(self.mem))
        else:
            self.zip = False
        # go through zip and populate splits with frame locations and action groundtruths
        if self.zip:
            # archive layout assumed: <class>/<video>/<frame>.jpg — TODO confirm
            dir_list = list(set([x for x in self.zfile.namelist() if '.jpg' not in x]))
            class_folders = list(set([x.split(os.sep)[-3] for x in dir_list if len(x.split(os.sep)) > 2]))
            class_folders.sort()
            self.class_folders = class_folders
            video_folders = list(set([x.split(os.sep)[-2] for x in dir_list if len(x.split(os.sep)) > 3]))
            video_folders.sort()
            self.video_folders = video_folders

            class_folders_indexes = {v: k for k, v in enumerate(self.class_folders)}
            video_folders_indexes = {v: k for k, v in enumerate(self.video_folders)}

            img_list = [x for x in self.zfile.namelist() if '.jpg' in x]
            img_list.sort()

            c = self.get_train_or_test_db(video_folders[0])

            last_video_folder = None
            last_video_class = -1
            insert_frames = []
            # sorted frame paths arrive grouped by video; flush the previous
            # video's frames every time the video folder changes
            for img_path in img_list:
                class_folder, video_folder, jpg = img_path.split(os.sep)[-3:]
                if video_folder != last_video_folder:
                    # videos with fewer than seq_len frames are dropped
                    if len(insert_frames) >= self.seq_len:
                        c = self.get_train_or_test_db(last_video_folder.lower())
                        if c != None:
                            c.add_vid(insert_frames, last_video_class)
                        else:
                            pass
                    insert_frames = []
                    class_id = class_folders_indexes[class_folder]
                    vid_id = video_folders_indexes[video_folder]

                insert_frames.append(img_path)
                last_video_folder = video_folder
                last_video_class = class_id

            # flush the final video (no trailing folder change triggers it)
            c = self.get_train_or_test_db(last_video_folder)
            if c != None and len(insert_frames) >= self.seq_len:
                c.add_vid(insert_frames, last_video_class)
        else:
            # legacy path: frames stored directly on disk as
            # <data_dir>/<class>/<video>/<frame>
            class_folders = os.listdir(self.data_dir)
            class_folders.sort()
            self.class_folders = class_folders
            for class_folder in class_folders:
                video_folders = os.listdir(os.path.join(self.data_dir, class_folder))
                video_folders.sort()
                if self.args.debug_loader:
                    # only load one video per class to speed up debugging
                    video_folders = video_folders[0:1]
                for video_folder in video_folders:
                    c = self.get_train_or_test_db(video_folder)
                    if c == None:
                        continue
                    imgs = os.listdir(os.path.join(self.data_dir, class_folder, video_folder))
                    if len(imgs) < self.seq_len:
                        continue
                    imgs.sort()
                    paths = [os.path.join(self.data_dir, class_folder, video_folder, img) for img in imgs]
                    paths.sort()
                    class_id = class_folders.index(class_folder)
                    c.add_vid(paths, class_id)
        print("loaded {}".format(self.data_dir))
        print("train: {}, test: {}".format(len(self.train_split), len(self.test_split)))

    """ return the current split being used """
    def get_train_or_test_db(self, split=None):
        # With no argument, returns the split selected by self.train.
        # With a video name, routes by membership in the train/test lists;
        # returns None when the video is in neither list.
        if split is None:
            get_train_split = self.train
        else:
            if split in self.train_test_lists["train"]:
                get_train_split = True
            elif split in self.train_test_lists["test"]:
                get_train_split = False
            else:
                return None
        if get_train_split:
            return self.train_split
        else:
            return self.test_split

    """ load the paths of all videos in the train and test splits. """
    def _select_fold(self):
        lists = {}
        for name in ["train", "test"]:
            fname = "{}list{:02d}.txt".format(name, self.args.split)
            f = os.path.join(self.annotation_path, fname)
            selected_files = []
            with open(f, "r") as fid:
                data = fid.readlines()
                # normalise to lowercase, underscore-separated basenames
                # without extension, to match folder names in the archive
                data = [x.replace(' ', '_').lower() for x in data]
                data = [x.strip().split(" ")[0] for x in data]
                data = [os.path.splitext(os.path.split(x)[1])[0] for x in data]
                if "kinetics" in self.args.path:
                    # kinetics video names are 11-char youtube ids
                    data = [x[0:11] for x in data]
                selected_files.extend(data)
            lists[name] = selected_files
        self.train_test_lists = lists

    """ Set len to large number as we use lots of random tasks. Stopping point controlled in run.py. """
    def __len__(self):
        c = self.get_train_or_test_db()
        return 1000000
        # NOTE(review): the line below is unreachable dead code
        return len(c)

    """ Get the classes used for the current split """
    def get_split_class_list(self):
        c = self.get_train_or_test_db()
        classes = list(set(c.gt_a_list))
        classes.sort()
        return classes

    """Loads a single image from a specified path """
    def read_single_image(self, path):
        if self.zip:
            with self.zfile.open(path, 'r') as f:
                with Image.open(f) as i:
                    i.load()  # force decode before the file handle closes
                    return i
        else:
            with Image.open(path) as i:
                i.load()
                return i

    """Gets a single video sequence. Handles sampling if there are more frames than specified. """
    def get_seq(self, label, idx=-1):
        c = self.get_train_or_test_db()
        paths, vid_id = c.get_rand_vid(label, idx)
        n_frames = len(paths)
        if n_frames == self.args.seq_len:
            idxs = [int(f) for f in range(n_frames)]
        else:
            if self.train:
                # jitter the start/end a little (up to 5 frames) for augmentation
                excess_frames = n_frames - self.seq_len
                excess_pad = int(min(5, excess_frames / 2))
                if excess_pad < 1:
                    start = 0
                    end = n_frames - 1
                else:
                    start = random.randint(0, excess_pad)
                    end = random.randint(n_frames-1 -excess_pad, n_frames-1)
            else:
                # deterministic-ish window for evaluation
                start = 1
                end = n_frames - 2
            if end - start < self.seq_len:
                end = n_frames - 1
                start = 0
            else:
                pass
            # sample seq_len frames evenly spaced in [start, end]
            idx_f = np.linspace(start, end, num=self.seq_len)
            idxs = [int(f) for f in idx_f]
            if self.seq_len == 1:
                idxs = [random.randint(start, end-1)]
        imgs = [self.read_single_image(paths[i]) for i in idxs]
        if (self.transform is not None):
            if self.train:
                transform = self.transform["train"]
            else:
                transform = self.transform["test"]
            # transform operates on the full frame list so crops/flips are
            # consistent across the clip; then convert each frame to a tensor
            imgs = [self.tensor_transform(v) for v in transform(imgs)]
            imgs = torch.stack(imgs)
        return imgs, vid_id

    """returns dict of support and target images and labels"""
    def __getitem__(self, index):
        # index is ignored: every call builds a fresh random few-shot task

        #select classes to use for this task
        c = self.get_train_or_test_db()
        classes = c.get_unique_classes()
        batch_classes = random.sample(classes, self.way)

        if self.train:
            n_queries = self.args.query_per_class
        else:
            n_queries = self.args.query_per_class_test

        support_set = []
        support_labels = []
        target_set = []
        target_labels = []
        real_support_labels = []
        real_target_labels = []

        for bl, bc in enumerate(batch_classes):
            #select shots from the chosen classes
            n_total = c.get_num_videos_for_class(bc)
            # first `shot` indices become support, the rest become queries
            idxs = random.sample([i for i in range(n_total)], self.args.shot + n_queries)

            for idx in idxs[0:self.args.shot]:
                vid, vid_id = self.get_seq(bc, idx)
                support_set.append(vid)
                support_labels.append(bl)
            for idx in idxs[self.args.shot:]:
                vid, vid_id = self.get_seq(bc, idx)
                target_set.append(vid)
                target_labels.append(bl)
                real_target_labels.append(bc)  # dataset-level class id, not task-level

        # shuffle support and target sets jointly with their labels
        s = list(zip(support_set, support_labels))
        random.shuffle(s)
        support_set, support_labels = zip(*s)

        t = list(zip(target_set, target_labels, real_target_labels))
        random.shuffle(t)
        target_set, target_labels, real_target_labels = zip(*t)

        support_set = torch.cat(support_set)
        target_set = torch.cat(target_set)
        support_labels = torch.FloatTensor(support_labels)
        target_labels = torch.FloatTensor(target_labels)
        real_target_labels = torch.FloatTensor(real_target_labels)
        batch_classes = torch.FloatTensor(batch_classes)

        return {"support_set":support_set, "support_labels":support_labels, "target_set":target_set, "target_labels":target_labels, "real_target_labels":real_target_labels, "batch_class_list": batch_classes}
12,944
36.850877
207
py
trx
trx-main/utils.py
import torch
import torch.nn.functional as F
import os
import math
from enum import Enum
import sys


class TestAccuracies:
    """
    Determines if an evaluation on the validation set is better than the best so far.
    In particular, this handles the case for meta-dataset where we validate on multiple datasets and we deem
    the evaluation to be better if more than half of the validation accuracies on the individual validation
    datasets are better than the previous best.
    """

    def __init__(self, validation_datasets):
        self.datasets = validation_datasets
        self.dataset_count = len(self.datasets)

    def print(self, logfile, accuracy_dict):
        """Writes one '<dataset>: <accuracy>+/-<confidence>' line per dataset.

        accuracy_dict maps dataset name -> {"accuracy": float, "confidence": float}.
        """
        print_and_log(logfile, "")  # add a blank line
        print_and_log(logfile, "Test Accuracies:")
        for dataset in self.datasets:
            print_and_log(logfile, "{0:}: {1:.1f}+/-{2:.1f}".format(dataset,
                                                                    accuracy_dict[dataset]["accuracy"],
                                                                    accuracy_dict[dataset]["confidence"]))
        print_and_log(logfile, "")  # add a blank line


def verify_checkpoint_dir(checkpoint_dir, resume, test_mode):
    """Validates the checkpoint directory, exiting the process on misuse.

    Resuming requires both the directory and checkpoint.pt to exist; a fresh
    run requires the directory NOT to exist (so runs are never overwritten).
    """
    if resume:  # verify that the checkpoint directory and file exists
        if not os.path.exists(checkpoint_dir):
            print("Can't resume for checkpoint. Checkpoint directory ({}) does not exist.".format(checkpoint_dir), flush=True)
            sys.exit()

        checkpoint_file = os.path.join(checkpoint_dir, 'checkpoint.pt')
        if not os.path.isfile(checkpoint_file):
            print("Can't resume for checkpoint. Checkpoint file ({}) does not exist.".format(checkpoint_file), flush=True)
            sys.exit()
    else:
        if os.path.exists(checkpoint_dir):
            # message typo fixed: "exits" -> "exists"
            print("Checkpoint directory ({}) already exists.".format(checkpoint_dir), flush=True)
            print("If starting a new training run, specify a directory that does not already exist.", flush=True)
            print("If you want to resume a training run, specify the -r option on the command line.", flush=True)
            sys.exit()


def print_and_log(log_file, message):
    """
    Helper function to print to the screen and the cnaps_layer_log.txt file.
    """
    print(message, flush=True)
    log_file.write(message + '\n')


def get_log_files(checkpoint_dir, resume, test_mode):
    """
    Function that takes a path to a checkpoint directory and returns a reference to a logfile and paths
    to the fully trained model and the model with the best validation score.
    """
    verify_checkpoint_dir(checkpoint_dir, resume, test_mode)
    if not resume:
        os.makedirs(checkpoint_dir)
    checkpoint_path_validation = os.path.join(checkpoint_dir, 'best_validation.pt')
    checkpoint_path_final = os.path.join(checkpoint_dir, 'fully_trained.pt')
    logfile_path = os.path.join(checkpoint_dir, 'log.txt')
    # append when resuming an existing run, otherwise start a fresh log;
    # buffering=1 -> line buffered so the log survives crashes
    if os.path.isfile(logfile_path):
        logfile = open(logfile_path, "a", buffering=1)
    else:
        logfile = open(logfile_path, "w", buffering=1)

    return checkpoint_dir, logfile, checkpoint_path_validation, checkpoint_path_final


def stack_first_dim(x):
    """
    Method to combine the first two dimension of an array
    """
    x_shape = x.size()
    new_shape = [x_shape[0] * x_shape[1]]
    if len(x_shape) > 2:
        new_shape += x_shape[2:]
    return x.view(new_shape)


def split_first_dim_linear(x, first_two_dims):
    """
    Undo the stacking operation
    """
    x_shape = x.size()
    # copy the caller's list: the original aliased it and the += below
    # mutated the argument in place
    new_shape = list(first_two_dims)
    if len(x_shape) > 1:
        new_shape += [x_shape[-1]]
    return x.view(new_shape)


def sample_normal(mean, var, num_samples):
    """
    Generate samples from a reparameterized normal distribution
    :param mean: tensor - mean parameter of the distribution
    :param var: tensor - variance of the distribution
    :param num_samples: np scalar - number of samples to generate
    :return: tensor - samples from distribution of size numSamples x dim(mean)
    """
    sample_shape = [num_samples] + len(mean.size())*[1]
    normal_distribution = torch.distributions.Normal(mean.repeat(sample_shape), var.repeat(sample_shape))
    return normal_distribution.rsample()


def loss(test_logits_sample, test_labels, device):
    """
    Compute the classification loss.

    test_logits_sample is (num_samples, num_examples, num_classes); the
    per-sample log-likelihoods are averaged in log space (logsumexp - log N).
    """
    size = test_logits_sample.size()
    sample_count = size[0]  # scalar for the loop counter
    num_samples = torch.tensor([sample_count], dtype=torch.float, device=device, requires_grad=False)

    log_py = torch.empty(size=(size[0], size[1]), dtype=torch.float, device=device)
    for sample in range(sample_count):
        log_py[sample] = -F.cross_entropy(test_logits_sample[sample], test_labels, reduction='none')
    score = torch.logsumexp(log_py, dim=0) - torch.log(num_samples)
    return -torch.sum(score, dim=0)


def aggregate_accuracy(test_logits_sample, test_labels):
    """
    Compute classification accuracy.
    """
    averaged_predictions = torch.logsumexp(test_logits_sample, dim=0)
    return torch.mean(torch.eq(test_labels, torch.argmax(averaged_predictions, dim=-1)).float())


def task_confusion(test_logits, test_labels, real_test_labels, batch_class_list):
    """Maps task-level predictions back to dataset-level class ids."""
    preds = torch.argmax(torch.logsumexp(test_logits, dim=0), dim=-1)
    real_preds = batch_class_list[preds]
    return real_preds


def linear_classifier(x, param_dict):
    """
    Classifier.
    """
    return F.linear(x, param_dict['weight_mean'], param_dict['bias_mean'])
6,529
37.639053
126
py
trx
trx-main/model.py
import torch
import torch.nn as nn
from collections import OrderedDict
from utils import split_first_dim_linear
import math
from itertools import combinations

from torch.autograd import Variable

import torchvision.models as models

# number of Monte-Carlo samples folded into the logits' leading dimension
NUM_SAMPLES=1


class PositionalEncoding(nn.Module):
    "Implement the PE function."
    def __init__(self, d_model, dropout, max_len=5000, pe_scale_factor=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.pe_scale_factor = pe_scale_factor
        # Compute the positional encodings once in log space.
        # pe is (1, max_len, d_model); registered as a buffer so it moves
        # with the module's device but is not a trainable parameter
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term) * self.pe_scale_factor
        pe[:, 1::2] = torch.cos(position * div_term) * self.pe_scale_factor
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # adds the (non-trainable) encoding for the first x.size(1) positions
        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
        return self.dropout(x)


class TemporalCrossTransformer(nn.Module):
    # Cross-attention over all ordered frame tuples of a fixed cardinality
    # (temporal_set_size); one instance is built per cardinality in CNN_TRX.
    def __init__(self, args, temporal_set_size=3):
        super(TemporalCrossTransformer, self).__init__()

        self.args = args
        self.temporal_set_size = temporal_set_size

        max_len = int(self.args.seq_len * 1.5)
        self.pe = PositionalEncoding(self.args.trans_linear_in_dim, self.args.trans_dropout, max_len=max_len)

        # keys and values are linear maps of concatenated per-tuple features
        self.k_linear = nn.Linear(self.args.trans_linear_in_dim * temporal_set_size, self.args.trans_linear_out_dim)#.cuda()
        self.v_linear = nn.Linear(self.args.trans_linear_in_dim * temporal_set_size, self.args.trans_linear_out_dim)#.cuda()

        self.norm_k = nn.LayerNorm(self.args.trans_linear_out_dim)
        self.norm_v = nn.LayerNorm(self.args.trans_linear_out_dim)

        self.class_softmax = torch.nn.Softmax(dim=1)

        # generate all tuples
        # NOTE(review): tuples are created on the GPU at construction time,
        # so this module requires CUDA even for CPU-only experiments
        frame_idxs = [i for i in range(self.args.seq_len)]
        frame_combinations = combinations(frame_idxs, temporal_set_size)
        self.tuples = [torch.tensor(comb).cuda() for comb in frame_combinations]
        self.tuples_len = len(self.tuples)

    def forward(self, support_set, support_labels, queries):
        # support_set / queries assumed (n_videos, seq_len, trans_linear_in_dim)
        # as produced by CNN_TRX.forward's reshape — TODO confirm for other callers
        n_queries = queries.shape[0]
        n_support = support_set.shape[0]

        # static pe
        support_set = self.pe(support_set)
        queries = self.pe(queries)

        # construct new queries and support set made of tuples of images after pe
        s = [torch.index_select(support_set, -2, p).reshape(n_support, -1) for p in self.tuples]
        q = [torch.index_select(queries, -2, p).reshape(n_queries, -1) for p in self.tuples]
        support_set = torch.stack(s, dim=-2)
        queries = torch.stack(q, dim=-2)

        # apply linear maps
        support_set_ks = self.k_linear(support_set)
        queries_ks = self.k_linear(queries)
        support_set_vs = self.v_linear(support_set)
        queries_vs = self.v_linear(queries)

        # apply norms where necessary
        mh_support_set_ks = self.norm_k(support_set_ks)
        mh_queries_ks = self.norm_k(queries_ks)
        mh_support_set_vs = support_set_vs
        mh_queries_vs = queries_vs

        unique_labels = torch.unique(support_labels)

        # init tensor to hold distances between every support tuple and every target tuple
        all_distances_tensor = torch.zeros(n_queries, self.args.way).cuda()

        for label_idx, c in enumerate(unique_labels):

            # select keys and values for just this class
            class_k = torch.index_select(mh_support_set_ks, 0, self._extract_class_indices(support_labels, c))
            class_v = torch.index_select(mh_support_set_vs, 0, self._extract_class_indices(support_labels, c))
            k_bs = class_k.shape[0]

            # scaled dot-product attention from query tuples to this class's
            # support tuples
            class_scores = torch.matmul(mh_queries_ks.unsqueeze(1), class_k.transpose(-2,-1)) / math.sqrt(self.args.trans_linear_out_dim)

            # reshape etc. to apply a softmax for each query tuple
            class_scores = class_scores.permute(0,2,1,3)
            class_scores = class_scores.reshape(n_queries, self.tuples_len, -1)
            class_scores = [self.class_softmax(class_scores[i]) for i in range(n_queries)]
            class_scores = torch.cat(class_scores)
            class_scores = class_scores.reshape(n_queries, self.tuples_len, -1, self.tuples_len)
            class_scores = class_scores.permute(0,2,1,3)

            # get query specific class prototype
            query_prototype = torch.matmul(class_scores, class_v)
            query_prototype = torch.sum(query_prototype, dim=1)

            # calculate distances from queries to query-specific class prototypes
            diff = mh_queries_vs - query_prototype
            norm_sq = torch.norm(diff, dim=[-2,-1])**2
            distance = torch.div(norm_sq, self.tuples_len)

            # multiply by -1 to get logits
            distance = distance * -1
            c_idx = c.long()
            all_distances_tensor[:,c_idx] = distance

        return_dict = {'logits': all_distances_tensor}

        return return_dict

    @staticmethod
    def _extract_class_indices(labels, which_class):
        """
        Helper method to extract the indices of elements which have the specified label.
        :param labels: (torch.tensor) Labels of the context set.
        :param which_class: Label for which indices are extracted.
        :return: (torch.tensor) Indices in the form of a mask that indicate the locations of the specified label.
        """
        class_mask = torch.eq(labels, which_class)  # binary mask of labels equal to which_class
        class_mask_indices = torch.nonzero(class_mask)  # indices of labels equal to which class
        return torch.reshape(class_mask_indices, (-1,))  # reshape to be a 1D vector


class CNN_TRX(nn.Module):
    """
    Standard Resnet connected to a Temporal Cross Transformer.

    """
    def __init__(self, args):
        super(CNN_TRX, self).__init__()

        self.train()
        self.args = args

        # NOTE(review): an unrecognised args.method leaves `resnet` unbound
        # and raises NameError two lines later
        if self.args.method == "resnet18":
            resnet = models.resnet18(pretrained=True)
        elif self.args.method == "resnet34":
            resnet = models.resnet34(pretrained=True)
        elif self.args.method == "resnet50":
            resnet = models.resnet50(pretrained=True)

        # drop the final fc layer, keeping the pooled feature extractor
        last_layer_idx = -1
        self.resnet = nn.Sequential(*list(resnet.children())[:last_layer_idx])

        # one transformer per tuple cardinality (e.g. pairs and triples)
        self.transformers = nn.ModuleList([TemporalCrossTransformer(args, s) for s in args.temp_set])

    def forward(self, context_images, context_labels, target_images):
        # images are flattened as (n_videos * seq_len, C, H, W); features are
        # regrouped per video below
        context_features = self.resnet(context_images).squeeze()
        target_features = self.resnet(target_images).squeeze()

        dim = int(context_features.shape[1])

        context_features = context_features.reshape(-1, self.args.seq_len, dim)
        target_features = target_features.reshape(-1, self.args.seq_len, dim)

        # average logits over the per-cardinality transformers
        all_logits = [t(context_features, context_labels, target_features)['logits'] for t in self.transformers]
        all_logits = torch.stack(all_logits, dim=-1)
        sample_logits = all_logits
        sample_logits = torch.mean(sample_logits, dim=[-1])

        return_dict = {'logits': split_first_dim_linear(sample_logits, [NUM_SAMPLES, target_features.shape[0]])}
        return return_dict

    def distribute_model(self):
        """
        Distributes the CNNs over multiple GPUs.
        :return: Nothing
        """
        if self.args.num_gpus > 1:
            self.resnet.cuda(0)
            self.resnet = torch.nn.DataParallel(self.resnet, device_ids=[i for i in range(0, self.args.num_gpus)])

            self.transformers.cuda(0)


if __name__ == "__main__":
    # smoke test: build the model and run one random task through it
    class ArgsObject(object):
        def __init__(self):
            self.trans_linear_in_dim = 512
            self.trans_linear_out_dim = 128

            self.way = 5
            self.shot = 1
            self.query_per_class = 5
            self.trans_dropout = 0.1
            self.seq_len = 8
            self.img_size = 84
            self.method = "resnet18"
            self.num_gpus = 1
            self.temp_set = [2,3]
    args = ArgsObject()
    torch.manual_seed(0)

    device = 'cuda:0'
    model = CNN_TRX(args).to(device)

    support_imgs = torch.rand(args.way * args.shot * args.seq_len,3, args.img_size, args.img_size).to(device)
    target_imgs = torch.rand(args.way * args.query_per_class * args.seq_len ,3, args.img_size, args.img_size).to(device)
    support_labels = torch.tensor([0,1,2,3,4]).to(device)

    print("Support images input shape: {}".format(support_imgs.shape))
    print("Target images input shape: {}".format(target_imgs.shape))
    print("Support labels input shape: {}".format(support_imgs.shape))

    out = model(support_imgs, support_labels, target_imgs)

    print("TRX returns the distances from each query to each class prototype.  Use these as logits.  Shape: {}".format(out['logits'].shape))
9,264
38.935345
140
py
trx
trx-main/run.py
import torch import numpy as np import argparse import os import pickle from utils import print_and_log, get_log_files, TestAccuracies, loss, aggregate_accuracy, verify_checkpoint_dir, task_confusion from model import CNN_TRX os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Quiet TensorFlow warnings import tensorflow as tf from torch.optim.lr_scheduler import MultiStepLR from torch.utils.tensorboard import SummaryWriter import torchvision import video_reader import random def main(): learner = Learner() learner.run() class Learner: def __init__(self): self.args = self.parse_command_line() self.checkpoint_dir, self.logfile, self.checkpoint_path_validation, self.checkpoint_path_final \ = get_log_files(self.args.checkpoint_dir, self.args.resume_from_checkpoint, False) print_and_log(self.logfile, "Options: %s\n" % self.args) print_and_log(self.logfile, "Checkpoint Directory: %s\n" % self.checkpoint_dir) self.writer = SummaryWriter() gpu_device = 'cuda' self.device = torch.device(gpu_device if torch.cuda.is_available() else 'cpu') self.model = self.init_model() self.train_set, self.validation_set, self.test_set = self.init_data() self.vd = video_reader.VideoDataset(self.args) self.video_loader = torch.utils.data.DataLoader(self.vd, batch_size=1, num_workers=self.args.num_workers) self.loss = loss self.accuracy_fn = aggregate_accuracy if self.args.opt == "adam": self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate) elif self.args.opt == "sgd": self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.learning_rate) self.test_accuracies = TestAccuracies(self.test_set) self.scheduler = MultiStepLR(self.optimizer, milestones=self.args.sch, gamma=0.1) self.start_iteration = 0 if self.args.resume_from_checkpoint: self.load_checkpoint() self.optimizer.zero_grad() def init_model(self): model = CNN_TRX(self.args) model = model.to(self.device) if self.args.num_gpus > 1: model.distribute_model() return model def init_data(self): train_set = 
[self.args.dataset] validation_set = [self.args.dataset] test_set = [self.args.dataset] return train_set, validation_set, test_set """ Command line parser """ def parse_command_line(self): parser = argparse.ArgumentParser() parser.add_argument("--dataset", choices=["ssv2", "kinetics", "hmdb", "ucf"], default="ssv2", help="Dataset to use.") parser.add_argument("--learning_rate", "-lr", type=float, default=0.001, help="Learning rate.") parser.add_argument("--tasks_per_batch", type=int, default=16, help="Number of tasks between parameter optimizations.") parser.add_argument("--checkpoint_dir", "-c", default=None, help="Directory to save checkpoint to.") parser.add_argument("--test_model_path", "-m", default=None, help="Path to model to load and test.") parser.add_argument("--training_iterations", "-i", type=int, default=100020, help="Number of meta-training iterations.") parser.add_argument("--resume_from_checkpoint", "-r", dest="resume_from_checkpoint", default=False, action="store_true", help="Restart from latest checkpoint.") parser.add_argument("--way", type=int, default=5, help="Way of each task.") parser.add_argument("--shot", type=int, default=5, help="Shots per class.") parser.add_argument("--query_per_class", type=int, default=5, help="Target samples (i.e. queries) per class used for training.") parser.add_argument("--query_per_class_test", type=int, default=1, help="Target samples (i.e. queries) per class used for testing.") parser.add_argument('--test_iters', nargs='+', type=int, help='iterations to test at. 
Default is for ssv2 otam split.', default=[75000]) parser.add_argument("--num_test_tasks", type=int, default=10000, help="number of random tasks to test on.") parser.add_argument("--print_freq", type=int, default=1000, help="print and log every n iterations.") parser.add_argument("--seq_len", type=int, default=8, help="Frames per video.") parser.add_argument("--num_workers", type=int, default=10, help="Num dataloader workers.") parser.add_argument("--method", choices=["resnet18", "resnet34", "resnet50"], default="resnet50", help="method") parser.add_argument("--trans_linear_out_dim", type=int, default=1152, help="Transformer linear_out_dim") parser.add_argument("--opt", choices=["adam", "sgd"], default="sgd", help="Optimizer") parser.add_argument("--trans_dropout", type=int, default=0.1, help="Transformer dropout") parser.add_argument("--save_freq", type=int, default=5000, help="Number of iterations between checkpoint saves.") parser.add_argument("--img_size", type=int, default=224, help="Input image size to the CNN after cropping.") parser.add_argument('--temp_set', nargs='+', type=int, help='cardinalities e.g. 
2,3 is pairs and triples', default=[2,3]) parser.add_argument("--scratch", choices=["bc", "bp"], default="bp", help="directory containing dataset, splits, and checkpoint saves.") parser.add_argument("--num_gpus", type=int, default=1, help="Number of GPUs to split the ResNet over") parser.add_argument("--debug_loader", default=False, action="store_true", help="Load 1 vid per class for debugging") parser.add_argument("--split", type=int, default=7, help="Dataset split.") parser.add_argument('--sch', nargs='+', type=int, help='iters to drop learning rate', default=[1000000]) args = parser.parse_args() if args.scratch == "bc": args.scratch = "/mnt/storage/home/tp8961/scratch" elif args.scratch == "bp": args.num_gpus = 4 # this is low becuase of RAM constraints for the data loader args.num_workers = 3 args.scratch = "/work/tp8961" if args.checkpoint_dir == None: print("need to specify a checkpoint dir") exit(1) if (args.method == "resnet50") or (args.method == "resnet34"): args.img_size = 224 if args.method == "resnet50": args.trans_linear_in_dim = 2048 else: args.trans_linear_in_dim = 512 if args.dataset == "ssv2": args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/somethingsomethingv2TrainTestlist") args.path = os.path.join(args.scratch, "video_datasets/data/somethingsomethingv2_256x256q5_7l8.zip") elif args.dataset == "kinetics": args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/kineticsTrainTestlist") args.path = os.path.join(args.scratch, "video_datasets/data/kinetics_256q5_1.zip") elif args.dataset == "ucf": args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/ucfTrainTestlist") args.path = os.path.join(args.scratch, "video_datasets/data/UCF-101_320.zip") elif args.dataset == "hmdb": args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/hmdb51TrainTestlist") args.path = os.path.join(args.scratch, "video_datasets/data/hmdb51_256q5.zip") return args def run(self): config = 
tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True with tf.compat.v1.Session(config=config) as session: train_accuracies = [] losses = [] total_iterations = self.args.training_iterations iteration = self.start_iteration for task_dict in self.video_loader: if iteration >= total_iterations: break iteration += 1 torch.set_grad_enabled(True) task_loss, task_accuracy = self.train_task(task_dict) train_accuracies.append(task_accuracy) losses.append(task_loss) # optimize if ((iteration + 1) % self.args.tasks_per_batch == 0) or (iteration == (total_iterations - 1)): self.optimizer.step() self.optimizer.zero_grad() self.scheduler.step() if (iteration + 1) % self.args.print_freq == 0: # print training stats print_and_log(self.logfile,'Task [{}/{}], Train Loss: {:.7f}, Train Accuracy: {:.7f}' .format(iteration + 1, total_iterations, torch.Tensor(losses).mean().item(), torch.Tensor(train_accuracies).mean().item())) train_accuracies = [] losses = [] if ((iteration + 1) % self.args.save_freq == 0) and (iteration + 1) != total_iterations: self.save_checkpoint(iteration + 1) if ((iteration + 1) in self.args.test_iters) and (iteration + 1) != total_iterations: accuracy_dict = self.test(session) print(accuracy_dict) self.test_accuracies.print(self.logfile, accuracy_dict) # save the final model torch.save(self.model.state_dict(), self.checkpoint_path_final) self.logfile.close() def train_task(self, task_dict): context_images, target_images, context_labels, target_labels, real_target_labels, batch_class_list = self.prepare_task(task_dict) model_dict = self.model(context_images, context_labels, target_images) target_logits = model_dict['logits'] task_loss = self.loss(target_logits, target_labels, self.device) / self.args.tasks_per_batch task_accuracy = self.accuracy_fn(target_logits, target_labels) task_loss.backward(retain_graph=False) return task_loss, task_accuracy def test(self, session): self.model.eval() with torch.no_grad(): self.video_loader.dataset.train = False 
accuracy_dict ={} accuracies = [] iteration = 0 item = self.args.dataset for task_dict in self.video_loader: if iteration >= self.args.num_test_tasks: break iteration += 1 context_images, target_images, context_labels, target_labels, real_target_labels, batch_class_list = self.prepare_task(task_dict) model_dict = self.model(context_images, context_labels, target_images) target_logits = model_dict['logits'] accuracy = self.accuracy_fn(target_logits, target_labels) accuracies.append(accuracy.item()) del target_logits accuracy = np.array(accuracies).mean() * 100.0 confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies)) accuracy_dict[item] = {"accuracy": accuracy, "confidence": confidence} self.video_loader.dataset.train = True self.model.train() return accuracy_dict def prepare_task(self, task_dict, images_to_device = True): context_images, context_labels = task_dict['support_set'][0], task_dict['support_labels'][0] target_images, target_labels = task_dict['target_set'][0], task_dict['target_labels'][0] real_target_labels = task_dict['real_target_labels'][0] batch_class_list = task_dict['batch_class_list'][0] if images_to_device: context_images = context_images.to(self.device) target_images = target_images.to(self.device) context_labels = context_labels.to(self.device) target_labels = target_labels.type(torch.LongTensor).to(self.device) return context_images, target_images, context_labels, target_labels, real_target_labels, batch_class_list def shuffle(self, images, labels): """ Return shuffled data. 
""" permutation = np.random.permutation(images.shape[0]) return images[permutation], labels[permutation] def save_checkpoint(self, iteration): d = {'iteration': iteration, 'model_state_dict': self.model.state_dict(), 'optimizer_state_dict': self.optimizer.state_dict(), 'scheduler': self.scheduler.state_dict()} torch.save(d, os.path.join(self.checkpoint_dir, 'checkpoint{}.pt'.format(iteration))) torch.save(d, os.path.join(self.checkpoint_dir, 'checkpoint.pt')) def load_checkpoint(self): checkpoint = torch.load(os.path.join(self.checkpoint_dir, 'checkpoint.pt')) self.start_iteration = checkpoint['iteration'] self.model.load_state_dict(checkpoint['model_state_dict']) self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) self.scheduler.load_state_dict(checkpoint['scheduler']) if __name__ == "__main__": main()
13,447
47.901818
168
py
trx
trx-main/videotransforms/stack_transforms.py
import numpy as np import PIL import torch from videotransforms.utils import images as imageutils class ToStackedTensor(object): """Converts a list of m (H x W x C) numpy.ndarrays in the range [0, 255] or PIL Images to a torch.FloatTensor of shape (m*C x H x W) in the range [0, 1.0] """ def __init__(self, channel_nb=3): self.channel_nb = channel_nb def __call__(self, clip): """ Args: clip (list of numpy.ndarray or PIL.Image.Image): clip (list of images) to be converted to tensor. """ # Retrieve shape if isinstance(clip[0], np.ndarray): h, w, ch = clip[0].shape assert ch == self.channel_nb, 'got {} channels instead of 3'.format( ch) elif isinstance(clip[0], PIL.Image.Image): w, h = clip[0].size else: raise TypeError('Expected numpy.ndarray or PIL.Image\ but got list of {0}'.format(type(clip[0]))) np_clip = np.zeros([self.channel_nb * len(clip), int(h), int(w)]) # Convert for img_idx, img in enumerate(clip): if isinstance(img, np.ndarray): pass elif isinstance(img, PIL.Image.Image): img = np.array(img, copy=False) else: raise TypeError('Expected numpy.ndarray or PIL.Image\ but got list of {0}'.format(type(clip[0]))) img = imageutils.convert_img(img) np_clip[img_idx * self.channel_nb:( img_idx + 1) * self.channel_nb, :, :] = img tensor_clip = torch.from_numpy(np_clip) return tensor_clip.float().div(255)
1,699
33
80
py
trx
trx-main/videotransforms/volume_transforms.py
import numpy as np from PIL import Image import torch from videotransforms.utils import images as imageutils class ClipToTensor(object): """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] """ def __init__(self, channel_nb=3, div_255=True, numpy=False): self.channel_nb = channel_nb self.div_255 = div_255 self.numpy = numpy def __call__(self, clip): """ Args: clip (list of numpy.ndarray): clip (list of images) to be converted to tensor. """ # Retrieve shape if isinstance(clip[0], np.ndarray): h, w, ch = clip[0].shape assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( ch) elif isinstance(clip[0], Image.Image): w, h = clip[0].size else: raise TypeError('Expected numpy.ndarray or PIL.Image\ but got list of {0}'.format(type(clip[0]))) np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) # Convert for img_idx, img in enumerate(clip): if isinstance(img, np.ndarray): pass elif isinstance(img, Image.Image): img = np.array(img, copy=False) else: raise TypeError('Expected numpy.ndarray or PIL.Image\ but got list of {0}'.format(type(clip[0]))) img = imageutils.convert_img(img) np_clip[:, img_idx, :, :] = img if self.numpy: if self.div_255: np_clip = np_clip / 255 return np_clip else: tensor_clip = torch.from_numpy(np_clip) if not isinstance(tensor_clip, torch.FloatTensor): tensor_clip = tensor_clip.float() if self.div_255: tensor_clip = tensor_clip.div(255) return tensor_clip class ToTensor(object): """Converts numpy array to tensor """ def __call__(self, array): tensor = torch.from_numpy(array) return tensor
2,152
30.202899
81
py
trx
trx-main/videotransforms/functional.py
import numbers #import cv2 import numpy as np import PIL #from skimage.transform import resize import torchvision def crop_clip(clip, min_h, min_w, h, w): if isinstance(clip[0], np.ndarray): cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] elif isinstance(clip[0], PIL.Image.Image): cropped = [ img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip ] else: raise TypeError('Expected numpy.ndarray or PIL.Image' + 'but got list of {0}'.format(type(clip[0]))) return cropped def resize_clip(clip, size, interpolation='bilinear'): if isinstance(clip[0], np.ndarray): # if isinstance(size, numbers.Number): # im_h, im_w, im_c = clip[0].shape # # Min spatial dim already matches minimal size # if (im_w <= im_h and im_w == size) or (im_h <= im_w # and im_h == size): # return clip # new_h, new_w = get_resize_sizes(im_h, im_w, size) # size = (new_w, new_h) # else: # size = size[1], size[0] # if interpolation == 'bilinear': # np_inter = cv2.INTER_LINEAR # else: # np_inter = cv2.INTER_NEAREST # scaled = [ # cv2.resize(img, size, interpolation=np_inter) for img in clip # ] raise NotImplementedError elif isinstance(clip[0], PIL.Image.Image): if isinstance(size, numbers.Number): im_w, im_h = clip[0].size # Min spatial dim already matches minimal size if (im_w <= im_h and im_w == size) or (im_h <= im_w and im_h == size): return clip new_h, new_w = get_resize_sizes(im_h, im_w, size) size = (new_w, new_h) else: size = size[1], size[0] if interpolation == 'bilinear': pil_inter = PIL.Image.NEAREST else: pil_inter = PIL.Image.BILINEAR scaled = [img.resize(size, pil_inter) for img in clip] else: raise TypeError('Expected numpy.ndarray or PIL.Image' + 'but got list of {0}'.format(type(clip[0]))) return scaled def get_resize_sizes(im_h, im_w, size): if im_w < im_h: ow = size oh = int(size * im_h / im_w) else: oh = size ow = int(size * im_w / im_h) return oh, ow
2,493
32.702703
76
py
trx
trx-main/videotransforms/video_transforms.py
import numbers import random #import cv2 from matplotlib import pyplot as plt import numpy as np import PIL import scipy import torch import torchvision from . import functional as F class Compose(object): """Composes several transforms Args: transforms (list of ``Transform`` objects): list of transforms to compose """ def __init__(self, transforms): self.transforms = transforms def __call__(self, clip): for t in self.transforms: clip = t(clip) return clip class RandomHorizontalFlip(object): """Horizontally flip the list of given images randomly with a probability 0.5 """ def __call__(self, clip): """ Args: img (PIL.Image or numpy.ndarray): List of images to be cropped in format (h, w, c) in numpy.ndarray Returns: PIL.Image or numpy.ndarray: Randomly flipped clip """ if random.random() < 0.5: if isinstance(clip[0], np.ndarray): return [np.fliplr(img) for img in clip] elif isinstance(clip[0], PIL.Image.Image): return [ img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip ] else: raise TypeError('Expected numpy.ndarray or PIL.Image' + ' but got list of {0}'.format(type(clip[0]))) return clip class RandomResize(object): """Resizes a list of (H x W x C) numpy.ndarray to the final size The larger the original image is, the more times it takes to interpolate Args: interpolation (str): Can be one of 'nearest', 'bilinear' defaults to nearest size (tuple): (widht, height) """ def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): self.ratio = ratio self.interpolation = interpolation def __call__(self, clip): scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) if isinstance(clip[0], np.ndarray): im_h, im_w, im_c = clip[0].shape elif isinstance(clip[0], PIL.Image.Image): im_w, im_h = clip[0].size new_w = int(im_w * scaling_factor) new_h = int(im_h * scaling_factor) new_size = (new_w, new_h) resized = F.resize_clip( clip, new_size, interpolation=self.interpolation) return resized class Resize(object): """Resizes a list of (H x W x C) numpy.ndarray to the final size The larger the original image is, the more times it takes to interpolate Args: interpolation (str): Can be one of 'nearest', 'bilinear' defaults to nearest size (tuple): (widht, height) """ def __init__(self, size, interpolation='nearest'): self.size = size self.interpolation = interpolation def __call__(self, clip): resized = F.resize_clip( clip, self.size, interpolation=self.interpolation) return resized class RandomCrop(object): """Extract random crop at the same location for a list of images Args: size (sequence or int): Desired output size for the crop in format (h, w) """ def __init__(self, size): if isinstance(size, numbers.Number): size = (size, size) self.size = size def __call__(self, clip): """ Args: img (PIL.Image or numpy.ndarray): List of images to be cropped in format (h, w, c) in numpy.ndarray Returns: PIL.Image or numpy.ndarray: Cropped list of images """ h, w = self.size if isinstance(clip[0], np.ndarray): im_h, im_w, im_c = clip[0].shape elif isinstance(clip[0], PIL.Image.Image): im_w, im_h = clip[0].size else: raise TypeError('Expected numpy.ndarray or PIL.Image' + 'but got list of {0}'.format(type(clip[0]))) if w > im_w or h > im_h: error_msg = ( 'Initial image size should be larger then ' 'cropped size but got cropped sizes : ({w}, {h}) while ' 'initial image is ({im_w}, {im_h})'.format( im_w=im_w, im_h=im_h, w=w, h=h)) raise ValueError(error_msg) x1 = 
random.randint(0, im_w - w) y1 = random.randint(0, im_h - h) cropped = F.crop_clip(clip, y1, x1, h, w) return cropped class RandomRotation(object): """Rotate entire clip randomly by a random angle within given bounds Args: degrees (sequence or int): Range of degrees to select from If degrees is a number instead of sequence like (min, max), the range of degrees, will be (-degrees, +degrees). """ def __init__(self, degrees): if isinstance(degrees, numbers.Number): if degrees < 0: raise ValueError('If degrees is a single number,' 'must be positive') degrees = (-degrees, degrees) else: if len(degrees) != 2: raise ValueError('If degrees is a sequence,' 'it must be of len 2.') self.degrees = degrees def __call__(self, clip): """ Args: img (PIL.Image or numpy.ndarray): List of images to be cropped in format (h, w, c) in numpy.ndarray Returns: PIL.Image or numpy.ndarray: Cropped list of images """ angle = random.uniform(self.degrees[0], self.degrees[1]) if isinstance(clip[0], np.ndarray): rotated = [scipy.misc.imrotate(img, angle) for img in clip] elif isinstance(clip[0], PIL.Image.Image): rotated = [img.rotate(angle) for img in clip] else: raise TypeError('Expected numpy.ndarray or PIL.Image' + 'but got list of {0}'.format(type(clip[0]))) return rotated class CenterCrop(object): """Extract center crop at the same location for a list of images Args: size (sequence or int): Desired output size for the crop in format (h, w) """ def __init__(self, size): if isinstance(size, numbers.Number): size = (size, size) self.size = size def __call__(self, clip): """ Args: img (PIL.Image or numpy.ndarray): List of images to be cropped in format (h, w, c) in numpy.ndarray Returns: PIL.Image or numpy.ndarray: Cropped list of images """ h, w = self.size if isinstance(clip[0], np.ndarray): im_h, im_w, im_c = clip[0].shape elif isinstance(clip[0], PIL.Image.Image): im_w, im_h = clip[0].size else: raise TypeError('Expected numpy.ndarray or PIL.Image' + 'but got list of 
{0}'.format(type(clip[0]))) if w > im_w or h > im_h: error_msg = ( 'Initial image size should be larger then ' 'cropped size but got cropped sizes : ({w}, {h}) while ' 'initial image is ({im_w}, {im_h})'.format( im_w=im_w, im_h=im_h, w=w, h=h)) raise ValueError(error_msg) x1 = int(round((im_w - w) / 2.)) y1 = int(round((im_h - h) / 2.)) cropped = F.crop_clip(clip, y1, x1, h, w) return cropped class TenCrop(object): """Extract center crop at the same location for a list of images Args: size (sequence or int): Desired output size for the crop in format (h, w) """ def __init__(self, size): if isinstance(size, numbers.Number): size = (size, size) self.size = size def __call__(self, clip): """ Args: img (PIL.Image or numpy.ndarray): List of images to be cropped in format (h, w, c) in numpy.ndarray Returns: PIL.Image or numpy.ndarray: Cropped list of images """ h, w = self.size if isinstance(clip[0], np.ndarray): im_h, im_w, im_c = clip[0].shape elif isinstance(clip[0], PIL.Image.Image): im_w, im_h = clip[0].size else: raise TypeError('Expected numpy.ndarray or PIL.Image' + 'but got list of {0}'.format(type(clip[0]))) if w > im_w or h > im_h: error_msg = ( 'Initial image size should be larger then ' 'cropped size but got cropped sizes : ({w}, {h}) while ' 'initial image is ({im_w}, {im_h})'.format( im_w=im_w, im_h=im_h, w=w, h=h)) raise ValueError(error_msg) if isinstance(clip[0], np.ndarray): flip_clip = [np.fliplr(img) for img in clip] elif isinstance(clip[0], PIL.Image.Image): flip_clip = [img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip] else: raise TypeError('Expected numpy.ndarray or PIL.Image' + ' but got list of {0}'.format(type(clip[0]))) x1 = int(round((im_w - w) / 2.)) y1 = int(round((im_h - h) / 2.)) all_x = [x1] all_y = [y1] all_x.append(0) all_y.append(0) all_x.append(im_w - w) all_y.append(0) all_x.append(0) all_y.append(im_h - h) all_x.append(im_w - w) all_y.append(im_h - h) #cropped = F.crop_clip(clip, y1, x1, h, w) cropped = [F.crop_clip(clip, 
y, x, h, w) for x, y in zip(all_x, all_y)] flip_cropped = [F.crop_clip(flip_clip, y, x, h, w) for x, y in zip(all_x, all_y)] cropped.extend(flip_cropped) return cropped class ColorJitter(object): """Randomly change the brightness, contrast and saturation and hue of the clip Args: brightness (float): How much to jitter brightness. brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. contrast (float): How much to jitter contrast. contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. saturation (float): How much to jitter saturation. saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. hue(float): How much to jitter hue. hue_factor is chosen uniformly from [-hue, hue]. Should be >=0 and <= 0.5. """ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): self.brightness = brightness self.contrast = contrast self.saturation = saturation self.hue = hue def get_params(self, brightness, contrast, saturation, hue): if brightness > 0: brightness_factor = random.uniform( max(0, 1 - brightness), 1 + brightness) else: brightness_factor = None if contrast > 0: contrast_factor = random.uniform( max(0, 1 - contrast), 1 + contrast) else: contrast_factor = None if saturation > 0: saturation_factor = random.uniform( max(0, 1 - saturation), 1 + saturation) else: saturation_factor = None if hue > 0: hue_factor = random.uniform(-hue, hue) else: hue_factor = None return brightness_factor, contrast_factor, saturation_factor, hue_factor def __call__(self, clip): """ Args: clip (list): list of PIL.Image Returns: list PIL.Image : list of transformed PIL.Image """ if isinstance(clip[0], np.ndarray): raise TypeError( 'Color jitter not yet implemented for numpy arrays') elif isinstance(clip[0], PIL.Image.Image): brightness, contrast, saturation, hue = self.get_params( self.brightness, self.contrast, self.saturation, self.hue) # Create img transform function sequence img_transforms = [] if 
brightness is not None: img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) if saturation is not None: img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) if hue is not None: img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) if contrast is not None: img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) random.shuffle(img_transforms) # Apply to all images jittered_clip = [] for img in clip: for func in img_transforms: jittered_img = func(img) jittered_clip.append(jittered_img) else: raise TypeError('Expected numpy.ndarray or PIL.Image' + 'but got list of {0}'.format(type(clip[0]))) return jittered_clip
13,108
31.44802
119
py
DDoS
DDoS-master/analyse_dataset.py
import argparse import logging import math import os import random import statistics import sys import numpy as np import pandas as pd import torch import torch.autograd.profiler as profiler import torch.nn.functional as F from torch.cuda.amp import autocast from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import wandb from models import * from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet from models.ThisNewNet import ThisNewNet from utils.data import * from utils.datasets_dyn import SRDataset from utils.utilities import ResSaver __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" modelIDs = { 0: "UNET", 1: "SRCNN", 2: "SRCNNv2", 3: "SRCNNv3", 4: "UNETvSeg", 5: "UNETvSegDS", 6: "DenseNet", 7: "UNETSRCNN", 8: "SRCNNUNET", 9: "ReconResNet", 10: "ShuffleUNet", 11: "UNETMSS", } lossIDs = { 0: "pLoss", 1: "MAE", 2: "MultiSSIM", 3: "SSIM3D" } def parseARGS(): ap = argparse.ArgumentParser() ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).") ap.add_argument("--seed", default=2020, type=int, help="Seed") ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/', help="Path to Dataset Folder.") ap.add_argument("-op", "--outpath", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path for Output.") ap.add_argument("-ot", "--outtype", default=r'StatTPinit_MickAbd3DDyn3conST_woZpad_full_Best', help="Type of Recon currently being performed.") ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.") ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling Factor. 
For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].") ap.add_argument("-uf", "--usfolder", default='usTestDynConST', help="Undersampled Folder.") ap.add_argument("-hf", "--hrfolder", default='hrTestDynConST', help="HighRes (Fully-sampled) Folder.") #hrTestDynPadded for ktGRASP ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.") ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.") ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.") ap.add_argument("-m", "--modelname", default="ZeroPadded", help="Model to Load for testing.") # ap.add_argument("-bst", "--beststring", default="best", help="Model to Load for testing.") # ap.add_argument("-mb", "--modelbest", type=int, default=1, help="Model to Load for testing.") ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.") # ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.") ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.") # ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.") ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. Set it to None if not desired.") ap.add_argument("-pst", "--patchstride", default='(3,3,3)', help="Stride of patches, to be used during validation") # ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.") ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).") # ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.") # ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. 
Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.") ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.") ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Pre-norm before saving the images and calculating the metrics.") ap.add_argument("-dus", "--detectus", type=int, default=0, help="Whether to replace the us using model name") #param to reproduce model # ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs)) # ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.") # ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]") # ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.") # ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.") # ap.add_argument("-mslvl", "--msslevel", type=int, default=2, help="(Only for Model ID 11) Depth of the Model.") # ap.add_argument("-msltn", "--msslatent", type=int, default=1, help="(Only for Model ID 11) Use the latent as one of the MSS level.") # ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.") # ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=0, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. 
If False, interp will be applied after conv.") # ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.") # ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs)) # ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.") # ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.") # ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired") # ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).") # ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.") # ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.") # ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2") # ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. 
Default 1") # #WnB related params # ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not") # ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project") # ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity") # ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group") # ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID") # ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None") # ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients") return ap.parse_args() args = parseARGS() # os.environ["TMPDIR"] = "/scratch/schatter/tmp" # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu torch.set_num_threads(1) random.seed(args.seed) os.environ['PYTHONHASHSEED'] = str(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if __name__ == "__main__" : args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(','))) args.homepath = os.path.expanduser("~/Documents") if args.patchsize: args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(','))) if args.patchstride: args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(','))) if args.inshape: args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(','))) # args.chkpoint = os.path.join(args.outpath, args.outfolder, args.modelname, args.modelname) # if args.modelbest: # print('best model testing') # args.chkpoint += "_" + args.beststring + ".pth.tar" # else: # args.chkpoint += ".pth.tar" # if args.patchstride: # args.modelname += "_infstr" 
+ "c".join(list(map(str, args.patchstride))) # args.modelname = args.modelname.replace(args.usfolder+"_", "") # print("Testing: "+args.modelname) # if args.modelid == 2: # SRCNN3D = SRCNN3Dv2 # elif args.modelid == 3: # SRCNN3D = SRCNN3Dv3 # if args.medianloss: # loss_reducer = statistics.median # else: # loss_reducer = statistics.mean dir_path = args.dataset + args.usfolder+ '/' + args.us + '/' label_dir_path = args.dataset + args.hrfolder + '/' # log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', args.modelname) save_path = os.path.join(args.outpath, args.outfolder, args.modelname, args.outtype) device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") # tb_writer = SummaryWriter(log_dir = log_path) os.makedirs(save_path, exist_ok=True) # logname = os.path.join(args.homepath, 'testlog_'+args.modelname+'.txt') # logging.basicConfig(filename=logname, # filemode='a', # format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', # datefmt='%H:%M:%S', # level=logging.DEBUG) # # transforms = [tio.transforms.RescaleIntensity((0, 1))] # transforms = [] testDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed patch_size_us=None, return_coords=True, pad_patch=False, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=True, noncumulative=True) #TODO implement patch_size_us if required - patch_size//scaling_factor test_loader = torch.utils.data.DataLoader(testDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) # model_scale_factor=tuple(np.roll(args.scalefact,shift=1)) # if args.modelid == 0: # model = UNet(in_channels=args.inchannel, 
n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob)) # elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3): # sys.exit("SRCNN3D is not ready for different numbers of input and output channel") # model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures) # elif (args.modelid == 4) or (args.modelid == 5): # model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures) # elif args.modelid == 6: # model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob) # elif (args.modelid == 7) or (args.modelid == 8): # model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), # scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False, # loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp) # elif args.modelid == 9: # model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args # do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True) # elif args.modelid == 10: # model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel) # elif args.modelid == 11: # model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), # batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), # mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, 
mss_interpb4=args.mssinterpb4) # else: # sys.exit("Invalid Model ID") # if args.modelid == 5: # IsDeepSup = True # else: # IsDeepSup = False # if args.profile: # dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape) # with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof: # model(dummy) # prof.export_chrome_trace(os.path.join(save_path, 'model_trace')) # model.to(device) # chk = torch.load(args.chkpoint, map_location=device) # model.load_state_dict(chk['state_dict']) # trained_epoch = chk['epoch'] # model.eval() saver = ResSaver(os.path.join(save_path, "Results"), save_inp=True, do_norm=args.prenorm) markers = {} inputs = {} results = {} targets = {} if not args.wnbactive: os.environ["WANDB_MODE"] = "dryrun" with torch.no_grad(): runningSSIM = [] test_ssim = [] test_metrics = [] for b, (lr_imgs, hr_imgs, start_coords, files, shapes, pad) in enumerate(tqdm(test_loader)): lr_imgs = lr_imgs[:,1,...].unsqueeze(1).contiguous().to(device, non_blocking=True) # (batch_size (N), 3, 24, 24), imagenet-normed hr_imgs = hr_imgs.contiguous()#.to(device) # (batch_size (N), 3, 96, 96), in [-1, 1] pad = pad.numpy() lr_imgs = F.interpolate(lr_imgs, size=hr_imgs.shape[2:], mode='trilinear') tmp_in = lr_imgs.cpu().detach()#.numpy() tmp_tar = hr_imgs#.numpy() for i in range(hr_imgs.shape[0]): if bool(args.patchsize) and args.patchsize[0] != -1: #TODO: implement non-iso patch-size, now only using the first element if files[i] not in results: markers[files[i]] = np.zeros(shapes[i][0].numpy()) inputs[files[i]] = np.zeros(shapes[i][0].numpy()) results[files[i]] = np.zeros(shapes[i][0].numpy()) targets[files[i]] = np.zeros(shapes[i][0].numpy()) (startIndex_depth, startIndex_length, startIndex_width) = start_coords[i][0].numpy() #because of moveaxis, l,w,d has become d,l,w if pad[i].any(): tin = F.pad(tmp_in[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() ttar = F.pad(tmp_tar[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() else: tin = 
tmp_in[i].squeeze().numpy() ttar = tmp_tar[i].squeeze().numpy() tin = tin[1,...] #TODO make it configurable. Currently its prevTPPatch, patch markers[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += 1 inputs[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tin, 0, -1) targets[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(ttar, 0, -1) else: inputs[files[i]] = np.moveaxis(tmp_in[i,0,...].squeeze().numpy(), 0, -1) #TODO make it configurable. Currently its prevTPPatch, patch targets[files[i]] = np.moveaxis(tmp_tar[i,0,...].squeeze().numpy(), 0, -1) if bool(args.patchsize) and args.patchsize[0] != -1: for f in inputs.keys(): inputs[f] = np.divide(inputs[f], markers[f]) results[f] = np.divide(results[f], markers[f]) targets[f] = np.divide(targets[f], markers[f]) for i, filename in enumerate(results.keys()): out = results[filename] inp = inputs[filename] gt = targets[filename] metrics = saver.CalcNSave(out, inp, gt, filename, already_numpy=True) if metrics is not None: metrics['file'] = filename test_metrics.append(metrics) ssim = round(metrics['SSIMOut'],4) test_ssim.append(ssim) runningSSIM.append(ssim) if len(test_metrics) > 0: df = pd.DataFrame.from_dict(test_metrics) df.to_csv(os.path.join(save_path, 'Results.csv'), index=False)
18,510
58.330128
239
py
DDoS
DDoS-master/train_DDoS_baseline_nondyn.py
import argparse import logging import math import os import random import statistics import sys import numpy as np import torch import torch.autograd.profiler as profiler import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchio as tio from torch.cuda.amp import GradScaler, autocast from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import wandb from models import * from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet from models.ThisNewNet import ThisNewNet from utils.data import * from utils.datasets import SRDataset from utils.pLoss.perceptual_loss import PerceptualLoss from utils.utilities import getSSIM, tensorboard_images __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" modelIDs = { 0: "UNET", 1: "SRCNN", 2: "SRCNNv2", 3: "SRCNNv3", 4: "UNETvSeg", 5: "UNETvSegDS", 6: "DenseNet", 7: "UNETSRCNN", 8: "SRCNNUNET", 9: "ReconResNet", 10: "ShuffleUNet", 11: "UNETMSS", } lossIDs = { 0: "pLoss", 1: "MAE", 2: "MultiSSIM", 3: "SSIM3D" } def parseARGS(): ap = argparse.ArgumentParser() ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).") ap.add_argument("--seed", default=2020, type=int, help="Seed") # ap.add_argument("-ds", "--dataset", default=r'/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.") ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2/', help="Path to Dataset Folder.") ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.") ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling 
Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].") ap.add_argument("-uf", "--usfolder", default='usTrain', help="Undersampled Folder.") ap.add_argument("-hf", "--hrfolder", default='hrTrain', help="HighRes (Fully-sampled) Folder.") ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.") ap.add_argument("-ms", "--modelsuffix", default='fullBaselineNonDyn', help="Any Suffix To Add with the Model Name.") ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.") ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.") ap.add_argument("-cp", "--chkpoint", default=None, help="Checkpoint (of the current training) to Load.") ap.add_argument("-cpft", "--chkpointft", default=None, help="(To be used for Fine-Tuning) Checkpoint to Load for Fine-Tuning.") ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.") ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.") ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.") ap.add_argument("-v", "--val", type=bool, default=True, help="Do Validation.") ap.add_argument("-vp", "--valdsper", type=float, default=0.3, help="Percentage of the DS to be used for Validation.") ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.") ap.add_argument("-ep", "--epochs", type=int, default=100, help="Total Number of Epochs. To use Number of Iterations, set it to None") ap.add_argument("-it", "--iterations", type=int, default=1e6, help="Total Number of Iterations. To be used if number of Epochs is None") ap.add_argument("-lr", "--lr", type=float, default=1e-4, help="Total Number of Epochs.") ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. 
Set it to None if not desired.") ap.add_argument("-pst", "--patchstride", default='(12,12,6)', help="Stride of patches, to be used during validation") ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.") ap.add_argument("-sf", "--savefreq", type=int, default=1, help="saving Frequency.") ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).") ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs)) ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.") ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]") ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.") ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.") ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-mslvl", "--msslevel", type=int, default=1, help="(Only for Model ID 11) Depth of the Model.") ap.add_argument("-msltn", "--msslatent", type=int, default=0, help="(Only for Model ID 11) Use the latent as one of the MSS level.") ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.") ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=1, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.") ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. 
Will only be used if Patch Size is None.") ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.") ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs)) ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.") ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.") ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired") ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).") ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.") ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.") ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.") ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Rescale intensities beteen 0 and 1") ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2") ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. 
Default 1") #WnB related params ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not") ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project") ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity") ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group") ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID") ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None") ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients") return ap.parse_args() args = parseARGS() # os.environ["TMPDIR"] = "/scratch/schatter/tmp" # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu torch.set_num_threads(1) random.seed(args.seed) os.environ['PYTHONHASHSEED'] = str(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if __name__ == "__main__" : args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(','))) args.homepath = os.path.expanduser("~/Documents") if args.patchsize: args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(','))) if args.patchstride: args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(','))) if args.inshape: args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(','))) args.modelname = args.usfolder + "_" + modelIDs[args.modelid] + args.modelsuffix if args.modelid == 0 or args.modelid == 6 or args.modelid == 11: args.modelname += "do" + str(args.dropprob) + "dp" + str(args.mdepth) if args.modelid == 0 or args.modelid == 9 or args.modelid == 11: args.modelname += 
args.upmode if args.batchnorm: args.modelname += "BN" if args.modelid == 11: args.modelname += "MSS"+str(args.msslevel) args.modelname += "Latent" if args.msslatent else "NoLatent" args.modelname += args.mssup args.modelname += "InterpB4" if args.mssinterpb4 else "NoInterpB4" trainID = args.modelname + '_' + args.us + '_' + lossIDs[args.lossid] if args.lossid == 0: trainID += args.plosstyp + 'lvl' + str(args.plosslvl) if args.finetune: trainID += "_FT_lrdec" + str(args.lrdecrate) if args.fteprt: trainID += "_eprt" + str(args.fteprt) else: trainID += "_itrt" + str(args.ftitrt) print("Training: "+trainID) if args.modelid == 2: SRCNN3D = SRCNN3Dv2 elif args.modelid == 3: SRCNN3D = SRCNN3Dv3 if args.medianloss: loss_reducer = statistics.median else: loss_reducer = statistics.mean dir_path = args.dataset + args.usfolder+ '/' + args.us + '/' label_dir_path = args.dataset + args.hrfolder + '/' log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', trainID) save_path = os.path.join(args.dataset, args.outfolder, trainID) device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") tb_writer = SummaryWriter(log_dir = log_path) os.makedirs(save_path, exist_ok=True) logname = os.path.join(args.homepath, 'log_'+trainID+'.txt') logging.basicConfig(filename=logname, filemode='a', format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) transforms = [] if not args.patchsize: transforms.append(tio.transforms.CropOrPad(target_shape=args.inshape)) trainDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed patch_size_us=None, pre_interpolate=args.preint, 
norm_data=args.prenorm, pre_load=True, pad_patch=False) #TODO implement patch_size_us if required - patch_size//scaling_factor model_scale_factor=tuple(np.roll(args.scalefact,shift=1)) if args.val: train_size = int((1-args.valdsper) * len(trainDS)) val_size = len(trainDS) - train_size trainDS, valDS = torch.utils.data.random_split(trainDS, [train_size, val_size]) else: valDS = None if bool(args.patchsize): args.inshape = args.patchsize train_loader = DataLoader(dataset=trainDS, batch_size=args.batchsize,shuffle=True, num_workers=args.nworkers, pin_memory=True) val_loader = None if not args.val else DataLoader(dataset=valDS,batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) if args.modelid == 0: model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob)) elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3): sys.exit("SRCNN3D is not ready for different numbers of input and output channel") model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures) elif (args.modelid == 4) or (args.modelid == 5): model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures) elif args.modelid == 6: model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob) elif (args.modelid == 7) or (args.modelid == 8): model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False, loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp) elif args.modelid == 9: model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, 
starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True) elif args.modelid == 10: model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel) elif args.modelid == 11: model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4) else: sys.exit("Invalid Model ID") if args.modelid == 5: IsDeepSup = True else: IsDeepSup = False if args.profile: dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape) with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof: model(dummy) prof.export_chrome_trace(os.path.join(save_path, 'model_trace')) args.lr = args.lr/args.lrdecrate optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) model.to(device) if args.lossid == 0: if args.outchannel != 1: sys.exit("Perceptual Loss used here only works for 1 channel images") loss_func = PerceptualLoss(device=device, loss_model="unet3Dds", resize=None, loss_type=args.plosstyp, n_level=args.plosslvl) elif args.lossid == 1: loss_func = nn.L1Loss(reduction='mean') elif args.lossid == 2: loss_func = MultiSSIM(data_range=1, n_channels=args.outchannel, reduction='mean').to(device) elif args.lossid == 3: loss_func = SSIM(data_range=1, channel=args.outchannel, spatial_dims=3).to(device) else: sys.exit("Invalid Loss ID") if (args.lossid == 0 and args.plosstyp == "L1") or (args.lossid == 1): IsNegLoss = False else: IsNegLoss = True if (args.modelid == 7) or (args.modelid == 8): model.loss_func = 
loss_func scaler = GradScaler(enabled=args.amp) if args.chkpoint: chk = torch.load(args.chkpoint, map_location=device) elif args.finetune: if args.chkpointft: chk = torch.load(args.chkpointft, map_location=device) else: sys.exit("Finetune can't be performed if chkpointft not supplied") else: chk = None start_epoch = 0 best_loss = float('-inf') if IsNegLoss else float('inf') if chk is not None: model.load_state_dict(chk['state_dict']) optimizer.load_state_dict(chk['optimizer']) scaler.load_state_dict(chk['AMPScaler']) best_loss = chk['best_loss'] start_epoch = chk['epoch'] + 1 iterations = chk['iterations'] main_train_epcoh = (chk['main_train_epoch'] + 1) if 'main_train_epoch' in chk else start_epoch #only be used for finetune if args.finetune: if args.fteprt: args.epochs = int((main_train_epcoh*(1+args.fteprt))) else: args.iterations = int(iterations*args.ftitrt) n_ft_ep = int(args.iterations // len(train_loader)) args.epochs = main_train_epcoh + n_ft_ep if args.epochs is None: args.epochs = int(args.iterations // len(train_loader) + 1) if start_epoch >= args.epochs: logging.error('Training should atleast be for one epoch. 
Adjusting to perform 1 epoch training') args.epochs = start_epoch+1 if not args.wnbactive: os.environ["WANDB_MODE"] = "dryrun" with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+trainID, id=args.wnbprefix+trainID, resume=True) as WnBRun: wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq) logging.info('Training Epochs: from {0} to {1}'.format(start_epoch, args.epochs-1)) for epoch in range(start_epoch, args.epochs): #Train model.train() runningLoss = [] train_loss = [] print('Epoch '+ str(epoch)+ ': Train') for i, (images, gt) in enumerate(tqdm(train_loader)): images = images[:, None, ...].to(device) gt = gt[:, None, ...].to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) elif type(model) is UNetMSS: out, mssout = model(images) loss = loss_func(out, gt) for mss in range(len(mssout)): loss += model.mss_coeff[mss] * loss_func(mssout[mss], gt) else: out = model(images) loss = loss_func(out, gt) if IsNegLoss: loss = -loss optimizer.zero_grad() scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) train_loss.append(loss) runningLoss.append(loss) logging.info('[%d/%d][%d/%d] Train Loss: %.4f' % ((epoch+1), args.epochs, i, len(train_loader), loss)) del gt, out, loss torch.cuda.empty_cache() if i % args.logfreq == 0: niter = epoch*len(train_loader)+i tb_writer.add_scalar('Train/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, "TrainLoss":loss_reducer(runningLoss)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'train') 
runningLoss = [] if args.finetune or (epoch % args.savefreq == 0): checkpoint = { 'epoch': epoch, 'iterations': (epoch+1)*len(train_loader), 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+".pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+".onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+".onnx") del images tb_writer.add_scalar('Train/EpochLoss', loss_reducer(train_loss), epoch) wandb.log({"TrainEpochLoss":loss_reducer(train_loss)})#, step=epoch) torch.cuda.empty_cache() #Validate if val_loader: model.eval() with torch.no_grad(): runningLoss = [] val_loss = [] runningAcc = [] val_acc = [] print('Epoch '+ str(epoch)+ ': Val') for i, (images, gt) in enumerate(tqdm(val_loader)): images = images[:, None, ...].to(device) gt = gt[:, None, ...].to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) else: out = model(images) loss = loss_func(out, gt) ssim = getSSIM(gt.detach().cpu().numpy(), out.detach().cpu().numpy(), data_range=1) loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) val_loss.append(loss) runningLoss.append(loss) val_acc.append(ssim) runningAcc.append(ssim) logging.info('[%d/%d][%d/%d] Val Loss: %.4f' % ((epoch+1), args.epochs, i, len(val_loader), loss)) del gt, out, loss torch.cuda.empty_cache() #For tensorboard if i % args.logfreq == 0: niter = epoch*len(val_loader)+i tb_writer.add_scalar('Val/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, 
"ValLoss":loss_reducer(runningLoss)})#, step=niter) tb_writer.add_scalar('Val/SSIM', loss_reducer(runningAcc), niter) wandb.log({"Epoch":epoch, "ValSSIM":loss_reducer(runningAcc)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'val') runningLoss = [] runningAcc = [] if (loss_reducer(val_loss) < best_loss and not IsNegLoss) or (loss_reducer(val_loss) > best_loss and IsNegLoss): best_loss = loss_reducer(val_loss) WnBRun.summary["best_loss"] = best_loss checkpoint = { 'epoch': epoch, 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+"_best.pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+"_best.onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+"_best.onnx") del images tb_writer.add_scalar('Val/EpochLoss', loss_reducer(val_loss), epoch) wandb.log({"ValEpochLoss":loss_reducer(val_loss)})#, step=epoch) tb_writer.add_scalar('Val/EpochSSIM', loss_reducer(val_acc), epoch) wandb.log({"ValEpochSSIM":loss_reducer(val_acc)})#, step=epoch) torch.cuda.empty_cache()
26,386
53.972917
230
py
DDoS
DDoS-master/apply_DDoS_baseline.py
import argparse import logging import math import os import random import statistics import sys import numpy as np import pandas as pd import torch import torch.autograd.profiler as profiler import torch.nn.functional as F from torch.cuda.amp import autocast from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import wandb from models import * from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet from models.ThisNewNet import ThisNewNet from utils.data import * from utils.datasets_dyn import SRDataset from utils.utilities import ResSaver __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" modelIDs = { 0: "UNET", 1: "SRCNN", 2: "SRCNNv2", 3: "SRCNNv3", 4: "UNETvSeg", 5: "UNETvSegDS", 6: "DenseNet", 7: "UNETSRCNN", 8: "SRCNNUNET", 9: "ReconResNet", 10: "ShuffleUNet", 11: "UNETMSS", } lossIDs = { 0: "pLoss", 1: "MAE", 2: "MultiSSIM", 3: "SSIM3D" } def parseARGS(): ap = argparse.ArgumentParser() ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).") ap.add_argument("--seed", default=2020, type=int, help="Seed") ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/', help="Path to Dataset Folder.") ap.add_argument("-op", "--outpath", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path for Output.") ap.add_argument("-ot", "--outtype", default=r'StatTPinit_MickAbd3DDyn3conST_woZpad_full_Best', help="Type of Recon currently being performed.") ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.") ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling Factor. 
For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].") ap.add_argument("-uf", "--usfolder", default='usTestDynConST', help="Undersampled Folder.") ap.add_argument("-hf", "--hrfolder", default='hrTestDynConST', help="HighRes (Fully-sampled) Folder.") #hrTestDynPadded for ktGRASP ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.") ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.") ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.") ap.add_argument("-m", "--modelname", default="usTrain_UNETfullBaselinedo0.0dp3upsample_Center4MaskWoPad_pLossL1lvl3", help="Model to Load for testing.") ap.add_argument("-bst", "--beststring", default="best", help="Model to Load for testing.") ap.add_argument("-mb", "--modelbest", type=int, default=1, help="Model to Load for testing.") ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.") ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.") ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.") ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.") ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. 
Set it to None if not desired.") ap.add_argument("-pst", "--patchstride", default='(3,3,3)', help="Stride of patches, to be used during validation") ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.") ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).") ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.") ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.") ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Pre-norm before saving the images and calculating the metrics.") ap.add_argument("-dus", "--detectus", type=int, default=0, help="Whether to replace the us using model name") #param to reproduce model ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs)) ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.") ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]") ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.") ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.") ap.add_argument("-mslvl", "--msslevel", type=int, default=2, help="(Only for Model ID 11) Depth of the Model.") ap.add_argument("-msltn", "--msslatent", type=int, default=1, help="(Only for Model ID 11) Use the latent as one of the 
MSS level.") ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.") ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=0, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.") ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.") ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs)) ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.") ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.") ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired") ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).") ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.") ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.") ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2") ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. 
Default 1") #WnB related params ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not") ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project") ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity") ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group") ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID") ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None") ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients") return ap.parse_args() args = parseARGS() # os.environ["TMPDIR"] = "/scratch/schatter/tmp" # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu torch.set_num_threads(1) random.seed(args.seed) os.environ['PYTHONHASHSEED'] = str(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if __name__ == "__main__" : args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(','))) args.homepath = os.path.expanduser("~/Documents") if args.patchsize: args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(','))) if args.patchstride: args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(','))) if args.inshape: args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(','))) args.chkpoint = os.path.join(args.outpath, args.outfolder, args.modelname, args.modelname) if args.modelbest: print('best model testing') args.chkpoint += "_" + args.beststring + ".pth.tar" else: args.chkpoint += ".pth.tar" if args.patchstride: args.modelname += "_infstr" + "c".join(list(map(str, 
args.patchstride))) args.modelname = args.modelname.replace(args.usfolder+"_", "") print("Testing: "+args.modelname) if args.modelid == 2: SRCNN3D = SRCNN3Dv2 elif args.modelid == 3: SRCNN3D = SRCNN3Dv3 if args.medianloss: loss_reducer = statistics.median else: loss_reducer = statistics.mean dir_path = args.dataset + args.usfolder+ '/' + args.us + '/' label_dir_path = args.dataset + args.hrfolder + '/' log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', args.modelname) save_path = os.path.join(args.outpath, args.outfolder, args.modelname, args.outtype) device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") tb_writer = SummaryWriter(log_dir = log_path) os.makedirs(save_path, exist_ok=True) logname = os.path.join(args.homepath, 'testlog_'+args.modelname+'.txt') logging.basicConfig(filename=logname, filemode='a', format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) # transforms = [tio.transforms.RescaleIntensity((0, 1))] transforms = [] testDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed patch_size_us=None, return_coords=True, pad_patch=False, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=True, noncumulative=True) #TODO implement patch_size_us if required - patch_size//scaling_factor test_loader = torch.utils.data.DataLoader(testDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) model_scale_factor=tuple(np.roll(args.scalefact,shift=1)) if args.modelid == 0: model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), 
batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob)) elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3): sys.exit("SRCNN3D is not ready for different numbers of input and output channel") model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures) elif (args.modelid == 4) or (args.modelid == 5): model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures) elif args.modelid == 6: model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob) elif (args.modelid == 7) or (args.modelid == 8): model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False, loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp) elif args.modelid == 9: model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True) elif args.modelid == 10: model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel) elif args.modelid == 11: model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4) else: sys.exit("Invalid Model ID") if args.modelid == 5: IsDeepSup = True else: IsDeepSup = False if args.profile: 
dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape) with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof: model(dummy) prof.export_chrome_trace(os.path.join(save_path, 'model_trace')) model.to(device) chk = torch.load(args.chkpoint, map_location=device) model.load_state_dict(chk['state_dict']) trained_epoch = chk['epoch'] model.eval() saver = ResSaver(os.path.join(save_path, "Results"), save_inp=True, do_norm=args.prenorm) markers = {} inputs = {} results = {} targets = {} if not args.wnbactive: os.environ["WANDB_MODE"] = "dryrun" with torch.no_grad(): runningSSIM = [] test_ssim = [] test_metrics = [] print('Epoch '+ str(trained_epoch)+ ': Test') with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+args.modelname, id=args.wnbprefix+args.modelname, resume=True) as WnBRun: wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq) for b, (lr_imgs, hr_imgs, start_coords, files, shapes, pad) in enumerate(tqdm(test_loader)): lr_imgs = lr_imgs[:,1,...].unsqueeze(1).contiguous().to(device, non_blocking=True) # (batch_size (N), 3, 24, 24), imagenet-normed hr_imgs = hr_imgs.contiguous()#.to(device) # (batch_size (N), 3, 96, 96), in [-1, 1] pad = pad.numpy() with autocast(enabled=args.amp): if type(model) in (SRCNN3D, SRCNN3Dv2, SRCNN3Dv3): _, sr_imgs = model(lr_imgs) elif type(model) is UNetVSeg: sr_imgs, _, _ = model(lr_imgs) else: sr_imgs = model(lr_imgs) sr_imgs = sr_imgs.type(lr_imgs.dtype) sr_imgs = F.interpolate(sr_imgs, size=hr_imgs.shape[2:], mode='trilinear') lr_imgs = F.interpolate(lr_imgs, size=hr_imgs.shape[2:], mode='trilinear') tmp_in = lr_imgs.cpu().detach()#.numpy() tmp_res = sr_imgs.cpu().detach()#.numpy() tmp_tar = hr_imgs#.numpy() for i in range(hr_imgs.shape[0]): if bool(args.patchsize) and args.patchsize[0] != -1: #TODO: implement non-iso patch-size, now only using the first element if files[i] not in results: markers[files[i]] = 
np.zeros(shapes[i][0].numpy()) inputs[files[i]] = np.zeros(shapes[i][0].numpy()) results[files[i]] = np.zeros(shapes[i][0].numpy()) targets[files[i]] = np.zeros(shapes[i][0].numpy()) (startIndex_depth, startIndex_length, startIndex_width) = start_coords[i][0].numpy() #because of moveaxis, l,w,d has become d,l,w if pad[i].any(): tin = F.pad(tmp_in[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() tres = F.pad(tmp_res[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() ttar = F.pad(tmp_tar[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() else: tin = tmp_in[i].squeeze().numpy() tres = tmp_res[i].squeeze().numpy() ttar = tmp_tar[i].squeeze().numpy() tin = tin[1,...] #TODO make it configurable. Currently its prevTPPatch, patch markers[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += 1 inputs[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tin, 0, -1) results[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tres, 0, -1) targets[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(ttar, 0, -1) else: inputs[files[i]] = np.moveaxis(tmp_in[i,0,...].squeeze().numpy(), 0, -1) #TODO make it configurable. 
Currently its prevTPPatch, patch results[files[i]] = np.moveaxis(tmp_res[i,0,...].squeeze().numpy(), 0, -1) targets[files[i]] = np.moveaxis(tmp_tar[i,0,...].squeeze().numpy(), 0, -1) if bool(args.patchsize) and args.patchsize[0] != -1: for f in inputs.keys(): inputs[f] = np.divide(inputs[f], markers[f]) results[f] = np.divide(results[f], markers[f]) targets[f] = np.divide(targets[f], markers[f]) for i, filename in enumerate(results.keys()): out = results[filename] inp = inputs[filename] gt = targets[filename] metrics = saver.CalcNSave(out, inp, gt, filename, already_numpy=True) if metrics is not None: metrics['file'] = filename test_metrics.append(metrics) ssim = round(metrics['SSIMOut'],4) test_ssim.append(ssim) runningSSIM.append(ssim) logging.info('[%d/%d] Test SSIM: %.4f' % (i, len(testDS), ssim)) #For tensorboard tb_writer.add_scalar('Test/SSIM', loss_reducer(runningSSIM), i) wandb.log({"TestEpoch":trained_epoch, "TestSSIM":loss_reducer(runningSSIM)})#, step=niter) runningSSIM = [] if len(test_metrics) > 0: print("Avg SSIM: "+str(loss_reducer(test_ssim))) WnBRun.summary["AvgTestSSIM"] = loss_reducer(test_ssim) df = pd.DataFrame.from_dict(test_metrics) df.to_csv(os.path.join(save_path, 'Results.csv'), index=False)
20,417
59.587537
239
py
DDoS
DDoS-master/apply_DDoS.py
import argparse import logging import math import os import random import statistics import sys import numpy as np import pandas as pd import torch import torch.autograd.profiler as profiler import torch.nn.functional as F from torch.cuda.amp import autocast from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import wandb from models import * from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet from models.ThisNewNet import ThisNewNet from utils.data import * from utils.datasets_dyn import SRDataset from utils.utilities import ResSaver, process_DDoS_SRPrev __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" modelIDs = { 0: "UNET", 1: "SRCNN", 2: "SRCNNv2", 3: "SRCNNv3", 4: "UNETvSeg", 5: "UNETvSegDS", 6: "DenseNet", 7: "UNETSRCNN", 8: "SRCNNUNET", 9: "ReconResNet", 10: "ShuffleUNet", 11: "UNETMSS", } lossIDs = { 0: "pLoss", 1: "MAE", 2: "MultiSSIM", 3: "SSIM3D" } def parseARGS(): ap = argparse.ArgumentParser() ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).") ap.add_argument("--seed", default=2020, type=int, help="Seed") ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/', help="Path to Dataset Folder.") ap.add_argument("-op", "--outpath", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path for Output.") ap.add_argument("-ot", "--outtype", default=r'StatTPinitCumulative_MickAbd3DDyn3conST_woZpad_full_Best', help="Type of Recon currently being performed.") ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.") ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling 
Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].") ap.add_argument("-uf", "--usfolder", default='usTestDynConST', help="Undersampled Folder.") ap.add_argument("-hf", "--hrfolder", default='hrTestDynConST', help="HighRes (Fully-sampled) Folder.") #hrTestDynPadded for ktGRASP ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.") ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.") ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.") ap.add_argument("-m", "--modelname", default="usTrain_UNETfulldo0.0dp3upsample_Center4MaskWoPad_pLossL1lvl3", help="Model to Load for testing.") ap.add_argument("-bst", "--beststring", default="best", help="Model to Load for testing.") ap.add_argument("-mb", "--modelbest", type=int, default=1, help="Model to Load for testing.") ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.") ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.") ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.") ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.") ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. 
Set it to None if not desired.") ap.add_argument("-pst", "--patchstride", default='(3,3,3)', help="Stride of patches, to be used during validation") ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.") ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).") ap.add_argument("-inc", "--inchannel", type=int, default=2, help="Number of Channels in the Data.") ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.") ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.") ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Pre-norm before saving the images and calculating the metrics.") ap.add_argument("-dus", "--detectus", type=int, default=0, help="Whether to replace the us using model name") #param to reproduce model ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs)) ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.") ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]") ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.") ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.") ap.add_argument("-mslvl", "--msslevel", type=int, default=2, help="(Only for Model ID 11) Depth of the Model.") ap.add_argument("-msltn", "--msslatent", type=int, default=1, help="(Only for Model ID 11) Use the latent as one of the 
MSS level.") ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.") ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=0, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.") ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.") ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs)) ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.") ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.") ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired") ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).") ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.") ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.") ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2") ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. 
Default 1") #WnB related params ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not") ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project") ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity") ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group") ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID") ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None") ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients") return ap.parse_args() args = parseARGS() # os.environ["TMPDIR"] = "/scratch/schatter/tmp" # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu torch.set_num_threads(1) random.seed(args.seed) os.environ['PYTHONHASHSEED'] = str(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if __name__ == "__main__" : args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(','))) args.homepath = os.path.expanduser("~/Documents") if args.patchsize: args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(','))) if args.patchstride: args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(','))) if args.inshape: args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(','))) args.chkpoint = os.path.join(args.outpath, args.outfolder, args.modelname, args.modelname) if args.modelbest: print('best model testing') args.chkpoint += "_" + args.beststring + ".pth.tar" else: args.chkpoint += ".pth.tar" if args.patchstride: args.modelname += "_infstr" + "c".join(list(map(str, 
args.patchstride))) args.modelname = args.modelname.replace(args.usfolder+"_", "") print("Testing: "+args.modelname) if args.modelid == 2: SRCNN3D = SRCNN3Dv2 elif args.modelid == 3: SRCNN3D = SRCNN3Dv3 if args.medianloss: loss_reducer = statistics.median else: loss_reducer = statistics.mean dir_path = args.dataset + args.usfolder+ '/' + args.us + '/' label_dir_path = args.dataset + args.hrfolder + '/' log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', args.modelname) save_path = os.path.join(args.outpath, args.outfolder, args.modelname, args.outtype) device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") tb_writer = SummaryWriter(log_dir = log_path) os.makedirs(save_path, exist_ok=True) logname = os.path.join(args.homepath, 'testlog_'+args.modelname+'.txt') logging.basicConfig(filename=logname, filemode='a', format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) # transforms = [tio.transforms.RescaleIntensity((0, 1))] transforms = [] testDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed patch_size_us=None, return_coords=True, pad_patch=False, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=True, noncumulative=False) #TODO implement patch_size_us if required - patch_size//scaling_factor tpIDs = sorted(testDS.data.tpID.unique()) DSs = [] for tp in tpIDs: indices = testDS.data.index[testDS.data.tpID == tp].tolist() dsOB = torch.utils.data.Subset(testDS, indices) DSs.append(dsOB) # test_loader = torch.utils.data.DataLoader(testDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) 
model_scale_factor=tuple(np.roll(args.scalefact,shift=1)) if args.modelid == 0: model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob)) elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3): sys.exit("SRCNN3D is not ready for different numbers of input and output channel") model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures) elif (args.modelid == 4) or (args.modelid == 5): model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures) elif args.modelid == 6: model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob) elif (args.modelid == 7) or (args.modelid == 8): model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False, loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp) elif args.modelid == 9: model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True) elif args.modelid == 10: model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel) elif args.modelid == 11: model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), 
mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4) else: sys.exit("Invalid Model ID") if args.modelid == 5: IsDeepSup = True else: IsDeepSup = False if args.profile: dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape) with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof: model(dummy) prof.export_chrome_trace(os.path.join(save_path, 'model_trace')) model.to(device) chk = torch.load(args.chkpoint, map_location=device) model.load_state_dict(chk['state_dict']) trained_epoch = chk['epoch'] model.eval() saver = ResSaver(os.path.join(save_path, "Results"), save_inp=True, do_norm=args.prenorm) markers = {} inputs = {} results = {} targets = {} if not args.wnbactive: os.environ["WANDB_MODE"] = "dryrun" with torch.no_grad(): runningSSIM = [] test_ssim = [] test_metrics = [] print('Epoch '+ str(trained_epoch)+ ': Test') with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+args.modelname, id=args.wnbprefix+args.modelname, resume=True) as WnBRun: wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq) SRPrev = None for tp, tpDS in enumerate(DSs): print(f"Testing TP{tp+1}") test_loader = torch.utils.data.DataLoader(tpDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) for b, (lr_imgs, hr_imgs, start_coords, files, shapes, pad) in enumerate(tqdm(test_loader)): if SRPrev is not None: #Means its not the initial TP and already super-resolved some timepoints. 
lr_imgs[:,0] = SRPrev lr_imgs = lr_imgs.contiguous().to(device, non_blocking=True) # (batch_size (N), 3, 24, 24), imagenet-normed hr_imgs = hr_imgs.contiguous()#.to(device) # (batch_size (N), 3, 96, 96), in [-1, 1] pad = pad.numpy() with autocast(enabled=args.amp): if type(model) in (SRCNN3D, SRCNN3Dv2, SRCNN3Dv3): _, sr_imgs = model(lr_imgs) elif type(model) is UNetVSeg: sr_imgs, _, _ = model(lr_imgs) else: sr_imgs = model(lr_imgs) sr_imgs = sr_imgs.type(lr_imgs.dtype) sr_imgs = F.interpolate(sr_imgs, size=hr_imgs.shape[2:], mode='trilinear') lr_imgs = F.interpolate(lr_imgs, size=hr_imgs.shape[2:], mode='trilinear') tmp_in = lr_imgs.cpu().detach()#.numpy() tmp_res = sr_imgs.cpu().detach()#.numpy() tmp_tar = hr_imgs#.numpy() for i in range(hr_imgs.shape[0]): if bool(args.patchsize) and args.patchsize[0] != -1: #TODO: implement non-iso patch-size, now only using the first element if files[i] not in results: markers[files[i]] = np.zeros(shapes[i][0].numpy()) inputs[files[i]] = np.zeros(shapes[i][0].numpy()) results[files[i]] = np.zeros(shapes[i][0].numpy()) targets[files[i]] = np.zeros(shapes[i][0].numpy()) (startIndex_depth, startIndex_length, startIndex_width) = start_coords[i][0].numpy() #because of moveaxis, l,w,d has become d,l,w if pad[i].any(): tin = F.pad(tmp_in[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() tres = F.pad(tmp_res[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() ttar = F.pad(tmp_tar[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy() else: tin = tmp_in[i].squeeze().numpy() tres = tmp_res[i].squeeze().numpy() ttar = tmp_tar[i].squeeze().numpy() tin = tin[1,...] #TODO make it configurable. 
Currently its prevTPPatch, patch markers[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += 1 inputs[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tin, 0, -1) results[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tres, 0, -1) targets[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(ttar, 0, -1) else: inputs[files[i]] = np.moveaxis(tmp_in[i,1,...].squeeze().numpy(), 0, -1) #TODO make it configurable. Currently its prevTPPatch, patch results[files[i]] = np.moveaxis(tmp_res[i,0,...].squeeze().numpy(), 0, -1) targets[files[i]] = np.moveaxis(tmp_tar[i,0,...].squeeze().numpy(), 0, -1) if bool(args.patchsize) and args.patchsize[0] != -1: for f in inputs.keys(): inputs[f] = np.divide(inputs[f], markers[f]) results[f] = np.divide(results[f], markers[f]) targets[f] = np.divide(targets[f], markers[f]) for i, filename in enumerate(results.keys()): out = results[filename] inp = inputs[filename] gt = targets[filename] metrics = saver.CalcNSave(out, inp, gt, filename, already_numpy=True) if metrics is not None: metrics['file'] = filename test_metrics.append(metrics) ssim = round(metrics['SSIMOut'],4) test_ssim.append(ssim) runningSSIM.append(ssim) logging.info('[%d/%d] Test SSIM: %.4f' % (i, len(testDS), ssim)) #For tensorboard tb_writer.add_scalar('Test/SSIM', loss_reducer(runningSSIM), i) wandb.log({"TestEpoch":trained_epoch, "TestSSIM":loss_reducer(runningSSIM)})#, step=niter) runningSSIM = [] if len(test_metrics) > 0: print("Avg SSIM: 
"+str(loss_reducer(test_ssim))) WnBRun.summary["AvgTestSSIM"] = loss_reducer(test_ssim) df = pd.DataFrame.from_dict(test_metrics) df.to_csv(os.path.join(save_path, 'Results.csv'), index=False)
21,258
59.566952
240
py
DDoS
DDoS-master/train_DDoS_baseline.py
import argparse import logging import math import os import random import statistics import sys import numpy as np import torch import torch.autograd.profiler as profiler import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchio as tio from torch.cuda.amp import GradScaler, autocast from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import wandb from models import * from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet from models.ThisNewNet import ThisNewNet from utils.data import * from utils.datasets_dyn import SRDataset from utils.pLoss.perceptual_loss import PerceptualLoss from utils.utilities import getSSIM, tensorboard_images __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" modelIDs = { 0: "UNET", 1: "SRCNN", 2: "SRCNNv2", 3: "SRCNNv3", 4: "UNETvSeg", 5: "UNETvSegDS", 6: "DenseNet", 7: "UNETSRCNN", 8: "SRCNNUNET", 9: "ReconResNet", 10: "ShuffleUNet", 11: "UNETMSS", } lossIDs = { 0: "pLoss", 1: "MAE", 2: "MultiSSIM", 3: "SSIM3D" } def parseARGS(): ap = argparse.ArgumentParser() ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).") ap.add_argument("--seed", default=2020, type=int, help="Seed") # ap.add_argument("-ds", "--dataset", default=r'/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.") ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.") ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.") ap.add_argument("-s", "--scalefact", default='(1,1,1)', 
help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].") ap.add_argument("-uf", "--usfolder", default='usTrain', help="Undersampled Folder.") ap.add_argument("-hf", "--hrfolder", default='hrTrain', help="HighRes (Fully-sampled) Folder.") ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.") ap.add_argument("-ms", "--modelsuffix", default='fullBaseline', help="Any Suffix To Add with the Model Name.") ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.") ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.") ap.add_argument("-cp", "--chkpoint", default=None, help="Checkpoint (of the current training) to Load.") ap.add_argument("-cpft", "--chkpointft", default=None, help="(To be used for Fine-Tuning) Checkpoint to Load for Fine-Tuning.") ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.") ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.") ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.") ap.add_argument("-v", "--val", type=bool, default=True, help="Do Validation.") ap.add_argument("-vp", "--valdsper", type=float, default=0.3, help="Percentage of the DS to be used for Validation.") ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.") ap.add_argument("-ep", "--epochs", type=int, default=100, help="Total Number of Epochs. To use Number of Iterations, set it to None") ap.add_argument("-it", "--iterations", type=int, default=1e6, help="Total Number of Iterations. To be used if number of Epochs is None") ap.add_argument("-lr", "--lr", type=float, default=1e-4, help="Total Number of Epochs.") ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. 
Set it to None if not desired.") ap.add_argument("-pst", "--patchstride", default='(12,12,6)', help="Stride of patches, to be used during validation") ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.") ap.add_argument("-sf", "--savefreq", type=int, default=1, help="saving Frequency.") ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).") ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs)) ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.") ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]") ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.") ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.") ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-mslvl", "--msslevel", type=int, default=1, help="(Only for Model ID 11) Depth of the Model.") ap.add_argument("-msltn", "--msslatent", type=int, default=0, help="(Only for Model ID 11) Use the latent as one of the MSS level.") ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.") ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=1, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.") ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. 
Will only be used if Patch Size is None.") ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.") ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs)) ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.") ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.") ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired") ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).") ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.") ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.") ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.") ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Rescale intensities beteen 0 and 1") ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2") ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. 
Default 1") #WnB related params ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not") ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project") ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity") ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group") ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID") ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None") ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients") return ap.parse_args() args = parseARGS() # os.environ["TMPDIR"] = "/scratch/schatter/tmp" # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu torch.set_num_threads(1) random.seed(args.seed) os.environ['PYTHONHASHSEED'] = str(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if __name__ == "__main__" : args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(','))) args.homepath = os.path.expanduser("~/Documents") if args.patchsize: args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(','))) if args.patchstride: args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(','))) if args.inshape: args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(','))) args.modelname = args.usfolder + "_" + modelIDs[args.modelid] + args.modelsuffix if args.modelid == 0 or args.modelid == 6 or args.modelid == 11: args.modelname += "do" + str(args.dropprob) + "dp" + str(args.mdepth) if args.modelid == 0 or args.modelid == 9 or args.modelid == 11: args.modelname += 
args.upmode if args.batchnorm: args.modelname += "BN" if args.modelid == 11: args.modelname += "MSS"+str(args.msslevel) args.modelname += "Latent" if args.msslatent else "NoLatent" args.modelname += args.mssup args.modelname += "InterpB4" if args.mssinterpb4 else "NoInterpB4" trainID = args.modelname + '_' + args.us + '_' + lossIDs[args.lossid] if args.lossid == 0: trainID += args.plosstyp + 'lvl' + str(args.plosslvl) if args.finetune: trainID += "_FT_lrdec" + str(args.lrdecrate) if args.fteprt: trainID += "_eprt" + str(args.fteprt) else: trainID += "_itrt" + str(args.ftitrt) print("Training: "+trainID) if args.modelid == 2: SRCNN3D = SRCNN3Dv2 elif args.modelid == 3: SRCNN3D = SRCNN3Dv3 if args.medianloss: loss_reducer = statistics.median else: loss_reducer = statistics.mean dir_path = args.dataset + args.usfolder+ '/' + args.us + '/' label_dir_path = args.dataset + args.hrfolder + '/' log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', trainID) save_path = os.path.join(args.dataset, args.outfolder, trainID) device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") tb_writer = SummaryWriter(log_dir = log_path) os.makedirs(save_path, exist_ok=True) logname = os.path.join(args.homepath, 'log_'+trainID+'.txt') logging.basicConfig(filename=logname, filemode='a', format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) transforms = [] if not args.patchsize: transforms.append(tio.transforms.CropOrPad(target_shape=args.inshape)) trainDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed patch_size_us=None, pre_interpolate=args.preint, 
norm_data=args.prenorm, pre_load=False, pad_patch=False) #TODO implement patch_size_us if required - patch_size//scaling_factor model_scale_factor=tuple(np.roll(args.scalefact,shift=1)) if args.val: train_size = int((1-args.valdsper) * len(trainDS)) val_size = len(trainDS) - train_size trainDS, valDS = torch.utils.data.random_split(trainDS, [train_size, val_size]) else: valDS = None if bool(args.patchsize): args.inshape = args.patchsize train_loader = DataLoader(dataset=trainDS, batch_size=args.batchsize,shuffle=True, num_workers=args.nworkers, pin_memory=True) val_loader = None if not args.val else DataLoader(dataset=valDS,batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) if args.modelid == 0: model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob)) elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3): sys.exit("SRCNN3D is not ready for different numbers of input and output channel") model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures) elif (args.modelid == 4) or (args.modelid == 5): model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures) elif args.modelid == 6: model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob) elif (args.modelid == 7) or (args.modelid == 8): model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False, loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp) elif args.modelid == 9: model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, 
starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True) elif args.modelid == 10: model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel) elif args.modelid == 11: model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4) else: sys.exit("Invalid Model ID") if args.modelid == 5: IsDeepSup = True else: IsDeepSup = False if args.profile: dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape) with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof: model(dummy) prof.export_chrome_trace(os.path.join(save_path, 'model_trace')) args.lr = args.lr/args.lrdecrate optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) model.to(device) if args.lossid == 0: if args.outchannel != 1: sys.exit("Perceptual Loss used here only works for 1 channel images") loss_func = PerceptualLoss(device=device, loss_model="unet3Dds", resize=None, loss_type=args.plosstyp, n_level=args.plosslvl) elif args.lossid == 1: loss_func = nn.L1Loss(reduction='mean') elif args.lossid == 2: loss_func = MultiSSIM(data_range=1, n_channels=args.outchannel, reduction='mean').to(device) elif args.lossid == 3: loss_func = SSIM(data_range=1, channel=args.outchannel, spatial_dims=3).to(device) else: sys.exit("Invalid Loss ID") if (args.lossid == 0 and args.plosstyp == "L1") or (args.lossid == 1): IsNegLoss = False else: IsNegLoss = True if (args.modelid == 7) or (args.modelid == 8): model.loss_func = 
loss_func scaler = GradScaler(enabled=args.amp) if args.chkpoint: chk = torch.load(args.chkpoint, map_location=device) elif args.finetune: if args.chkpointft: chk = torch.load(args.chkpointft, map_location=device) else: sys.exit("Finetune can't be performed if chkpointft not supplied") else: chk = None start_epoch = 0 best_loss = float('-inf') if IsNegLoss else float('inf') if chk is not None: model.load_state_dict(chk['state_dict']) optimizer.load_state_dict(chk['optimizer']) scaler.load_state_dict(chk['AMPScaler']) best_loss = chk['best_loss'] start_epoch = chk['epoch'] + 1 iterations = chk['iterations'] main_train_epcoh = (chk['main_train_epoch'] + 1) if 'main_train_epoch' in chk else start_epoch #only be used for finetune if args.finetune: if args.fteprt: args.epochs = int((main_train_epcoh*(1+args.fteprt))) else: args.iterations = int(iterations*args.ftitrt) n_ft_ep = int(args.iterations // len(train_loader)) args.epochs = main_train_epcoh + n_ft_ep if args.epochs is None: args.epochs = int(args.iterations // len(train_loader) + 1) if start_epoch >= args.epochs: logging.error('Training should atleast be for one epoch. 
Adjusting to perform 1 epoch training') args.epochs = start_epoch+1 if not args.wnbactive: os.environ["WANDB_MODE"] = "dryrun" with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+trainID, id=args.wnbprefix+trainID, resume=True) as WnBRun: wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq) logging.info('Training Epochs: from {0} to {1}'.format(start_epoch, args.epochs-1)) for epoch in range(start_epoch, args.epochs): #Train model.train() runningLoss = [] train_loss = [] print('Epoch '+ str(epoch)+ ': Train') for i, (images, gt) in enumerate(tqdm(train_loader)): images = images[:,1,...].unsqueeze(1).to(device) gt = gt.to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) elif type(model) is UNetMSS: out, mssout = model(images) loss = loss_func(out, gt) for mss in range(len(mssout)): loss += model.mss_coeff[mss] * loss_func(mssout[mss], gt) else: out = model(images) loss = loss_func(out, gt) if IsNegLoss: loss = -loss optimizer.zero_grad() scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) train_loss.append(loss) runningLoss.append(loss) logging.info('[%d/%d][%d/%d] Train Loss: %.4f' % ((epoch+1), args.epochs, i, len(train_loader), loss)) del gt, out, loss torch.cuda.empty_cache() if i % args.logfreq == 0: niter = epoch*len(train_loader)+i tb_writer.add_scalar('Train/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, "TrainLoss":loss_reducer(runningLoss)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'train') 
runningLoss = [] if args.finetune or (epoch % args.savefreq == 0): checkpoint = { 'epoch': epoch, 'iterations': (epoch+1)*len(train_loader), 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+".pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+".onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+".onnx") del images tb_writer.add_scalar('Train/EpochLoss', loss_reducer(train_loss), epoch) wandb.log({"TrainEpochLoss":loss_reducer(train_loss)})#, step=epoch) torch.cuda.empty_cache() #Validate if val_loader: model.eval() with torch.no_grad(): runningLoss = [] val_loss = [] runningAcc = [] val_acc = [] print('Epoch '+ str(epoch)+ ': Val') for i, (images, gt) in enumerate(tqdm(val_loader)): images = images[:,1,...].unsqueeze(1).to(device) gt = gt.to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) else: out = model(images) loss = loss_func(out, gt) ssim = getSSIM(gt.detach().cpu().numpy(), out.detach().cpu().numpy(), data_range=1) loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) val_loss.append(loss) runningLoss.append(loss) val_acc.append(ssim) runningAcc.append(ssim) logging.info('[%d/%d][%d/%d] Val Loss: %.4f' % ((epoch+1), args.epochs, i, len(val_loader), loss)) del gt, out, loss torch.cuda.empty_cache() #For tensorboard if i % args.logfreq == 0: niter = epoch*len(val_loader)+i tb_writer.add_scalar('Val/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, 
"ValLoss":loss_reducer(runningLoss)})#, step=niter) tb_writer.add_scalar('Val/SSIM', loss_reducer(runningAcc), niter) wandb.log({"Epoch":epoch, "ValSSIM":loss_reducer(runningAcc)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'val') runningLoss = [] runningAcc = [] if (loss_reducer(val_loss) < best_loss and not IsNegLoss) or (loss_reducer(val_loss) > best_loss and IsNegLoss): best_loss = loss_reducer(val_loss) WnBRun.summary["best_loss"] = best_loss checkpoint = { 'epoch': epoch, 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+"_best.pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+"_best.onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+"_best.onnx") del images tb_writer.add_scalar('Val/EpochLoss', loss_reducer(val_loss), epoch) wandb.log({"ValEpochLoss":loss_reducer(val_loss)})#, step=epoch) tb_writer.add_scalar('Val/EpochSSIM', loss_reducer(val_acc), epoch) wandb.log({"ValEpochSSIM":loss_reducer(val_acc)})#, step=epoch) torch.cuda.empty_cache()
26,396
53.99375
230
py
DDoS
DDoS-master/train_DDoS.py
import argparse import logging import math import os import random import statistics import sys import numpy as np import torch import torch.autograd.profiler as profiler import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchio as tio from torch.cuda.amp import GradScaler, autocast from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import wandb from models import * from models.ReconResNet import ResNet from models.ShuffleUNet.net import ShuffleUNet from models.ThisNewNet import ThisNewNet from utils.data import * from utils.datasets_dyn import SRDataset from utils.pLoss.perceptual_loss import PerceptualLoss from utils.utilities import getSSIM, tensorboard_images __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" modelIDs = { 0: "UNET", 1: "SRCNN", 2: "SRCNNv2", 3: "SRCNNv3", 4: "UNETvSeg", 5: "UNETvSegDS", 6: "DenseNet", 7: "UNETSRCNN", 8: "SRCNNUNET", 9: "ReconResNet", 10: "ShuffleUNet", 11: "UNETMSS", } lossIDs = { 0: "pLoss", 1: "MAE", 2: "MultiSSIM", 3: "SSIM3D" } def parseARGS(): ap = argparse.ArgumentParser() ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).") ap.add_argument("--seed", default=2020, type=int, help="Seed") # ap.add_argument("-ds", "--dataset", default=r'/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.") ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.") ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.") ap.add_argument("-s", "--scalefact", default='(1,1,1)', 
help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].") ap.add_argument("-uf", "--usfolder", default='usTrain', help="Undersampled Folder.") ap.add_argument("-hf", "--hrfolder", default='hrTrain', help="HighRes (Fully-sampled) Folder.") ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.") ap.add_argument("-ms", "--modelsuffix", default='full', help="Any Suffix To Add with the Model Name.") ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.") ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.") ap.add_argument("-cp", "--chkpoint", default=None, help="Checkpoint (of the current training) to Load.") ap.add_argument("-cpft", "--chkpointft", default=None, help="(To be used for Fine-Tuning) Checkpoint to Load for Fine-Tuning.") ap.add_argument("-c", "--cuda", type=bool, default=False, help="Use CUDA.") ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.") ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.") ap.add_argument("-v", "--val", type=bool, default=True, help="Do Validation.") ap.add_argument("-vp", "--valdsper", type=float, default=0.3, help="Percentage of the DS to be used for Validation.") ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.") ap.add_argument("-ep", "--epochs", type=int, default=100, help="Total Number of Epochs. To use Number of Iterations, set it to None") ap.add_argument("-it", "--iterations", type=int, default=1e6, help="Total Number of Iterations. To be used if number of Epochs is None") ap.add_argument("-lr", "--lr", type=float, default=1e-4, help="Total Number of Epochs.") ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. 
Set it to None if not desired.") ap.add_argument("-pst", "--patchstride", default='(12,12,6)', help="Stride of patches, to be used during validation") ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.") ap.add_argument("-sf", "--savefreq", type=int, default=1, help="saving Frequency.") ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).") ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs)) ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.") ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]") ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.") ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.") ap.add_argument("-inc", "--inchannel", type=int, default=2, help="Number of Channels in the Data.") ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.") ap.add_argument("-mslvl", "--msslevel", type=int, default=1, help="(Only for Model ID 11) Depth of the Model.") ap.add_argument("-msltn", "--msslatent", type=int, default=0, help="(Only for Model ID 11) Use the latent as one of the MSS level.") ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.") ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=1, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.") ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. 
Will only be used if Patch Size is None.") ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.") ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs)) ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.") ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.") ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired") ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).") ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.") ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.") ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.") ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Rescale intensities beteen 0 and 1") ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2") ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. 
Default 1") #WnB related params ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not") ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project") ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity") ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group") ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID") ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None") ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients") return ap.parse_args() args = parseARGS() # os.environ["TMPDIR"] = "/scratch/schatter/tmp" # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu torch.set_num_threads(1) random.seed(args.seed) os.environ['PYTHONHASHSEED'] = str(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if __name__ == "__main__" : args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(','))) args.homepath = os.path.expanduser("~/Documents") if args.patchsize: args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(','))) if args.patchstride: args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(','))) if args.inshape: args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(','))) args.modelname = args.usfolder + "_" + modelIDs[args.modelid] + args.modelsuffix if args.modelid == 0 or args.modelid == 6 or args.modelid == 11: args.modelname += "do" + str(args.dropprob) + "dp" + str(args.mdepth) if args.modelid == 0 or args.modelid == 9 or args.modelid == 11: args.modelname += 
args.upmode if args.batchnorm: args.modelname += "BN" if args.modelid == 11: args.modelname += "MSS"+str(args.msslevel) args.modelname += "Latent" if args.msslatent else "NoLatent" args.modelname += args.mssup args.modelname += "InterpB4" if args.mssinterpb4 else "NoInterpB4" trainID = args.modelname + '_' + args.us + '_' + lossIDs[args.lossid] if args.lossid == 0: trainID += args.plosstyp + 'lvl' + str(args.plosslvl) if args.finetune: trainID += "_FT_lrdec" + str(args.lrdecrate) if args.fteprt: trainID += "_eprt" + str(args.fteprt) else: trainID += "_itrt" + str(args.ftitrt) print("Training: "+trainID) if args.modelid == 2: SRCNN3D = SRCNN3Dv2 elif args.modelid == 3: SRCNN3D = SRCNN3Dv3 if args.medianloss: loss_reducer = statistics.median else: loss_reducer = statistics.mean dir_path = args.dataset + args.usfolder+ '/' + args.us + '/' label_dir_path = args.dataset + args.hrfolder + '/' log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', trainID) save_path = os.path.join(args.dataset, args.outfolder, trainID) device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") tb_writer = SummaryWriter(log_dir = log_path) os.makedirs(save_path, exist_ok=True) logname = os.path.join(args.homepath, 'log_'+trainID+'.txt') logging.basicConfig(filename=logname, filemode='a', format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) transforms = [] if not args.patchsize: transforms.append(tio.transforms.CropOrPad(target_shape=args.inshape)) trainDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed patch_size_us=None, pre_interpolate=args.preint, 
norm_data=args.prenorm, pre_load=False, pad_patch=False) #TODO implement patch_size_us if required - patch_size//scaling_factor model_scale_factor=tuple(np.roll(args.scalefact,shift=1)) if args.val: train_size = int((1-args.valdsper) * len(trainDS)) val_size = len(trainDS) - train_size trainDS, valDS = torch.utils.data.random_split(trainDS, [train_size, val_size]) else: valDS = None if bool(args.patchsize): args.inshape = args.patchsize train_loader = DataLoader(dataset=trainDS, batch_size=args.batchsize,shuffle=True, num_workers=args.nworkers, pin_memory=True) val_loader = None if not args.val else DataLoader(dataset=valDS,batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True) if args.modelid == 0: model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob)) elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3): sys.exit("SRCNN3D is not ready for different numbers of input and output channel") model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures) elif (args.modelid == 4) or (args.modelid == 5): model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures) elif args.modelid == 6: model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob) elif (args.modelid == 7) or (args.modelid == 8): model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False, loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp) elif args.modelid == 9: model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, 
starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True) elif args.modelid == 10: model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel) elif args.modelid == 11: model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob), mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4) else: sys.exit("Invalid Model ID") if args.modelid == 5: IsDeepSup = True else: IsDeepSup = False if args.profile: dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape) with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof: model(dummy) prof.export_chrome_trace(os.path.join(save_path, 'model_trace')) args.lr = args.lr/args.lrdecrate optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr) model.to(device) if args.lossid == 0: if args.outchannel != 1: sys.exit("Perceptual Loss used here only works for 1 channel images") loss_func = PerceptualLoss(device=device, loss_model="unet3Dds", resize=None, loss_type=args.plosstyp, n_level=args.plosslvl) elif args.lossid == 1: loss_func = nn.L1Loss(reduction='mean') elif args.lossid == 2: loss_func = MultiSSIM(data_range=1, n_channels=args.outchannel, reduction='mean').to(device) elif args.lossid == 3: loss_func = SSIM(data_range=1, channel=args.outchannel, spatial_dims=3).to(device) else: sys.exit("Invalid Loss ID") if (args.lossid == 0 and args.plosstyp == "L1") or (args.lossid == 1): IsNegLoss = False else: IsNegLoss = True if (args.modelid == 7) or (args.modelid == 8): model.loss_func = 
loss_func scaler = GradScaler(enabled=args.amp) if args.chkpoint: chk = torch.load(args.chkpoint, map_location=device) elif args.finetune: if args.chkpointft: chk = torch.load(args.chkpointft, map_location=device) else: sys.exit("Finetune can't be performed if chkpointft not supplied") else: chk = None start_epoch = 0 best_loss = float('-inf') if IsNegLoss else float('inf') if chk is not None: model.load_state_dict(chk['state_dict']) optimizer.load_state_dict(chk['optimizer']) scaler.load_state_dict(chk['AMPScaler']) best_loss = chk['best_loss'] start_epoch = chk['epoch'] + 1 iterations = chk['iterations'] main_train_epcoh = (chk['main_train_epoch'] + 1) if 'main_train_epoch' in chk else start_epoch #only be used for finetune if args.finetune: if args.fteprt: args.epochs = int((main_train_epcoh*(1+args.fteprt))) else: args.iterations = int(iterations*args.ftitrt) n_ft_ep = int(args.iterations // len(train_loader)) args.epochs = main_train_epcoh + n_ft_ep if args.epochs is None: args.epochs = int(args.iterations // len(train_loader) + 1) if start_epoch >= args.epochs: logging.error('Training should atleast be for one epoch. 
Adjusting to perform 1 epoch training') args.epochs = start_epoch+1 if not args.wnbactive: os.environ["WANDB_MODE"] = "dryrun" with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+trainID, id=args.wnbprefix+trainID, resume=True) as WnBRun: wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq) logging.info('Training Epochs: from {0} to {1}'.format(start_epoch, args.epochs-1)) for epoch in range(start_epoch, args.epochs): #Train model.train() runningLoss = [] train_loss = [] print('Epoch '+ str(epoch)+ ': Train') for i, (images, gt) in enumerate(tqdm(train_loader)): images = images.to(device) gt = gt.to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) elif type(model) is UNetMSS: out, mssout = model(images) loss = loss_func(out, gt) for mss in range(len(mssout)): loss += model.mss_coeff[mss] * loss_func(mssout[mss], gt) else: out = model(images) loss = loss_func(out, gt) if IsNegLoss: loss = -loss optimizer.zero_grad() scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) train_loss.append(loss) runningLoss.append(loss) logging.info('[%d/%d][%d/%d] Train Loss: %.4f' % ((epoch+1), args.epochs, i, len(train_loader), loss)) del gt, out, loss torch.cuda.empty_cache() if i % args.logfreq == 0: niter = epoch*len(train_loader)+i tb_writer.add_scalar('Train/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, "TrainLoss":loss_reducer(runningLoss)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'train') runningLoss = [] if 
args.finetune or (epoch % args.savefreq == 0): checkpoint = { 'epoch': epoch, 'iterations': (epoch+1)*len(train_loader), 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+".pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+".onnx", input_names=["HRPrevTP+LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+".onnx") del images tb_writer.add_scalar('Train/EpochLoss', loss_reducer(train_loss), epoch) wandb.log({"TrainEpochLoss":loss_reducer(train_loss)})#, step=epoch) torch.cuda.empty_cache() #Validate if val_loader: model.eval() with torch.no_grad(): runningLoss = [] val_loss = [] runningAcc = [] val_acc = [] print('Epoch '+ str(epoch)+ ': Val') for i, (images, gt) in enumerate(tqdm(val_loader)): images = images.to(device) gt = gt.to(device) with autocast(enabled=args.amp): if type(model) is SRCNN3D: output1, output2 = model(images) loss1 = loss_func(output1, gt) loss2 = loss_func(output2, gt) loss = loss2 + loss1 elif type(model) is UNetVSeg: if IsDeepSup: sys.exit("Not Implimented yet") else: out, _, _ = model(images) loss = loss_func(out, gt) elif type(model) is ThisNewNet: out, loss = model(images, gt=gt) else: out = model(images) loss = loss_func(out, gt) ssim = getSSIM(gt.detach().cpu().numpy(), out.detach().cpu().numpy(), data_range=1) loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4) val_loss.append(loss) runningLoss.append(loss) val_acc.append(ssim) runningAcc.append(ssim) logging.info('[%d/%d][%d/%d] Val Loss: %.4f' % ((epoch+1), args.epochs, i, len(val_loader), loss)) del gt, out, loss torch.cuda.empty_cache() #For tensorboard if i % args.logfreq == 0: niter = epoch*len(val_loader)+i tb_writer.add_scalar('Val/Loss', loss_reducer(runningLoss), niter) wandb.log({"Epoch":epoch, "ValLoss":loss_reducer(runningLoss)})#, step=niter) 
tb_writer.add_scalar('Val/SSIM', loss_reducer(runningAcc), niter) wandb.log({"Epoch":epoch, "ValSSIM":loss_reducer(runningAcc)})#, step=niter) # tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'val') runningLoss = [] runningAcc = [] if (loss_reducer(val_loss) < best_loss and not IsNegLoss) or (loss_reducer(val_loss) > best_loss and IsNegLoss): best_loss = loss_reducer(val_loss) WnBRun.summary["best_loss"] = best_loss checkpoint = { 'epoch': epoch, 'best_loss': best_loss, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'AMPScaler': scaler.state_dict() } torch.save(checkpoint, os.path.join(save_path, trainID+"_best.pth.tar")) if args.modelid != 9 and args.modelid != 6: torch.onnx.export(model, images, trainID+"_best.onnx", input_names=["HRPrevTP+LRCurrTP"], output_names=["SuperResolvedCurrTP"]) wandb.save(trainID+"_best.onnx") del images tb_writer.add_scalar('Val/EpochLoss', loss_reducer(val_loss), epoch) wandb.log({"ValEpochLoss":loss_reducer(val_loss)})#, step=epoch) tb_writer.add_scalar('Val/EpochSSIM', loss_reducer(val_acc), epoch) wandb.log({"ValEpochSSIM":loss_reducer(val_acc)})#, step=epoch) torch.cuda.empty_cache()
26,365
53.929167
230
py
DDoS
DDoS-master/models/unet3DMSS.py
# Adapted from https://discuss.pytorch.org/t/unet-implementation/426 import torch from torch import nn import torch.nn.functional as F import torchcomplex.nn.functional as cF __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class UNetMSS(nn.Module): """ Implementation of U-Net: Convolutional Networks for Biomedical Image Segmentation (Ronneberger et al., 2015) https://arxiv.org/abs/1505.04597 Using the default arguments will yield the exact version used in the original paper Args: in_channels (int): number of input channels n_classes (int): number of output channels depth (int): depth of the network wf (int): number of filters in the first layer is 2**wf padding (bool): if True, apply padding such that the input shape is the same as the output. This may introduce artifacts batch_norm (bool): Use BatchNorm after layers with an activation function up_mode (str): one of 'upconv' or 'upsample'. 'upconv' will use transposed convolutions for learned upsampling. 'upsample' will use bilinear upsampling. 
""" def __init__(self, in_channels=1, n_classes=1, depth=3, wf=6, padding=True, batch_norm=False, up_mode='upconv', dropout=False, mss_level=2, mss_fromlatent=True, mss_up="trilinear", mss_interpb4=False): super(UNetMSS, self).__init__() assert up_mode in ('upconv', 'upsample') self.padding = padding self.depth = depth self.dropout = nn.Dropout3d() if dropout else nn.Sequential() prev_channels = in_channels self.down_path = nn.ModuleList() up_out_features = [] for i in range(depth): self.down_path.append(UNetConvBlock(prev_channels, 2**(wf+i), padding, batch_norm)) prev_channels = 2**(wf+i) if mss_fromlatent: mss_features = [prev_channels] else: mss_features = [] self.up_path = nn.ModuleList() for i in reversed(range(depth - 1)): self.up_path.append(UNetUpBlock(prev_channels, 2**(wf+i), up_mode, padding, batch_norm)) prev_channels = 2**(wf+i) up_out_features.append(prev_channels) self.last = nn.Conv3d(prev_channels, n_classes, kernel_size=1) mss_features += up_out_features[len(up_out_features)-1-mss_level if not mss_fromlatent else len(up_out_features)-1-mss_level+1:-1] self.mss_level = mss_level self.mss_up = mss_up self.mss_fromlatent = mss_fromlatent self.mss_interpb4 = mss_interpb4 self.mss_convs = nn.ModuleList() for i in range(self.mss_level): self.mss_convs.append(nn.Conv3d(mss_features[i], n_classes, kernel_size=1)) if self.mss_level == 1: self.mss_coeff = [0.5] else: lmbda = [] for i in range(self.mss_level-1, -1, -1): lmbda.append(2**i) self.mss_coeff = [] fact = 1.0 / sum(lmbda) for i in range(self.mss_level-1): self.mss_coeff.append(fact*lmbda[i]) self.mss_coeff.append(1.0 - sum(self.mss_coeff)) self.mss_coeff.reverse() def forward(self, x): blocks = [] for i, down in enumerate(self.down_path): x = down(x) if i != len(self.down_path)-1: blocks.append(x) x = F.avg_pool3d(x, 2) x = self.dropout(x) if self.mss_fromlatent: mss = [x] else: mss = [] for i, up in enumerate(self.up_path): x = up(x, blocks[-i-1]) if self.training and ((len(self.up_path)-1-i <= 
self.mss_level) and not(i+1 == len(self.up_path))): mss.append(x) if self.training: for i in range(len(mss)): if not self.mss_interpb4: mss[i] = F.interpolate(self.mss_convs[i](mss[i]), size=x.shape[2:], mode=self.mss_up) else: mss[i] = self.mss_convs[i](F.interpolate(mss[i], size=x.shape[2:], mode=self.mss_up)) return self.last(x), mss else: return self.last(x) class UNetConvBlock(nn.Module): def __init__(self, in_size, out_size, padding, batch_norm): super(UNetConvBlock, self).__init__() block = [] block.append(nn.Conv3d(in_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(nn.BatchNorm3d(out_size)) block.append(nn.Conv3d(out_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(nn.BatchNorm3d(out_size)) self.block = nn.Sequential(*block) def forward(self, x): out = self.block(x) return out class UNetUpBlock(nn.Module): def __init__(self, in_size, out_size, up_mode, padding, batch_norm): super(UNetUpBlock, self).__init__() if up_mode == 'upconv': self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=2, stride=2) elif up_mode == 'upsample': self.up = nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2), nn.Conv3d(in_size, out_size, kernel_size=1)) self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm) def center_crop(self, layer, target_size): _, _, layer_depth, layer_height, layer_width = layer.size() diff_z = (layer_depth - target_size[0]) // 2 diff_y = (layer_height - target_size[1]) // 2 diff_x = (layer_width - target_size[2]) // 2 return layer[:, :, diff_z:(diff_z + target_size[0]), diff_y:(diff_y + target_size[1]), diff_x:(diff_x + target_size[2])] # _, _, layer_height, layer_width = layer.size() #for 2D data # diff_y = (layer_height - target_size[0]) // 2 # diff_x = (layer_width - target_size[1]) // 2 # return layer[:, :, diff_y:(diff_y + target_size[0]), diff_x:(diff_x + target_size[1])] def forward(self, x, bridge): up = 
self.up(x) # bridge = self.center_crop(bridge, up.shape[2:]) #sending shape ignoring 2 digit, so target size start with 0,1,2 up = F.interpolate(up, size=bridge.shape[2:], mode='trilinear') out = torch.cat([up, bridge], 1) out = self.conv_block(out) return out
7,232
38.961326
128
py
DDoS
DDoS-master/models/SRCNN3Dv3.py
import numpy as np import torch import torch.nn as nn __author__ = "Soumick Chatterjee, Geetha Doddapaneni Gopinath" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Geetha Doddapaneni Gopinath"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class SRCNN3Dv3(nn.Module): def __init__(self,n_channels=1,scale_factor=2,num_features=32,kernel_size=3,stride=1): super(SRCNN3Dv3, self).__init__() if type(scale_factor) is int: self.scale_factor=(scale_factor,scale_factor,scale_factor) else: self.scale_factor=scale_factor self.n_channels=n_channels self.conv_1 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_2 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_3 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_4 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_5 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_6s = nn.ModuleList() self.conv_6s_post = nn.ModuleList() for i in range(len(self.scale_factor)): if self.scale_factor[i] > 1: out_features = np.prod(self.scale_factor[i:]) self.conv_6s.append(nn.Sequential(nn.Conv3d(num_features, out_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=out_features), 
nn.ReLU(inplace=True))) self.conv_6s_post.append(nn.Sequential(nn.Conv3d(out_features // self.scale_factor[i], num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True))) self.conv_7 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_8 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_9 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_10 = nn.Sequential(nn.Conv3d(num_features, n_channels, kernel_size, padding=kernel_size // 2), nn.Sigmoid()) def forward(self, image): output_1 = self.conv_1(image) output_2 = self.conv_2(output_1) output_3a = self.conv_3(output_2) output_3 = torch.add(output_1, output_3a) #torch.mul(output_1, 1) = output_1 #Note for Geetha output_4 = self.conv_4(output_3) output_5a = self.conv_5(output_4) output_5 = torch.add(output_1, output_5a) output_6 = output_5 mod_ind = 0 for i in range(len(self.scale_factor)): if self.scale_factor[i] > 1: output_6 = self.conv_6s[mod_ind](output_6) suffled_size = list(output_6.shape) suffled_size[1] //= self.scale_factor[i] suffled_size[2+i] *= self.scale_factor[i] output_6 = output_6.view(suffled_size) if i+1 < len(self.scale_factor): output_6 = self.conv_6s_post[mod_ind](output_6) mod_ind += 1 output_7 = self.conv_7(output_6) output_8 = self.conv_8(output_7) output_9a = self.conv_9(output_8) output_9 = torch.add(output_7, output_9a) output = self.conv_10(output_9) # Final Loss return output_6, output if __name__ == "__main__": tensor = torch.rand((2, 1, 24, 16, 16)).cuda() model = SRCNN3Dv2(scale_factor=(2,4,3)).cuda() model(tensor) # model = 
SRCNN3D(1,num_features=64,scale_factor=(2,1,1)).cuda() # from torchsummary import summary # summary(model, input_size=(1, 32, 32, 32))
5,034
53.728261
165
py
DDoS
DDoS-master/models/densenet.py
# Source: https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/models/densenet.py # Paper Ref: https://arxiv.org/abs/2004.04968 from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Untested" class _DenseLayer(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): super().__init__() self.add_module('norm1', nn.BatchNorm3d(num_input_features)) self.add_module('relu1', nn.ReLU(inplace=True)) self.add_module( 'conv1', nn.Conv3d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)) self.add_module('norm2', nn.BatchNorm3d(bn_size * growth_rate)) self.add_module('relu2', nn.ReLU(inplace=True)) self.add_module( 'conv2', nn.Conv3d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)) self.drop_rate = drop_rate def forward(self, x): new_features = super().forward(x) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return torch.cat([x, new_features], 1) class _DenseBlock(nn.Sequential): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate): super().__init__() for i in range(num_layers): layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate) self.add_module('denselayer{}'.format(i + 1), layer) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features, no_pool=True): super().__init__() self.add_module('norm', nn.BatchNorm3d(num_input_features)) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module( 'conv', nn.Conv3d(num_input_features, num_output_features, 
kernel_size=1, stride=1, bias=False)) if not no_pool: self.add_module('pool', nn.AvgPool3d(kernel_size=2, stride=2)) class DenseNet(nn.Module): """Densenet-BC model class Args: growth_rate (int) - how many filters to add each layer (k in paper) block_config (list of 4 ints) - how many layers in each pooling block num_init_features (int) - the number of filters to learn in the first convolution layer bn_size (int) - multiplicative factor for number of bottle neck layers (i.e. bn_size * k features in the bottleneck layer) drop_rate (float) - dropout rate after each dense layer num_classes (int) - number of classification classes """ def __init__(self, n_input_channels=3, conv1_kernel=7, conv1_stride=1, no_pool=True, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000): super().__init__() # First convolution self.model = [('conv1', nn.Conv3d(n_input_channels, num_init_features, kernel_size=conv1_kernel, stride=conv1_stride, padding=conv1_kernel // 2, bias=False)), ('norm1', nn.BatchNorm3d(num_init_features)), ('relu1', nn.ReLU(inplace=True))] if not no_pool: self.model.append( ('pool1', nn.MaxPool3d(kernel_size=3, stride=2, padding=1))) self.model = nn.Sequential(OrderedDict(self.model)) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate) self.model.add_module('denseblock{}'.format(i + 1), block) num_features = num_features + num_layers * growth_rate if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, no_pool=no_pool) self.model.add_module('transition{}'.format(i + 1), trans) num_features = num_features // 2 # Final batch norm self.model.add_module('norm5', nn.BatchNorm3d(num_features)) # Final fully connected layer self.model.add_module('finconv', 
nn.Conv3d(num_features, num_classes, kernel_size=1, stride=1, padding=0)) for m in self.modules(): if isinstance(m, nn.Conv3d): m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out') elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() for m in self.modules(): if isinstance(m, nn.Conv3d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm3d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def forward(self, x): return self.model(x) def generate_model(model_depth, **kwargs): assert model_depth in [121, 169, 201, 264] if model_depth == 121: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs) elif model_depth == 169: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 32, 32), **kwargs) elif model_depth == 201: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 48, 32), **kwargs) elif model_depth == 264: model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 64, 48), **kwargs) return model
7,339
37.631579
114
py
DDoS
DDoS-master/models/SRCNN3D.py
import numpy as np import torch import torch.nn as nn __author__ = "Soumick Chatterjee, Geetha Doddapaneni Gopinath" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Geetha Doddapaneni Gopinath"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class SRCNN3D(nn.Module): def __init__(self,n_channels=1,scale_factor=2,num_features=32,kernel_size=3,stride=1): super(SRCNN3D, self).__init__() if type(scale_factor) is int: self.scale_factor=(scale_factor,scale_factor,scale_factor) else: self.scale_factor=scale_factor # n_dim_upscale = 0 # for f in self.scale_factor: # if f > 1: # n_dim_upscale += 1 #This will only work for scale factor of 2 in any num of dims TODO # activation_maps = 2 ** n_dim_upscale self.n_channels=n_channels activation_maps = np.prod(self.scale_factor) self.conv_1 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_2 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_3 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_4 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_5 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_6 = nn.Sequential(nn.Conv3d(num_features, activation_maps, kernel_size, stride, padding=kernel_size // 2), 
nn.BatchNorm3d(num_features=activation_maps), nn.ReLU(inplace=True)) self.conv_7 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_8 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_9 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_10 = nn.Sequential(nn.Conv3d(num_features, n_channels, kernel_size, padding=kernel_size // 2), nn.Sigmoid()) def forward(self, image): output_1 = self.conv_1(image) output_2 = self.conv_2(output_1) output_3a = self.conv_3(output_2) output_3 = torch.add(output_1, output_3a) #torch.mul(output_1, 1) = output_1 #Note for Geetha output_4 = self.conv_4(output_3) output_5a = self.conv_5(output_4) output_5 = torch.add(output_1, output_5a) output_6 = self.conv_6(output_5) suffled_size = tuple(np.multiply(output_6.shape[2:], self.scale_factor)) output_7 = output_6.view(output_6.shape[0], self.n_channels, *suffled_size) output_8 = self.conv_7(output_7) output_9 = self.conv_8(output_8) output_10a = self.conv_9(output_9) output_10 = torch.add(output_8, output_10a) output = self.conv_10(output_10) # Final Loss return output_7, output if __name__ == "__main__": tensor = torch.rand((2, 1, 24, 16, 16)).cuda() model = SRCNN3D(scale_factor=(2,1,3)).cuda() model(tensor) # model = SRCNN3D(1,num_features=64,scale_factor=(2,1,1)).cuda() # from torchsummary import summary # summary(model, input_size=(1, 32, 32, 32))
4,427
56.506494
125
py
DDoS
DDoS-master/models/brokenconv.py
import numpy as np import torch import torch.nn as nn __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class BrokenConvNd(nn.Module): def __init__(self, div_dim, learn_alpha=False, conv_layer=nn.Conv2d, **kwargs): super(BrokenConvNd, self).__init__() self.div_dim = div_dim self.n_conv = np.multiply(*div_dim) self.convs = nn.ModuleList() for _ in range(self.n_conv): self.convs.append(conv_layer(**kwargs)) if learn_alpha: self.alphas = nn.Parameter(data=torch.rand(self.n_conv)) else: self.alphas = [1]*self.n_conv def _split_tensorlist(self, tensor_list, split_size_or_sections, dim): split_tensor_list = [] for t in tensor_list: split_tensor_list += list(torch.split(t, split_size_or_sections=split_size_or_sections, dim=dim+2)) return split_tensor_list def _chunk_tensorlist(self, tensor_list, n_chunks, dim): split_tensor_list = [] for t in tensor_list: split_tensor_list += list(torch.chunk(t, chunks=n_chunks, dim=dim+2)) return split_tensor_list def _cat_tensorlist(self, split_tensor_list, n_split, dim): tensor_list = [] for i in range(0,len(split_tensor_list),n_split): tensor_list.append(torch.cat(split_tensor_list[i:i+n_split], dim=dim+2)) return tensor_list def forward(self, x): # dim = x.shape[2:] # dim_size = np.divide(dim, self.div_dim).astype(np.int) x = [x] for d in range(len(self.div_dim)): # x = self._split_tensorlist(x, split_size_or_sections=int(dim_size[d]), dim=d) x = self._chunk_tensorlist(x, n_chunks=int(self.div_dim[d]), dim=d) res = [] for i in range(self.n_conv): res.append(self.alphas[i] * self.convs[i](x[i])) for d in range(len(self.div_dim)-1,-1,-1): res = self._cat_tensorlist(res, n_split=int(self.div_dim[d]), dim=d) return res[0]
2,300
37.35
111
py
DDoS
DDoS-master/models/SRCNN3Dv2.py
import numpy as np import torch import torch.nn as nn __author__ = "Soumick Chatterjee, Geetha Doddapaneni Gopinath" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Geetha Doddapaneni Gopinath"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class SRCNN3Dv2(nn.Module): def __init__(self,n_channels=1,scale_factor=2,num_features=32,kernel_size=3,stride=1): super(SRCNN3Dv2, self).__init__() if type(scale_factor) is int: self.scale_factor=(scale_factor,scale_factor,scale_factor) else: self.scale_factor=scale_factor self.n_channels=n_channels self.conv_1 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_2 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_3 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_4 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_5 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_6s = nn.ModuleList() in_features = num_features for i in range(len(self.scale_factor)): if self.scale_factor[i] > 1: out_features = np.prod(self.scale_factor[i:]) self.conv_6s.append(nn.Sequential(nn.Conv3d(in_features, out_features, kernel_size, stride, padding=kernel_size // 2), nn.BatchNorm3d(num_features=out_features), 
nn.ReLU(inplace=True))) in_features = out_features // self.scale_factor[i] self.conv_7 = nn.Sequential(nn.Conv3d(n_channels, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_8 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_9 = nn.Sequential(nn.Conv3d(num_features, num_features, kernel_size, padding=kernel_size // 2), nn.BatchNorm3d(num_features=num_features), nn.ReLU(inplace=True)) self.conv_10 = nn.Sequential(nn.Conv3d(num_features, n_channels, kernel_size, padding=kernel_size // 2), nn.Sigmoid()) def forward(self, image): output_1 = self.conv_1(image) output_2 = self.conv_2(output_1) output_3a = self.conv_3(output_2) output_3 = torch.add(output_1, output_3a) #torch.mul(output_1, 1) = output_1 #Note for Geetha output_4 = self.conv_4(output_3) output_5a = self.conv_5(output_4) output_5 = torch.add(output_1, output_5a) output_6 = output_5 mod_ind = 0 for i in range(len(self.scale_factor)): if self.scale_factor[i] > 1: output_6 = self.conv_6s[mod_ind](output_6) mod_ind += 1 suffled_size = list(output_6.shape) suffled_size[1] //= self.scale_factor[i] suffled_size[2+i] *= self.scale_factor[i] output_6 = output_6.view(suffled_size) output_7 = self.conv_7(output_6) output_8 = self.conv_8(output_7) output_9a = self.conv_9(output_8) output_9 = torch.add(output_7, output_9a) output = self.conv_10(output_9) # Final Loss return output_6, output # if __name__ == "__main__": # tensor = torch.rand((2, 1, 24, 16, 16)).cuda() # model = SRCNN3Dv2(scale_factor=(2,1,3)).cuda() # model(tensor) # model = SRCNN3D(1,num_features=64,scale_factor=(2,1,1)).cuda() # from torchsummary import summary # summary(model, input_size=(1, 32, 32, 32))
4,709
51.921348
135
py
DDoS
DDoS-master/models/unet3D_DeepSup.py
# from __future__ import print_function, division import torch import torch.nn as nn import torch.utils.data __author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class conv_block(nn.Module): """ Convolution Block """ def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True): super(conv_block, self).__init__() self.conv = nn.Sequential( nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(num_features=out_channels), nn.ReLU(inplace=True), nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(num_features=out_channels), nn.ReLU(inplace=True) ) def forward(self, x): x = self.conv(x) return x class up_conv(nn.Module): """ Up Convolution Block """ # def __init__(self, in_ch, out_ch): def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True): super(up_conv, self).__init__() self.up = nn.Sequential( nn.Upsample(scale_factor=2), nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(num_features=out_channels), nn.ReLU(inplace=True)) def forward(self, x): x = self.up(x) return x class U_Net_DeepSup(nn.Module): """ UNet - Basic Implementation Input _ [batch * channel(# of channels of each image) * depth(# of frames) * height * width]. 
Paper : https://arxiv.org/abs/1505.04597 """ def __init__(self, in_ch=1, out_ch=1, n1=64): super(U_Net_DeepSup, self).__init__() filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] # 64,128,256,512,1024 self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2) self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2) self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2) self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2) self.Conv1 = conv_block(in_ch, filters[0]) self.Conv2 = conv_block(filters[0], filters[1]) self.Conv3 = conv_block(filters[1], filters[2]) self.Conv4 = conv_block(filters[2], filters[3]) self.Conv5 = conv_block(filters[3], filters[4]) #1x1x1 Convolution for Deep Supervision self.Conv_d3 = conv_block(filters[1], 1) self.Conv_d4 = conv_block(filters[2], 1) self.Up5 = up_conv(filters[4], filters[3]) self.Up_conv5 = conv_block(filters[4], filters[3]) self.Up4 = up_conv(filters[3], filters[2]) self.Up_conv4 = conv_block(filters[3], filters[2]) self.Up3 = up_conv(filters[2], filters[1]) self.Up_conv3 = conv_block(filters[2], filters[1]) self.Up2 = up_conv(filters[1], filters[0]) self.Up_conv2 = conv_block(filters[1], filters[0]) self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0) # self.active = torch.nn.Sigmoid() def forward(self, x): # print("unet") # print(x.shape) # print(padded.shape) e1 = self.Conv1(x) # print("conv1:") # print(e1.shape) e2 = self.Maxpool1(e1) e2 = self.Conv2(e2) # print("conv2:") # print(e2.shape) e3 = self.Maxpool2(e2) e3 = self.Conv3(e3) # print("conv3:") # print(e3.shape) e4 = self.Maxpool3(e3) e4 = self.Conv4(e4) # print("conv4:") # print(e4.shape) e5 = self.Maxpool4(e4) e5 = self.Conv5(e5) # print("conv5:") # print(e5.shape) d5 = self.Up5(e5) # print("d5:") # print(d5.shape) # print("e4:") # print(e4.shape) d5 = torch.cat((e4, d5), dim=1) d5 = self.Up_conv5(d5) # print("upconv5:") # print(d5.size) d4 = self.Up4(d5) # print("d4:") # print(d4.shape) d4 = torch.cat((e3, d4), dim=1) d4 = self.Up_conv4(d4) d4_out = 
self.Conv_d4(d4) # print("upconv4:") # print(d4.shape) d3 = self.Up3(d4) d3 = torch.cat((e2, d3), dim=1) d3 = self.Up_conv3(d3) d3_out = self.Conv_d3(d3) # print("upconv3:") # print(d3.shape) d2 = self.Up2(d3) d2 = torch.cat((e1, d2), dim=1) d2 = self.Up_conv2(d2) # print("upconv2:") # print(d2.shape) out = self.Conv(d2) # print("out:") # print(out.shape) # d1 = self.active(out) return [out, d3_out , d4_out]
5,263
29.783626
110
py
DDoS
DDoS-master/models/ThisNewNet.py
import math import torch.nn as nn from models import * __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Under Testing" class ThisNewNet(nn.Module): def __init__(self, scale_factor, loss_func=None, in_channels=1, n_classes=1, depth=3, batch_norm=False, up_mode="upsample", dropout=0.0, num_features=64, sliceup_first=False, loss_slice_count=2, loss_inplane=True): super(ThisNewNet, self).__init__() self.in_plane_upsampler = UNet(in_channels=in_channels, n_classes=n_classes, depth=depth, wf=round(math.log(num_features,2)), batch_norm=batch_norm, up_mode=up_mode, dropout=dropout) self.slice_upsampler = SRCNN3D(n_channels=in_channels, scale_factor=scale_factor, num_features=num_features) self.sliceup_first = sliceup_first self.loss_func = loss_func self.scale_factor = scale_factor self.loss_slice_count = loss_slice_count self.loss_inplane = loss_inplane def forward(self, images, gt=None): if self.sliceup_first: _, up_images = self.slice_upsampler(images) output = self.in_plane_upsampler(up_images) else: up_images = self.in_plane_upsampler(images) aux_out, output = self.slice_upsampler(up_images) if gt is None or self.loss_func is None: return output else: if self.sliceup_first: loss = self.loss_func(output, gt) else: in_plane_loss = self.loss_func(up_images, gt[:,:,::self.scale_factor[0],...]) #unet loss slice_aux_loss = self.loss_func(aux_out, gt) #aux srcnn loss slice_main_loss = self.loss_func(output, gt) #srcnn loss loss = slice_main_loss if self.loss_inplane: loss += in_plane_loss if self.loss_slice_count > 1: loss += slice_aux_loss # loss = in_plane_loss + slice_aux_loss + slice_main_loss return output, loss
2,283
46.583333
218
py
DDoS
DDoS-master/models/unet3D.py
# Adapted from https://discuss.pytorch.org/t/unet-implementation/426 import torch from torch import nn import torch.nn.functional as F __author__ = "Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class UNet(nn.Module): """ Implementation of U-Net: Convolutional Networks for Biomedical Image Segmentation (Ronneberger et al., 2015) https://arxiv.org/abs/1505.04597 Using the default arguments will yield the exact version used in the original paper Args: in_channels (int): number of input channels n_classes (int): number of output channels depth (int): depth of the network wf (int): number of filters in the first layer is 2**wf padding (bool): if True, apply padding such that the input shape is the same as the output. This may introduce artifacts batch_norm (bool): Use BatchNorm after layers with an activation function up_mode (str): one of 'upconv' or 'upsample'. 'upconv' will use transposed convolutions for learned upsampling. 'upsample' will use bilinear upsampling. 
""" def __init__(self, in_channels=1, n_classes=1, depth=3, wf=6, padding=True, batch_norm=False, up_mode='upconv', dropout=False): super(UNet, self).__init__() assert up_mode in ('upconv', 'upsample') self.padding = padding self.depth = depth self.dropout = nn.Dropout3d() if dropout else nn.Sequential() prev_channels = in_channels self.down_path = nn.ModuleList() for i in range(depth): self.down_path.append(UNetConvBlock(prev_channels, 2**(wf+i), padding, batch_norm)) prev_channels = 2**(wf+i) self.up_path = nn.ModuleList() for i in reversed(range(depth - 1)): self.up_path.append(UNetUpBlock(prev_channels, 2**(wf+i), up_mode, padding, batch_norm)) prev_channels = 2**(wf+i) self.last = nn.Conv3d(prev_channels, n_classes, kernel_size=1) def forward(self, x): blocks = [] for i, down in enumerate(self.down_path): x = down(x) if i != len(self.down_path)-1: blocks.append(x) x = F.avg_pool3d(x, 2) x = self.dropout(x) for i, up in enumerate(self.up_path): x = up(x, blocks[-i-1]) return self.last(x) class UNetConvBlock(nn.Module): def __init__(self, in_size, out_size, padding, batch_norm): super(UNetConvBlock, self).__init__() block = [] block.append(nn.Conv3d(in_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(nn.BatchNorm3d(out_size)) block.append(nn.Conv3d(out_size, out_size, kernel_size=3, padding=int(padding))) block.append(nn.ReLU()) if batch_norm: block.append(nn.BatchNorm3d(out_size)) self.block = nn.Sequential(*block) def forward(self, x): out = self.block(x) return out class UNetUpBlock(nn.Module): def __init__(self, in_size, out_size, up_mode, padding, batch_norm): super(UNetUpBlock, self).__init__() if up_mode == 'upconv': self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=2, stride=2) elif up_mode == 'upsample': self.up = nn.Sequential(nn.Upsample(mode='trilinear', scale_factor=2), nn.Conv3d(in_size, out_size, kernel_size=1)) self.conv_block = UNetConvBlock(in_size, out_size, padding, 
batch_norm) def center_crop(self, layer, target_size): _, _, layer_depth, layer_height, layer_width = layer.size() diff_z = (layer_depth - target_size[0]) // 2 diff_y = (layer_height - target_size[1]) // 2 diff_x = (layer_width - target_size[2]) // 2 return layer[:, :, diff_z:(diff_z + target_size[0]), diff_y:(diff_y + target_size[1]), diff_x:(diff_x + target_size[2])] # _, _, layer_height, layer_width = layer.size() #for 2D data # diff_y = (layer_height - target_size[0]) // 2 # diff_x = (layer_width - target_size[1]) // 2 # return layer[:, :, diff_y:(diff_y + target_size[0]), diff_x:(diff_x + target_size[1])] def forward(self, x, bridge): up = self.up(x) # bridge = self.center_crop(bridge, up.shape[2:]) #sending shape ignoring 2 digit, so target size start with 0,1,2 up = F.interpolate(up, size=bridge.shape[2:], mode='trilinear') out = torch.cat([up, bridge], 1) out = self.conv_block(out) return out
5,245
38.443609
128
py
DDoS
DDoS-master/models/ReconResNet.py
#!/usr/bin/env python
"""ResNet-style encoder/decoder network (2D or 3D) with selectable residual
forward variants (forwardV0..forwardV5) and configurable up/down sampling.

NOTE(review): this module relies on ``ResNet.__init__`` injecting layer
factories (``layer_conv``, ``layer_pad``, ``layer_norm``, ``act_relu``,
``layer_drop``, ``layer_convtrans``) into module globals via
``globals().update(layers)``.  ``ResidualBlock``, ``DownsamplingBlock`` and
``UpsamplingBlock`` are therefore only constructible AFTER a ``ResNet`` has
been created, and constructing two ``ResNet``s with different configs
(e.g. 2D then 3D) mutates this shared state -- confirm no caller does that.
"""

import torch.nn as nn

from tricorder.torch.transforms import Interpolator

__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Soumick Chatterjee & OvGU:ESF:MEMoRIAL"
__credits__ = ["Soumick Chatterjee"]
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Under Testing"


class ResidualBlock(nn.Module):
    """Residual block: x + F(x), where F is pad-conv-norm-act-drop-pad-conv-norm.

    Uses the globally injected layer factories (see module docstring), so the
    block is 2D or 3D depending on how the owning ``ResNet`` was configured.
    """

    def __init__(self, in_features, drop_prob=0.2):
        # in_features: channel count, preserved through the block.
        # drop_prob: dropout probability applied between the two convs.
        super(ResidualBlock, self).__init__()

        # 1-pixel pad before each 3x3 conv keeps the spatial size unchanged,
        # which the identity skip in forward() requires.
        conv_block = [layer_pad(1),
                      layer_conv(in_features, in_features, 3),
                      layer_norm(in_features),
                      act_relu(),
                      layer_drop(p=drop_prob, inplace=True),
                      layer_pad(1),
                      layer_conv(in_features, in_features, 3),
                      layer_norm(in_features)]

        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        # Identity skip connection.
        return x + self.conv_block(x)


class DownsamplingBlock(nn.Module):
    """Strided-conv downsampling: stride-2 3x3 conv -> norm -> activation."""

    def __init__(self, in_features, out_features):
        super(DownsamplingBlock, self).__init__()

        # stride=2 halves each spatial dimension (rounded up via padding=1).
        conv_block = [layer_conv(in_features, out_features, 3, stride=2, padding=1),
                      layer_norm(out_features),
                      act_relu()]
        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        return self.conv_block(x)


class UpsamplingBlock(nn.Module):
    """Upsampling block: transposed conv ("convtrans") or interpolate+conv.

    Parameters
    ----------
    in_features / out_features : channel counts before/after the block.
    mode : "convtrans" uses a stride-2 transposed conv; any other value is
        treated as an interpolation algorithm handled by ``interpolator``.
    interpolator : callable ``interpolator(x, out_shape)`` used in the
        non-convtrans path (and in the post_interp fix-up path).
    post_interp_convtrans : in "convtrans" mode, if the transposed-conv
        output shape differs from the requested ``out_shape``, resample with
        ``interpolator`` and re-mix channels with a 1x1 conv.
    """

    def __init__(self, in_features, out_features, mode="convtrans", interpolator=None, post_interp_convtrans=False):
        super(UpsamplingBlock, self).__init__()

        self.interpolator = interpolator
        self.mode = mode
        self.post_interp_convtrans = post_interp_convtrans
        if self.post_interp_convtrans:
            # 1x1 conv applied only after a shape-fixing interpolation.
            self.post_conv = layer_conv(out_features, out_features, 1)

        if mode == "convtrans":
            # output_padding=1 makes the stride-2 transposed conv exactly
            # double even-sized inputs.
            conv_block = [layer_convtrans(
                in_features, out_features, 3, stride=2, padding=1, output_padding=1), ]
        else:
            conv_block = [layer_pad(1),
                          layer_conv(in_features, out_features, 3), ]
        conv_block += [layer_norm(out_features),
                       act_relu()]
        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x, out_shape=None):
        # out_shape: target spatial shape (the matching encoder feature's
        # ``.shape[2:]``), supplied by the ResNet forward variants.
        if self.mode == "convtrans":
            if self.post_interp_convtrans:
                x = self.conv_block(x)
                if x.shape[2:] != out_shape:
                    # Transposed conv doubled the size but the encoder shape
                    # was odd -- resample to out_shape and re-mix channels.
                    return self.post_conv(self.interpolator(x, out_shape))
                else:
                    return x
            else:
                # NOTE(review): out_shape is ignored on this path; with
                # odd-sized inputs the output may not match the saved
                # encoder shape -- confirm inputs are evenly divisible.
                return self.conv_block(x)
        else:
            return self.conv_block(self.interpolator(x, out_shape))


class ResNet(nn.Module):
    """Encoder / residual-bottleneck / decoder network.

    Structure: 7x7 initial conv -> ``updown_blocks`` strided downsamplings ->
    ``res_blocks`` residual blocks -> ``updown_blocks`` upsamplings -> 7x7
    final conv (+ optional output activation).  The ``forwardV`` argument
    selects one of six forward variants that differ only in which extra
    residual (long skip) connections are added.
    """

    def __init__(self, in_channels=1, out_channels=1, res_blocks=14, starting_nfeatures=64, updown_blocks=2, is_relu_leaky=True,
                 do_batchnorm=False, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans',
                 post_interp_convtrans=False, is3D=False):
        # res_blocks: should use 14 as that gives a number of trainable
        # parameters close to the number of possible pixel values in an
        # image 256x256 (original author's note).
        super(ResNet, self).__init__()

        # Select 2D or 3D layer classes once, then publish them as module
        # globals so the block classes above can use them.
        layers = {}
        if is3D:
            layers["layer_conv"] = nn.Conv3d
            layers["layer_convtrans"] = nn.ConvTranspose3d
            if do_batchnorm:
                layers["layer_norm"] = nn.BatchNorm3d
            else:
                layers["layer_norm"] = nn.InstanceNorm3d
            layers["layer_drop"] = nn.Dropout3d
            if is_replicatepad == 0:
                layers["layer_pad"] = nn.ReflectionPad3d
            elif is_replicatepad == 1:
                layers["layer_pad"] = nn.ReplicationPad3d
            layers["interp_mode"] = 'trilinear'
        else:
            layers["layer_conv"] = nn.Conv2d
            layers["layer_convtrans"] = nn.ConvTranspose2d
            if do_batchnorm:
                layers["layer_norm"] = nn.BatchNorm2d
            else:
                layers["layer_norm"] = nn.InstanceNorm2d
            layers["layer_drop"] = nn.Dropout2d
            if is_replicatepad == 0:
                layers["layer_pad"] = nn.ReflectionPad2d
            elif is_replicatepad == 1:
                layers["layer_pad"] = nn.ReplicationPad2d
            layers["interp_mode"] = 'bilinear'
        if is_relu_leaky:
            layers["act_relu"] = nn.PReLU
        else:
            layers["act_relu"] = nn.ReLU
        # NOTE(review): mutates module-level globals shared by every
        # instance of this module's classes -- see module docstring.
        globals().update(layers)

        self.forwardV = forwardV
        self.upinterp_algo = upinterp_algo

        # When upsampling via transposed conv, the interpolator is only a
        # fallback (shape fix-up); otherwise it IS the upsampling algorithm.
        interpolator = Interpolator(
            mode=layers["interp_mode"] if self.upinterp_algo == "convtrans" else self.upinterp_algo)

        # Initial convolution block: 7x7 conv with 3-pixel reflection pad.
        intialConv = [layer_pad(3),
                      layer_conv(in_channels, starting_nfeatures, 7),
                      layer_norm(starting_nfeatures),
                      act_relu()]

        # Downsampling [need to save the shape for upsample]; channel count
        # doubles at every step.
        downsam = []
        in_features = starting_nfeatures
        out_features = in_features*2
        for _ in range(updown_blocks):
            downsam.append(DownsamplingBlock(in_features, out_features))
            in_features = out_features
            out_features = in_features*2

        # Residual blocks at the bottleneck resolution.
        resblocks = []
        for _ in range(res_blocks):
            resblocks += [ResidualBlock(in_features, res_drop_prob)]

        # Upsampling; channel count halves at every step, mirroring downsam.
        upsam = []
        out_features = in_features//2
        for _ in range(updown_blocks):
            upsam.append(UpsamplingBlock(in_features, out_features,
                         self.upinterp_algo, interpolator, post_interp_convtrans))
            in_features = out_features
            out_features = in_features//2

        # Output layer: 7x7 conv back to out_channels.
        finalconv = [layer_pad(3),
                     layer_conv(starting_nfeatures, out_channels, 7), ]

        # Optional output activation; any other out_act value means "linear".
        if out_act == "sigmoid":
            finalconv += [nn.Sigmoid(), ]
        elif out_act == "relu":
            finalconv += [act_relu(), ]
        elif out_act == "tanh":
            finalconv += [nn.Tanh(), ]

        self.intialConv = nn.Sequential(*intialConv)
        self.downsam = nn.ModuleList(downsam)
        self.resblocks = nn.Sequential(*resblocks)
        self.upsam = nn.ModuleList(upsam)
        self.finalconv = nn.Sequential(*finalconv)

        # Bind the selected forward variant as an instance attribute.
        # NOTE(review): an unrecognised forwardV (>5 or <0) leaves the
        # inherited nn.Module.forward in place -- confirm callers only pass
        # 0..5.
        if self.forwardV == 0:
            self.forward = self.forwardV0
        elif self.forwardV == 1:
            self.forward = self.forwardV1
        elif self.forwardV == 2:
            self.forward = self.forwardV2
        elif self.forwardV == 3:
            self.forward = self.forwardV3
        elif self.forwardV == 4:
            self.forward = self.forwardV4
        elif self.forwardV == 5:
            self.forward = self.forwardV5

    def forwardV0(self, x):
        # v0: Original Version -- plain encoder/resblocks/decoder, no extra
        # long skips.  Encoder shapes are saved so each up block can be told
        # the exact spatial shape to restore.
        x = self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(x.shape[2:])
            x = downblock(x)
        x = self.resblocks(x)
        for i, upblock in enumerate(self.upsam):
            x = upblock(x, shapes[-1-i])
        return self.finalconv(x)

    def forwardV1(self, x):
        # v1: input is added to the final output (global residual).
        out = self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(out.shape[2:])
            out = downblock(out)
        out = self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1-i])
        return x + self.finalconv(out)

    def forwardV2(self, x):
        # v2: residual of v1 + input to the residual blocks added back with
        # the output of the residual blocks.
        out = self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(out.shape[2:])
            out = downblock(out)
        out = out + self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1-i])
        return x + self.finalconv(out)

    def forwardV3(self, x):
        # v3: residual of v2 + input of the initial conv added back with its
        # output.  NOTE(review): requires starting_nfeatures == in_channels
        # for the `x +` broadcast to be valid, or relies on broadcasting --
        # confirm intended usage.
        out = x + self.intialConv(x)
        shapes = []
        for downblock in self.downsam:
            shapes.append(out.shape[2:])
            out = downblock(out)
        out = out + self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1-i])
        return x + self.finalconv(out)

    def forwardV4(self, x):
        # v4: residual of v3 + output of the initial conv added back with the
        # input of the final conv.
        iniconv = x + self.intialConv(x)
        shapes = []
        if len(self.downsam) > 0:
            # First down block consumes iniconv; later ones chain on `out`.
            for i, downblock in enumerate(self.downsam):
                if i == 0:
                    shapes.append(iniconv.shape[2:])
                    out = downblock(iniconv)
                else:
                    shapes.append(out.shape[2:])
                    out = downblock(out)
        else:
            # No down/upsampling configured: bottleneck works on iniconv.
            out = iniconv
        out = out + self.resblocks(out)
        for i, upblock in enumerate(self.upsam):
            out = upblock(out, shapes[-1-i])
        out = iniconv + out
        return x + self.finalconv(out)

    def forwardV5(self, x):
        # v5: residual of v4 + individual down blocks paired with individual
        # up blocks (a stack of intermediate outputs is kept so each up
        # block's output is added to the matching down block's input).
        outs = [x + self.intialConv(x)]
        shapes = []
        for i, downblock in enumerate(self.downsam):
            shapes.append(outs[-1].shape[2:])
            outs.append(downblock(outs[-1]))
        outs[-1] = outs[-1] + self.resblocks(outs[-1])
        for i, upblock in enumerate(self.upsam):
            outs[-1] = upblock(outs[-1], shapes[-1-i])
            # Merge with the matching encoder level and pop the stack.
            outs[-1] = outs[-2] + outs.pop()
        return x + self.finalconv(outs.pop())
9,909
36.537879
257
py
DDoS
DDoS-master/models/unet3DvSeg_DeepSup.py
# from __future__ import print_function, division import torch import torch.nn as nn import torch.utils.data __author__ = "Kartik Prabhu, Mahantesh Pattadkal, and Soumick Chatterjee" __copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany" __credits__ = ["Kartik Prabhu", "Mahantesh Pattadkal", "Soumick Chatterjee"] __license__ = "GPL" __version__ = "1.0.0" __maintainer__ = "Soumick Chatterjee" __email__ = "soumick.chatterjee@ovgu.de" __status__ = "Production" class conv_block(nn.Module): """ Convolution Block """ def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True): super(conv_block, self).__init__() self.conv = nn.Sequential( nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(num_features=out_channels), nn.ReLU(inplace=True), nn.Conv3d(in_channels=out_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(num_features=out_channels), nn.ReLU(inplace=True) ) def forward(self, x): x = self.conv(x) return x class up_conv(nn.Module): """ Up Convolution Block """ # def __init__(self, in_ch, out_ch): def __init__(self, in_channels, out_channels, k_size=3, stride=1, padding=1, bias=True): super(up_conv, self).__init__() self.up = nn.Sequential( nn.Upsample(scale_factor=2), nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=k_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(num_features=out_channels), nn.ReLU(inplace=True)) def forward(self, x): x = self.up(x) return x class U_Net_DeepSup(nn.Module): """ UNet - Basic Implementation Input _ [batch * channel(# of channels of each image) * depth(# of frames) * height * width]. 
Paper : https://arxiv.org/abs/1505.04597 """ def __init__(self, in_ch=1, out_ch=1, n1=64): super(U_Net_DeepSup, self).__init__() filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16] # 64,128,256,512,1024 self.Maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2) self.Maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2) self.Maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2) self.Maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2) self.Conv1 = conv_block(in_ch, filters[0]) self.Conv2 = conv_block(filters[0], filters[1]) self.Conv3 = conv_block(filters[1], filters[2]) self.Conv4 = conv_block(filters[2], filters[3]) self.Conv5 = conv_block(filters[3], filters[4]) #1x1x1 Convolution for Deep Supervision self.Conv_d3 = conv_block(filters[1], 1) self.Conv_d4 = conv_block(filters[2], 1) self.Up5 = up_conv(filters[4], filters[3]) self.Up_conv5 = conv_block(filters[4], filters[3]) self.Up4 = up_conv(filters[3], filters[2]) self.Up_conv4 = conv_block(filters[3], filters[2]) self.Up3 = up_conv(filters[2], filters[1]) self.Up_conv3 = conv_block(filters[2], filters[1]) self.Up2 = up_conv(filters[1], filters[0]) self.Up_conv2 = conv_block(filters[1], filters[0]) self.Conv = nn.Conv3d(filters[0], out_ch, kernel_size=1, stride=1, padding=0) # self.active = torch.nn.Sigmoid() def forward(self, x): # print("unet") # print(x.shape) # print(padded.shape) e1 = self.Conv1(x) # print("conv1:") # print(e1.shape) e2 = self.Maxpool1(e1) e2 = self.Conv2(e2) # print("conv2:") # print(e2.shape) e3 = self.Maxpool2(e2) e3 = self.Conv3(e3) # print("conv3:") # print(e3.shape) e4 = self.Maxpool3(e3) e4 = self.Conv4(e4) # print("conv4:") # print(e4.shape) e5 = self.Maxpool4(e4) e5 = self.Conv5(e5) # print("conv5:") # print(e5.shape) d5 = self.Up5(e5) # print("d5:") # print(d5.shape) # print("e4:") # print(e4.shape) d5 = torch.cat((e4, d5), dim=1) d5 = self.Up_conv5(d5) # print("upconv5:") # print(d5.size) d4 = self.Up4(d5) # print("d4:") # print(d4.shape) d4 = torch.cat((e3, d4), dim=1) d4 = self.Up_conv4(d4) d4_out = 
self.Conv_d4(d4) # print("upconv4:") # print(d4.shape) d3 = self.Up3(d4) d3 = torch.cat((e2, d3), dim=1) d3 = self.Up_conv3(d3) d3_out = self.Conv_d3(d3) # print("upconv3:") # print(d3.shape) d2 = self.Up2(d3) d2 = torch.cat((e1, d2), dim=1) d2 = self.Up_conv2(d2) # print("upconv2:") # print(d2.shape) out = self.Conv(d2) # print("out:") # print(out.shape) # d1 = self.active(out) return [out, d3_out , d4_out]
5,263
29.783626
110
py