code stringlengths 101 5.91M |
|---|
_require_initialized
def get_worker_info(worker_name=None):
    """Return the WorkerInfo for *worker_name*, or for the current worker when no name is given."""
    agent = _get_current_rpc_agent()
    if not worker_name:
        return agent.get_worker_info()
    return agent.get_worker_info(worker_name)
def _clean_up_temporary_files(dataset_dir):
    """Delete the four downloaded data/label archive files from *dataset_dir*."""
    temp_files = (_TRAIN_DATA_FILENAME, _TRAIN_LABELS_FILENAME,
                  _TEST_DATA_FILENAME, _TEST_LABELS_FILENAME)
    for name in temp_files:
        tf.gfile.Remove(os.path.join(dataset_dir, name))
def get_padding_value(padding, kernel_size, **kwargs):
    """Resolve a padding spec ('same' / 'valid' / explicit value) for a conv layer.

    Returns a tuple ``(padding, dynamic)`` where *dynamic* is True only when
    'same' padding cannot be expressed statically and must be computed at
    runtime from the input size.
    """
    if not isinstance(padding, str):
        # Explicit numeric padding: use it verbatim, never dynamic.
        return padding, False
    mode = padding.lower()
    if mode == 'valid':
        return 0, False
    if mode == 'same':
        if is_static_pad(kernel_size, **kwargs):
            # 'same' collapses to a fixed symmetric pad for this kernel config.
            return get_padding(kernel_size, **kwargs), False
        # Padding depends on the input size; defer to runtime.
        return 0, True
    # Any other string falls back to the default padding computation.
    return get_padding(kernel_size, **kwargs), False
def partial_dtype_fmt():
    """Format the dtype description string for a partially-read longdouble field."""
    itemsize = np.dtype('longdouble').itemsize
    offset = partial_ld_offset()
    return dt_fmt().format(itemsize, offset, offset + itemsize)
def test_categorical_column_with_numbers():
    """Numeric categories containing NaNs must survive a fit/sample round trip."""
    # 20 rows: the category column mixes the values 1, 2 and NaN.
    category_values = [1, 2, 1, 2, 1, 2, np.nan, 1, 1, np.nan,
                       2, 2, np.nan, 2, 1, 1, np.nan, 1, 2, 2]
    data = pd.DataFrame({
        'category_col': category_values,
        'numerical_col': np.random.rand(20),
    })
    metadata = SingleTableMetadata()
    metadata.detect_from_dataframe(data)
    synthesizer = GaussianCopulaSynthesizer(metadata)
    synthesizer.fit(data)
    synthetic_data = synthesizer.sample(20)
    # Both columns must keep their float dtype after sampling.
    expected_dtypes = pd.Series({'category_col': 'float64', 'numerical_col': 'float64'})
    pd.testing.assert_series_equal(synthetic_data.dtypes, expected_dtypes)
    # Sampled categories are exactly {1, 2}, with a single NaN among uniques.
    unique_values = synthetic_data['category_col'].unique()
    assert pd.isna(unique_values).sum() == 1
    assert set(unique_values[~pd.isna(unique_values)]) == {1, 2}
class ResidualCNN(nn.Module):
    """Residual block of two Conv2d stages with pre-activation layer norm.

    Each conv keeps the spatial size (padding = kernel // 2) so the input can
    be added back as a skip connection.
    """

    def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
        super(ResidualCNN, self).__init__()
        pad = kernel // 2
        self.cnn1 = nn.Conv2d(in_channels, out_channels, kernel, stride, padding=pad)
        self.cnn2 = nn.Conv2d(out_channels, out_channels, kernel, stride, padding=pad)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.layer_norm1 = CNNLayerNorm(n_feats)
        self.layer_norm2 = CNNLayerNorm(n_feats)

    def forward(self, x):
        residual = x
        # Stage 1: norm -> leaky ReLU -> dropout -> conv.
        out = self.cnn1(self.dropout1(F.leaky_relu(self.layer_norm1(x))))
        # Stage 2 mirrors stage 1, then the skip connection is added.
        out = self.cnn2(self.dropout2(F.leaky_relu(self.layer_norm2(out))))
        return out + residual
@unittest.skipIf(not workspace.C.use_mkldnn, 'No MKLDNN support.')
class ExpandDimsSqueezeTest(hu.HypothesisTestCase):
    """Device checks and CPU/iDEEP fallback checks for Squeeze / ExpandDims.

    NOTE(review): the decorator names were stripped during extraction; they
    are restored here as ``unittest.skipIf`` (class) and hypothesis ``given``
    (methods), the conventional pairing for ``st.*`` strategies in caffe2
    hypothesis tests -- confirm against the original file.
    """

    @given(squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
           inplace=st.booleans(), **mu.gcs)
    def test_squeeze(self, squeeze_dims, inplace, gc, dc):
        """Squeeze must produce identical results on every device in ``dc``."""
        # Dimensions to be squeezed get extent 1; the rest are random.
        shape = [1 if dim in squeeze_dims else np.random.randint(1, 5)
                 for dim in range(4)]
        X = np.random.rand(*shape).astype(np.float32)
        op = core.CreateOperator('Squeeze', 'X', 'X' if inplace else 'Y',
                                 dims=squeeze_dims)
        self.assertDeviceChecks(dc, op, [X], [0])

    @given(squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
           inplace=st.booleans(), **mu.gcs_cpu_ideep)
    def test_squeeze_fallback(self, squeeze_dims, inplace, gc, dc):
        """Squeeze run under both device options must agree blob-for-blob."""
        shape = [1 if dim in squeeze_dims else np.random.randint(1, 5)
                 for dim in range(4)]
        X = np.random.rand(*shape).astype(np.float32)
        op0 = core.CreateOperator('Squeeze', 'X0', 'X0' if inplace else 'Y0',
                                  dims=squeeze_dims, device_option=dc[0])
        workspace.FeedBlob('X0', X, dc[0])
        workspace.RunOperatorOnce(op0)
        Y0 = workspace.FetchBlob('X0' if inplace else 'Y0')
        op1 = core.CreateOperator('Squeeze', 'X1', 'X1' if inplace else 'Y1',
                                  dims=squeeze_dims, device_option=dc[1])
        # NOTE(review): the blob for op1 is fed with dc[0]'s device option in
        # the original; kept as-is -- confirm whether dc[1] was intended.
        workspace.FeedBlob('X1', X, dc[0])
        workspace.RunOperatorOnce(op1)
        Y1 = workspace.FetchBlob('X1' if inplace else 'Y1')
        if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
            print(Y1.flatten())
            print(Y0.flatten())
            print(np.max(np.abs(Y1 - Y0)))
            self.fail('Squeeze fallback results differ between devices.')

    @given(squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
           inplace=st.booleans(), **mu.gcs)
    def test_expand_dims(self, squeeze_dims, inplace, gc, dc):
        """ExpandDims must produce identical results on every device in ``dc``."""
        # Build a squeezed shape, then ask ExpandDims to re-insert the 1s.
        oshape = [1 if dim in squeeze_dims else np.random.randint(2, 5)
                  for dim in range(4)]
        nshape = [s for s in oshape if s != 1]
        expand_dims = [i for i in range(len(oshape)) if oshape[i] == 1]
        X = np.random.rand(*nshape).astype(np.float32)
        op = core.CreateOperator('ExpandDims', 'X', 'X' if inplace else 'Y',
                                 dims=expand_dims)
        self.assertDeviceChecks(dc, op, [X], [0])

    @given(squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
           inplace=st.booleans(), **mu.gcs_cpu_ideep)
    def test_expand_dims_fallback(self, squeeze_dims, inplace, gc, dc):
        """ExpandDims run under both device options must agree blob-for-blob."""
        oshape = [1 if dim in squeeze_dims else np.random.randint(2, 5)
                  for dim in range(4)]
        nshape = [s for s in oshape if s != 1]
        expand_dims = [i for i in range(len(oshape)) if oshape[i] == 1]
        X = np.random.rand(*nshape).astype(np.float32)
        op0 = core.CreateOperator('ExpandDims', 'X0', 'X0' if inplace else 'Y0',
                                  dims=expand_dims, device_option=dc[0])
        workspace.FeedBlob('X0', X, dc[0])
        workspace.RunOperatorOnce(op0)
        Y0 = workspace.FetchBlob('X0' if inplace else 'Y0')
        op1 = core.CreateOperator('ExpandDims', 'X1', 'X1' if inplace else 'Y1',
                                  dims=expand_dims, device_option=dc[1])
        # NOTE(review): fed with dc[0] as in the original -- confirm intent.
        workspace.FeedBlob('X1', X, dc[0])
        workspace.RunOperatorOnce(op1)
        Y1 = workspace.FetchBlob('X1' if inplace else 'Y1')
        if not np.allclose(Y0, Y1, atol=0.01, rtol=0.01):
            print(Y1.flatten())
            print(Y0.flatten())
            print(np.max(np.abs(Y1 - Y0)))
            self.fail('ExpandDims fallback results differ between devices.')
class _DenseLayer(nn.Sequential):
    """BN -> ReLU -> 1x1 conv -> BN -> ReLU -> 3x3 conv bottleneck layer.

    The forward pass concatenates the layer's output with its input along the
    channel dimension (dense connectivity).
    """

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        super(_DenseLayer, self).__init__()
        inter_channels = bn_size * growth_rate
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(num_input_features, inter_channels,
                                           kernel_size=1, stride=1, bias=False))
        self.add_module('norm2', nn.BatchNorm2d(inter_channels))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(inter_channels, growth_rate,
                                           kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = drop_rate
        if self.drop_rate > 0:
            self.add_module('dropout', nn.Dropout(p=self.drop_rate))

    def forward(self, x):
        new_features = super(_DenseLayer, self).forward(x)
        # Stack input and freshly computed features channel-wise.
        return torch.cat([x, new_features], 1)
def make_replay_loader(replay_dir, max_size, batch_size, num_workers, save_snapshot, nstep, discount):
    """Build a DataLoader over a ReplayBuffer, splitting capacity across workers."""
    # Each worker owns an equal share of the buffer; guard num_workers == 0.
    per_worker_capacity = max_size // max(1, num_workers)
    buffer = ReplayBuffer(replay_dir, per_worker_capacity, num_workers, nstep,
                          discount, fetch_every=1000, save_snapshot=save_snapshot)
    return torch.utils.data.DataLoader(buffer,
                                       batch_size=batch_size,
                                       num_workers=num_workers,
                                       pin_memory=True,
                                       worker_init_fn=_worker_init_fn)
class TArtPointVisitor(object):
    """SWIG-generated proxy for the native C++ ``TArtPointVisitor``.

    Every method simply delegates to the ``_snap`` extension module; the
    attributes below are thin property wrappers over the C++ members.
    NOTE(review): the member names suggest a DFS articulation-point visitor
    (low-link table, parent map, articulation set, timestamp) -- confirm
    against the SNAP documentation.
    """
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    # Wrapped C++ data members.
    VnLowH = _swig_property(_snap.TArtPointVisitor_VnLowH_get, _snap.TArtPointVisitor_VnLowH_set)
    ParentH = _swig_property(_snap.TArtPointVisitor_ParentH_get, _snap.TArtPointVisitor_ParentH_set)
    ArtSet = _swig_property(_snap.TArtPointVisitor_ArtSet_get, _snap.TArtPointVisitor_ArtSet_set)
    Time = _swig_property(_snap.TArtPointVisitor_Time_get, _snap.TArtPointVisitor_Time_set)
    def __init__(self, *args):
        # swiginit binds this proxy to a freshly constructed C++ object.
        _snap.TArtPointVisitor_swiginit(self, _snap.new_TArtPointVisitor(*args))
    def DiscoverNode(self, NId):
        """Delegate: called when node *NId* is first discovered."""
        return _snap.TArtPointVisitor_DiscoverNode(self, NId)
    def FinishNode(self, NId):
        """Delegate: called when node *NId* is finished."""
        return _snap.TArtPointVisitor_FinishNode(self, NId)
    def ExamineEdge(self, NId1, NId2):
        """Delegate: called for every examined edge (NId1, NId2)."""
        return _snap.TArtPointVisitor_ExamineEdge(self, NId1, NId2)
    def TreeEdge(self, NId1, NId2):
        """Delegate: called for tree edges."""
        return _snap.TArtPointVisitor_TreeEdge(self, NId1, NId2)
    def BackEdge(self, NId1, NId2):
        """Delegate: called for back edges."""
        return _snap.TArtPointVisitor_BackEdge(self, NId1, NId2)
    def FwdEdge(self, NId1, NId2):
        """Delegate: called for forward edges."""
        return _snap.TArtPointVisitor_FwdEdge(self, NId1, NId2)
    __swig_destroy__ = _snap.delete_TArtPointVisitor
class PcxImageFile(ImageFile.ImageFile):
    """Image plugin for ZSoft Paintbrush (PCX) files.

    ``_open`` parses the fixed 128-byte header, determines the PIL mode and
    raw decoder mode, and registers a single 'pcx' decoder tile.
    """
    format = 'PCX'
    format_description = 'Paintbrush'
    def _open(self):
        # Read the fixed-size PCX header.
        s = self.fp.read(128)
        if (not _accept(s)):
            raise SyntaxError('not a PCX file')
        # Image bounds: (x0, y0, x1 + 1, y1 + 1) from four 16-bit words.
        bbox = (i16(s, 4), i16(s, 6), (i16(s, 8) + 1), (i16(s, 10) + 1))
        if ((bbox[2] <= bbox[0]) or (bbox[3] <= bbox[1])):
            raise SyntaxError('bad PCX image size')
        logger.debug('BBox: %s %s %s %s', *bbox)
        # Format details from the header bytes.
        version = i8(s[1])
        bits = i8(s[3])
        planes = i8(s[65])
        stride = i16(s, 66)
        logger.debug('PCX version %s, bits %s, planes %s, stride %s', version, bits, planes, stride)
        self.info['dpi'] = (i16(s, 12), i16(s, 14))
        if ((bits == 1) and (planes == 1)):
            # 1-bit monochrome.
            mode = rawmode = '1'
        elif ((bits == 1) and (planes in (2, 4))):
            # 2- or 4-plane paletted image; the palette lives in the header.
            mode = 'P'
            rawmode = ('P;%dL' % planes)
            self.palette = ImagePalette.raw('RGB', s[16:64])
        elif ((version == 5) and (bits == 8) and (planes == 1)):
            # 8-bit image: greyscale unless a 769-byte trailer (0x0C marker
            # + 256 RGB triplets) maps some index i to a non-grey colour, in
            # which case the image is palette-mapped.
            mode = rawmode = 'L'
            self.fp.seek((- 769), io.SEEK_END)
            s = self.fp.read(769)
            if ((len(s) == 769) and (i8(s[0]) == 12)):
                for i in range(256):
                    if (s[((i * 3) + 1):((i * 3) + 4)] != (o8(i) * 3)):
                        mode = rawmode = 'P'
                        break
                if (mode == 'P'):
                    self.palette = ImagePalette.raw('RGB', s[1:])
            # Rewind to the start of the pixel data after probing the trailer.
            self.fp.seek(128)
        elif ((version == 5) and (bits == 8) and (planes == 3)):
            # 24-bit colour, decoded via the 'RGB;L' raw mode.
            mode = 'RGB'
            rawmode = 'RGB;L'
        else:
            raise OSError('unknown PCX mode')
        self.mode = mode
        self._size = ((bbox[2] - bbox[0]), (bbox[3] - bbox[1]))
        # The decoder expects a bounding box anchored at the origin.
        bbox = ((0, 0) + self.size)
        logger.debug('size: %sx%s', *self.size)
        self.tile = [('pcx', bbox, self.fp.tell(), (rawmode, (planes * stride)))]
def to_floatTensor(x: "list | tuple | np.ndarray | torch.Tensor") -> torch.Tensor:
    """Convert *x* to a float32 ``torch.Tensor``.

    Tensors are cast with ``.float()``, ndarrays are wrapped via
    ``torch.from_numpy`` (sharing memory, then cast), and plain Python
    sequences are copied into a new tensor.

    Note: the original annotation was a bare tuple ``(list, tuple,
    np.ndarray)`` -- not a valid type hint -- and omitted ``torch.Tensor``
    even though the first branch accepts one; both are fixed here.
    """
    if isinstance(x, torch.Tensor):
        return x.float()
    if isinstance(x, np.ndarray):
        return torch.from_numpy(x).float()
    return torch.tensor(x, dtype=torch.float)
class SequenceDataset():
    """Holds aligned sequence slices and serves them in shuffled batches.

    *data* is a list of parallel streams: ``data[i][j]`` is stream *i* of
    sequence *j*. With ``batch_size=None`` each sequence is one item;
    otherwise every sequence is cut into windows of ``batch_size`` steps,
    never starting a window with fewer than ``minimum_size`` steps left.
    """

    def __init__(self, data, batch_size, number_batches, minimum_size=10):
        self.current_batch = 0
        self.number_batches = number_batches
        self.items = []
        n_streams = len(data)
        for seq_idx in range(len(data[0])):
            if batch_size is None:
                # Whole sequence as one item, one entry per stream.
                self.items.append([data[s][seq_idx] for s in range(n_streams)])
                continue
            seq_len = len(data[0][seq_idx])
            # Window starts advance by batch_size; stop early enough that at
            # least minimum_size steps remain for the final window.
            for start in range(0, seq_len - minimum_size + 1, batch_size):
                window = [data[s][seq_idx][start:start + batch_size]
                          for s in range(n_streams)]
                self.items.append(window)
        self.shuffle()

    def shuffle(self):
        """Randomize the item order in place."""
        numpy.random.shuffle(self.items)

    def iterate(self, update=True):
        """Yield ``number_batches`` items, wrapping around the item list."""
        for offset in range(self.number_batches):
            yield self.items[(self.current_batch + offset) % len(self.items)]
        if update:
            self.update()

    def update(self):
        """Advance the cursor; reshuffle and restart when the epoch is done."""
        if self.current_batch + self.number_batches >= len(self.items):
            self.shuffle()
            self.current_batch = 0
        else:
            self.current_batch += self.number_batches
def has_valid_keypoint(obj):
    """Return True when the largest 'keypoints' value of *obj* is non-zero."""
    return max(obj['keypoints']) != 0
def get_single_dataset(data_dir, FaceDataset, data_name='', train=True, label=None, img_size=256, map_size=32, transform=None, debug_subset_size=None, UUID=(- 1)):
    """Instantiate one face anti-spoofing dataset by name.

    *data_name* selects the preprocessed sub-directory under *data_dir*;
    *train* picks the 'train' or 'test' split (the test split additionally
    receives *map_size*). When *debug_subset_size* is set, only the first N
    samples are kept via ``torch.utils.data.Subset``.

    Raises:
        ValueError: if *data_name* is not one of the known datasets (the
            original code fell through to an unbound-variable NameError).
    """
    # Dataset name -> preprocessed-data sub-directory (deduplicates the
    # original eight near-identical branches).
    subdirs = {
        'OULU': 'OULU-NPU/preposess',
        'CASIA_MFSD': 'CASIA_faceAntisp/preposess',
        'Replay_attack': 'Replay/preposess',
        'MSU_MFSD': 'MSU-MFSD/preposess',
    }
    if data_name not in subdirs:
        raise ValueError('Unknown data_name: %r' % (data_name,))
    root = os.path.join(data_dir, subdirs[data_name])
    if train:
        data_set = FaceDataset(data_name, root, split='train', label=label,
                               transform=transform, UUID=UUID)
    else:
        data_set = FaceDataset(data_name, root, split='test', label=label,
                               transform=transform, map_size=map_size, UUID=UUID)
    if debug_subset_size is not None:
        data_set = torch.utils.data.Subset(data_set, range(0, debug_subset_size))
    return data_set
def test_box_center_distance():
    """Distance between the centers of two axis-aligned boxes."""
    # Box centers are (2, 2) and (3, 2): exactly one unit apart.
    box_a = np.array([1, 1, 3, 3])
    box_b = np.array([2, 2, 4, 2])
    assert utils.box_center_distance(box_a, box_b) == 1
def add_block_f(inputs, outputs):
    """Three (3x3 Conv -> LeakyReLU(0.2) -> BatchNorm) stages.

    The first conv maps *inputs* -> *outputs* channels; the remaining two
    stay at *outputs* channels. Spatial size is preserved (padding=1).
    """
    def stage(in_channels):
        return [nn.Conv2d(in_channels=in_channels, out_channels=outputs,
                          kernel_size=3, padding=1),
                nn.LeakyReLU(0.2),
                nn.BatchNorm2d(outputs)]
    layers = stage(inputs) + stage(outputs) + stage(outputs)
    return nn.Sequential(*layers)
('mlm_seq_loader')
class MLMMaskedSequenceDatasetReader(DatasetReader):
    """Dataset reader producing masked-language-model training instances.

    Input is a TSV file with lines of either ``id<TAB>text`` or
    ``id<TAB>title<TAB>text``. Sequence tokens are masked with
    ``mask_probability`` using the strategy named by
    ``bias_sampling_method``: 'None' (uniform, optionally whole-word), or
    'tf' / 'log-tf' (biased towards low term-frequency tokens). A title,
    when present, is masked independently and prefixed with [CLS].
    """
    def __init__(self, tokenizer: Tokenizer=None, token_indexers: Dict[(str, TokenIndexer)]=None, max_doc_length: int=(- 1), min_doc_length: int=(- 1), mlm_mask_whole_words: bool=True, mask_probability: float=0.1, mlm_mask_replace_probability: float=0.5, mlm_mask_random_probability: float=0.5, bias_sampling_method='None', bias_merge_alpha=0.5, make_multiple_of=8, lazy: bool=False) -> None:
        super().__init__(lazy)
        self._tokenizer = tokenizer
        self._token_indexers = (token_indexers or {'tokens': SingleIdTokenIndexer(lowercase_tokens=True)})
        self.max_seq_length = max_doc_length
        self.min_seq_length = min_doc_length
        # Title limits are fixed; -1 disables the minimum-length constraint.
        self.max_title_length = 30
        self.min_title_length = (- 1)
        self.mask_title = False
        self.token_type = 'full'
        if (type(tokenizer) == PretrainedTransformerTokenizer):
            # HuggingFace tokenizer: take special-token ids from its vocab.
            self.token_type = 'hf'
            self.padding_value = Token(text='[PAD]', text_id=tokenizer.tokenizer.pad_token_id)
            self.mask_value = Token(text='[MASK]', text_id=tokenizer.tokenizer.mask_token_id)
            self.cls_value = Token(text='[CLS]', text_id=tokenizer.tokenizer.cls_token_id)
        else:
            # NOTE(review): hard-coded ids 0/2, and no cls_value is set on
            # this branch, so titled inputs require the 'hf' path -- confirm.
            self.padding_value = Token(text='', text_id=0)
            self.mask_value = Token(text='[MASK]', text_id=2)
        self.mask_probability = mask_probability
        self.mlm_mask_replace_probability = mlm_mask_replace_probability
        self.mlm_mask_random_probability = mlm_mask_random_probability
        self.mlm_mask_whole_words = mlm_mask_whole_words
        self.bias_sampling_method = bias_sampling_method
        self.bias_merge_alpha = bias_merge_alpha
        # Running per-token frequency counts (initialized to 1) that drive
        # the tf-biased masking strategies.
        self.token_counter = np.ones(tokenizer.tokenizer.vocab_size, dtype=int)
        self.make_multiple_of = make_multiple_of
    def _read(self, file_path):
        """Yield one Instance per non-empty TSV line (2 or 3 columns)."""
        with open(cached_path(file_path), 'r', encoding='utf8') as data_file:
            for (line_num, line) in enumerate(data_file):
                line = line.strip('\n')
                if (not line):
                    continue
                line_parts = line.split('\t')
                if (len(line_parts) == 2):
                    (seq_id, seq_text) = line_parts
                    seq_title = None
                elif (len(line_parts) == 3):
                    (seq_id, seq_title, seq_text) = line_parts
                    # Skip rows with an empty title or body.
                    if ((seq_title == '') or (seq_text == '')):
                        continue
                else:
                    raise ConfigurationError(('Invalid line format: %s (line number %d)' % (line, (line_num + 1))))
                (yield self.text_to_instance(seq_id, seq_text, seq_title))
    def text_to_instance(self, seq_id: str, seq_text: str, seq_title: str) -> Instance:
        """Tokenize, pad and mask one sequence (plus optional title).

        Returns an Instance with the masked and original token fields, the
        binary mask positions, and a per-token below-median-tf indicator.
        NOTE(review): ``tf_class`` is only bound on the non-'full' branch
        below but is used in both return statements -- 'full'-mode inputs
        would raise NameError here; confirm intended usage.
        """
        seq_id_field = MetadataField(seq_id)
        # Cap raw text at 10k characters before tokenization.
        seq_tokenized = self._tokenizer.tokenize(seq_text[:10000])
        if (self.max_seq_length > (- 1)):
            seq_tokenized = seq_tokenized[:self.max_seq_length]
        if ((self.min_seq_length > (- 1)) and (len(seq_tokenized) < self.min_seq_length)):
            seq_tokenized = (seq_tokenized + ([self.padding_value] * (self.min_seq_length - len(seq_tokenized))))
        # Pad so the length is a multiple of make_multiple_of.
        if ((self.make_multiple_of > (- 1)) and ((len(seq_tokenized) % self.make_multiple_of) != 0)):
            seq_tokenized = (seq_tokenized + ([self.padding_value] * (self.make_multiple_of - (len(seq_tokenized) % self.make_multiple_of))))
        seq_tokenized_orig = copy.deepcopy(seq_tokenized)
        mask_binary = ([0] * len(seq_tokenized))
        # Wordpiece continuation marker used for whole-word masking.
        suffix = '##'
        if (self.token_type == 'full'):
            # Non-HF tokens: simple independent Bernoulli masking.
            for i in range(len(seq_tokenized)):
                if (random.uniform(0, 1) < self.mask_probability):
                    if (random.uniform(0, 1) < self.mlm_mask_replace_probability):
                        seq_tokenized[i] = self.mask_value
                        mask_binary[i] = 1
        else:
            # Update the running term frequencies and flag tokens whose
            # frequency is below this sequence's median.
            tfs = np.ndarray(len(seq_tokenized_orig))
            for (i, t) in enumerate(seq_tokenized_orig):
                self.token_counter[t.text_id] += 1
                tfs[i] = self.token_counter[t.text_id]
            tf_class = (tfs < np.median(tfs))
            if (self.bias_sampling_method == 'None'):
                for i in range(len(seq_tokenized)):
                    replace_with_mask = False
                    replace_with_random = False
                    # NOTE(review): this branch keys word boundaries off
                    # startswith('##') while the tf branches use
                    # endswith('##') -- confirm which convention the
                    # tokenizer actually produces.
                    if ((i == 0) or ((not self.mlm_mask_whole_words) or seq_tokenized_orig[(i - 1)].text.startswith(suffix))):
                        if (random.uniform(0, 1) < self.mask_probability):
                            if (random.uniform(0, 1) < self.mlm_mask_replace_probability):
                                replace_with_mask = True
                                seq_tokenized[i] = self.mask_value
                            elif (random.uniform(0, 1) < self.mlm_mask_random_probability):
                                # Replace with a uniformly random vocab token.
                                replace_with_random = True
                                id_ = random.randint(0, self._tokenizer.tokenizer.vocab_size)
                                tok = self._tokenizer.tokenizer.convert_ids_to_tokens(id_)
                                seq_tokenized[i] = Token(text=tok, text_id=id_)
                            mask_binary[i] = 1
                            # Whole-word masking: extend the same treatment
                            # over the following continuation pieces.
                            if (self.mlm_mask_whole_words and (not seq_tokenized_orig[i].text.startswith(suffix))):
                                for t in range((i + 1), len(seq_tokenized)):
                                    if (replace_with_mask == True):
                                        seq_tokenized[t] = self.mask_value
                                    elif (replace_with_random == True):
                                        id_ = random.randint(0, self._tokenizer.tokenizer.vocab_size)
                                        tok = self._tokenizer.tokenizer.convert_ids_to_tokens(id_)
                                        seq_tokenized[t] = Token(text=tok, text_id=id_)
                                    mask_binary[t] = 1
                                    if seq_tokenized_orig[t].text.startswith(suffix):
                                        break
            elif ((self.bias_sampling_method == 'tf') or (self.bias_sampling_method == 'log-tf')):
                if (self.bias_sampling_method == 'log-tf'):
                    tfs = np.log2(tfs)
                # Rarer tokens get proportionally higher masking probability,
                # renormalized to average mask_probability and capped at 0.9.
                probability = (tfs.sum() / tfs)
                probability /= probability.max()
                probability *= self.mask_probability
                probability = (probability * (self.mask_probability / probability.mean()))
                probability[(probability > 0.9)] = 0.9
                masks = torch.bernoulli(torch.from_numpy(probability))
                for i in range(len(seq_tokenized)):
                    if (masks[i] == 1):
                        replace_with_mask = False
                        if (random.uniform(0, 1) < self.mlm_mask_replace_probability):
                            replace_with_mask = True
                            seq_tokenized[i] = self.mask_value
                        mask_binary[i] = 1
                        # Extend the mask backwards over preceding pieces.
                        if ((i > 0) and (not seq_tokenized_orig[(i - 1)].text.endswith(suffix))):
                            for t in list(range(0, (i - 1)))[::(- 1)]:
                                if (replace_with_mask == True):
                                    seq_tokenized[t] = self.mask_value
                                mask_binary[t] = 1
                                if seq_tokenized_orig[t].text.endswith(suffix):
                                    break
                        # And forwards over following pieces.
                        if (not seq_tokenized_orig[i].text.endswith(suffix)):
                            for t in range((i + 1), len(seq_tokenized)):
                                if (replace_with_mask == True):
                                    seq_tokenized[t] = self.mask_value
                                mask_binary[t] = 1
                                if seq_tokenized_orig[t].text.endswith(suffix):
                                    break
        seq_field = TextField(seq_tokenized, self._token_indexers)
        seq_field_orig = TextField(seq_tokenized_orig, self._token_indexers)
        if (seq_title != None):
            title_tokenized = self._tokenizer.tokenize(seq_title)
            if (self.max_title_length > (- 1)):
                title_tokenized = title_tokenized[:self.max_title_length]
            if ((self.min_title_length > (- 1)) and (len(title_tokenized) < self.min_title_length)):
                title_tokenized = (title_tokenized + ([self.padding_value] * (self.min_title_length - len(title_tokenized))))
            # Prepend [CLS], then mask the title independently of the body.
            title_tokenized.insert(0, self.cls_value)
            title_tokenized_masked = copy.deepcopy(title_tokenized)
            title_mask_binary = ([0] * len(title_tokenized_masked))
            for i in range(len(title_tokenized_masked)):
                if (random.uniform(0, 1) < self.mask_probability):
                    if (random.uniform(0, 1) < self.mlm_mask_replace_probability):
                        replace_with_mask = True
                        title_tokenized_masked[i] = self.mask_value
                        title_mask_binary[i] = 1
            title_field = TextField(title_tokenized, self._token_indexers)
            title_field_masked = TextField(title_tokenized_masked, self._token_indexers)
            return Instance({'seq_id': seq_id_field, 'title_tokens': title_field_masked, 'title_tokens_original': title_field, 'title_tokens_mask': ArrayField(np.array(title_mask_binary)), 'seq_masked': ArrayField(np.array(mask_binary)), 'seq_tf_info': ArrayField(np.array(tf_class)), 'seq_tokens': seq_field, 'seq_tokens_original': seq_field_orig})
        else:
            return Instance({'seq_id': seq_id_field, 'seq_masked': ArrayField(np.array(mask_binary)), 'seq_tf_info': ArrayField(np.array(tf_class)), 'seq_tokens': seq_field, 'seq_tokens_original': seq_field_orig})
def test_read_meshes():
    """Read every registered test mesh and verify its basic invariants."""
    from sfepy.discrete.fem import Mesh

    def _read_one(index, filename, prefix):
        # Returns True on success; reports progress/failures via tst.
        tst.report('%d. mesh: %s' % (index + 1, filename))
        try:
            mesh = Mesh.from_file(filename, prefix_dir=prefix)
        except Exception as exc:
            tst.report(exc)
            tst.report('read failed!')
            return False
        try:
            # Coordinates must match the declared dimension and node count;
            # cell counts must agree with the cmesh bookkeeping.
            assert_(mesh.dim == mesh.coors.shape[1])
            assert_(mesh.n_nod == mesh.coors.shape[0])
            assert_(mesh.n_nod == mesh.cmesh.vertex_groups.shape[0])
            assert_(mesh.n_el == mesh.cmesh.num[mesh.cmesh.tdim])
        except ValueError as exc:
            tst.report(exc)
            tst.report('read assertion failed!')
            return False
        tst.report('read ok')
        return True

    conf_dir = op.dirname(__file__)
    oks = [_read_one(ii, fname, conf_dir)
           for ii, fname in enumerate(filename_meshes)]
    assert_(all(oks))
class Upsample2DBlock(nn.Module):
    """2x spatial upsampling: ConvTranspose2d(k=2, s=2) -> BatchNorm -> ReLU.

    Only kernel_size == stride == 2 is supported (asserted).
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(Upsample2DBlock, self).__init__()
        assert kernel_size == 2
        assert stride == 2
        layers = [
            nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size,
                               stride=stride, padding=0, output_padding=0),
            nn.BatchNorm2d(out_planes),
            nn.ReLU(True),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
def convert(idx):
    """Decode one Waymo TFRecord sequence into per-frame pickle files.

    Reads ``fnames[idx]``, decodes every frame and its annotations with
    ``waymo_decoder``, and writes ``seq_{idx}_frame_{n}.pkl`` files into
    LIDAR_PATH (decoded frames) and ANNO_PATH (decoded annotations).
    """
    global fnames
    fname = fnames[idx]
    dataset = tf.data.TFRecordDataset(fname, compression_type='')
    for (frame_id, data) in enumerate(dataset):
        # Parse the serialized protobuf record into a Frame message.
        frame = dataset_pb2.Frame()
        frame.ParseFromString(bytearray(data.numpy()))
        decoded_frame = waymo_decoder.decode_frame(frame, frame_id)
        decoded_annos = waymo_decoder.decode_annos(frame, frame_id)
        # One pickle per frame, written to the two output directories.
        with open(os.path.join(LIDAR_PATH, 'seq_{}_frame_{}.pkl'.format(idx, frame_id)), 'wb') as f:
            pickle.dump(decoded_frame, f)
        with open(os.path.join(ANNO_PATH, 'seq_{}_frame_{}.pkl'.format(idx, frame_id)), 'wb') as f:
            pickle.dump(decoded_annos, f)
class TRStr(object):
    """SWIG-generated proxy for the native C++ ``TRStr`` string class.

    Every method delegates straight to the ``_snap`` extension module.
    NOTE(review): the wrapped members ``Bf`` and ``Refs`` are presumably the
    character buffer and reference count -- confirm in the SNAP docs.
    """
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    Bf = _swig_property(_snap.TRStr_Bf_get, _snap.TRStr_Bf_set)
    Refs = _swig_property(_snap.TRStr_Refs_get, _snap.TRStr_Refs_set)
    __swig_destroy__ = _snap.delete_TRStr
    def __init__(self, *args):
        # swiginit binds this proxy to a freshly constructed C++ object.
        _snap.TRStr_swiginit(self, _snap.new_TRStr(*args))
    def Save(self, SOut, IsSmall):
        """Delegate: serialize to an output stream."""
        return _snap.TRStr_Save(self, SOut, IsSmall)
    def GetMemUsed(self):
        return _snap.TRStr_GetMemUsed(self)
    def MkRef(self):
        """Delegate: increment the reference count."""
        return _snap.TRStr_MkRef(self)
    def UnRef(self):
        """Delegate: decrement the reference count."""
        return _snap.TRStr_UnRef(self)
    def CStr(self, *args):
        return _snap.TRStr_CStr(self, *args)
    def Empty(self):
        return _snap.TRStr_Empty(self)
    def Len(self):
        return _snap.TRStr_Len(self)
    def PutCh(self, ChN, Ch):
        return _snap.TRStr_PutCh(self, ChN, Ch)
    def GetCh(self, ChN):
        return _snap.TRStr_GetCh(self, ChN)
    def IsUc(self):
        return _snap.TRStr_IsUc(self)
    def ToUc(self):
        return _snap.TRStr_ToUc(self)
    def IsLc(self):
        return _snap.TRStr_IsLc(self)
    def ToLc(self):
        return _snap.TRStr_ToLc(self)
    def ToCap(self):
        return _snap.TRStr_ToCap(self)
    def ConvUsFromYuAscii(self):
        return _snap.TRStr_ConvUsFromYuAscii(self)
    # SWIG idiom: defined as a plain function first, then rebound as a
    # staticmethod so it can be called without an instance.
    def CmpI(CStr1, CStr2):
        return _snap.TRStr_CmpI(CStr1, CStr2)
    CmpI = staticmethod(CmpI)
    def GetPrimHashCd(self):
        return _snap.TRStr_GetPrimHashCd(self)
    def GetSecHashCd(self):
        return _snap.TRStr_GetSecHashCd(self)
    def GetNullRStr():
        return _snap.TRStr_GetNullRStr()
    GetNullRStr = staticmethod(GetNullRStr)
def test_return_none(capture):
    """Null-returning keep-alive methods must not leak registered instances.

    Tracks the registered-instance count (ConstructorStats.detail_reg_inst)
    and captured output around create / call / delete of a Parent, for both
    returnNullChildKeepAliveChild and returnNullChildKeepAliveParent.
    NOTE(review): indentation of the with-bodies was reconstructed from the
    flattened source -- confirm against the original test file.
    """
    n_inst = ConstructorStats.detail_reg_inst()
    with capture:
        p = m.Parent()
    assert (capture == 'Allocating parent.')
    with capture:
        p.returnNullChildKeepAliveChild()
        # Only the parent itself should be registered: no child was created.
        assert (ConstructorStats.detail_reg_inst() == (n_inst + 1))
    assert (capture == '')
    with capture:
        del p
        assert (ConstructorStats.detail_reg_inst() == n_inst)
    assert (capture == 'Releasing parent.')
    with capture:
        p = m.Parent()
    assert (capture == 'Allocating parent.')
    with capture:
        p.returnNullChildKeepAliveParent()
        assert (ConstructorStats.detail_reg_inst() == (n_inst + 1))
    assert (capture == '')
    with capture:
        del p
        assert (ConstructorStats.detail_reg_inst() == n_inst)
    assert (capture == 'Releasing parent.')
def test_default_parameters() -> None:
    """A fresh MapieCalibrator defaults to top-label calibration with split CV."""
    calibrator = MapieCalibrator()
    assert calibrator.method == 'top_label'
    assert calibrator.calibrator is None
    assert calibrator.cv == 'split'
# NOTE(review): the decorator name was lost in extraction; the surviving
# '.parametrize(...)' strongly indicates pytest.mark.parametrize -- confirm
# against the original file.
@pytest.mark.parametrize('shape', [[], [1], [2], [1, 2, 3]])
def test_exact_thompson_sampler_sample_raises_for_invalid_at_shape(shape: ShapeLike) -> None:
    """sample() must raise a TF debugging error for invalidly-shaped 'at' points."""
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        # Request 5 samples at a zeros tensor of the (invalid) given shape.
        ExactThompsonSampler().sample(QuadraticMeanAndRBFKernel(), 5, tf.zeros(shape))
_inherit(core.Dataset)
class Dataset(core.Dataset):
    """Loader for the 'dcase_bioacoustic' dataset (DCASE 2022 task 5)."""
    def __init__(self, data_home=None):
        super().__init__(data_home, name='dcase_bioacoustic', clip_class=Clip, bibtex=BIBTEX, remotes=REMOTES, license_info=LICENSE_INFO)
    # NOTE(review): the next line looks like stripped decorator residue
    # (possibly ``@core.copy_docs(load_audio)``) -- confirm against the
    # original file.
    _docs(load_audio)
    def load_audio(self, *args, **kwargs):
        """Delegate to the module-level ``load_audio``."""
        return load_audio(*args, **kwargs)
    # NOTE(review): ``_property`` below is probably residue of a stripped
    # ``@property``-style decorator -- confirm against the original file.
    _property
    def _metadata(self):
        """Build the metadata index from the clip index and the two CSVs.

        Per clip: its sub-dataset (second-to-last path component before the
        clip id in its csv path) and split (Training/Validation/evaluation).
        Also collects 'class_codes' (code -> subdataset/name/split) and
        'subdatasets' (subdataset -> list of class codes) from the train and
        validation class CSVs.

        Raises:
            FileNotFoundError: when a class CSV is missing (not downloaded).
        """
        metadata_index = {clip_id: {'subdataset': os.path.normpath(v['csv'][0]).split(clip_id)[0].split(os.path.sep)[(- 2)], 'split': ('train' if ('Training' in os.path.normpath(v['csv'][0]).split(clip_id)[0]) else ('validation' if ('Validation' in os.path.normpath(v['csv'][0]).split(clip_id)[0]) else 'evaluation'))} for (clip_id, v) in self._index['clips'].items()}
        metadata_paths = {'train': os.path.join(self.data_home, 'DCASE2022_task5_Training_set_classes.csv'), 'validation': os.path.join(self.data_home, 'DCASE2022_task5_Validation_set_classes.csv')}
        metadata_index['class_codes'] = {}
        metadata_index['subdatasets'] = {}
        for (split, metadata_path) in metadata_paths.items():
            metadata_path = os.path.normpath(metadata_path)
            if (not os.path.exists(metadata_path)):
                raise FileNotFoundError('Metadata not found. Did you run .download()?')
            with open(metadata_path, 'r') as fhandle:
                reader = csv.reader(fhandle, delimiter=',')
                # Header row determines the column positions.
                headers = next(reader)
                class_code_id = headers.index('class_code')
                class_name_id = headers.index('class_name')
                dataset_id = headers.index('dataset')
                for line in reader:
                    metadata_index['class_codes'][line[class_code_id]] = {'subdataset': line[dataset_id], 'class_name': line[class_name_id], 'split': split}
                    if (line[dataset_id] not in metadata_index['subdatasets']):
                        metadata_index['subdatasets'][line[dataset_id]] = [line[class_code_id]]
                    else:
                        metadata_index['subdatasets'][line[dataset_id]].append(line[class_code_id])
        return metadata_index
class MetricsMeanSquaredError(Metrics):
    """Mean-squared-error metric backed by the core implementation object."""

    def __init__(self, dtype=bb.DType.FP32):
        # Look up the core object matching this dtype and instantiate it.
        core_impl = bb.search_core_object('MetricsMeanSquaredError', [dtype]).create()
        super(MetricsMeanSquaredError, self).__init__(core_metrics=core_impl)
def apply_to_all_elements(x, fn):
    """Apply *fn* to every leaf of a (possibly nested) list/tuple structure.

    Containers are detected by exact type (list or tuple); anything else is
    treated as a leaf. Results are always rebuilt with lists.
    """
    if type(x) in (list, tuple):
        return [apply_to_all_elements(item, fn) for item in x]
    return fn(x)
class MobileNetV3(MyNetwork):
    """MobileNetV3 backbone: stem conv, a stack of inverted-residual blocks,
    a final 1x1 expand conv, global average pooling, a 1x1 feature-mix conv
    and a classifier head.
    """
    def __init__(self, first_conv, blocks, final_expand_layer, feature_mix_layer, classifier):
        super(MobileNetV3, self).__init__()
        self.first_conv = first_conv
        self.blocks = nn.ModuleList(blocks)
        self.final_expand_layer = final_expand_layer
        self.feature_mix_layer = feature_mix_layer
        self.classifier = classifier
    def forward(self, x):
        x = self.first_conv(x)
        for block in self.blocks:
            x = block(x)
        x = self.final_expand_layer(x)
        # Global average pool over W then H, keeping 1x1 spatial dims so the
        # following 1x1 conv (feature_mix_layer) can be applied.
        x = x.mean(3, keepdim=True).mean(2, keepdim=True)
        x = self.feature_mix_layer(x)
        x = torch.squeeze(x)
        x = self.classifier(x)
        return x
    # NOTE(review): often exposed as @property in this code base; the
    # decorator may have been lost in extraction -- check how callers use it.
    def module_str(self):
        """Multi-line human-readable summary of all layers."""
        _str = (self.first_conv.module_str + '\n')
        for block in self.blocks:
            _str += (block.module_str + '\n')
        _str += (self.final_expand_layer.module_str + '\n')
        _str += (self.feature_mix_layer.module_str + '\n')
        _str += self.classifier.module_str
        return _str
    def config(self):
        """Serializable dict describing the full architecture."""
        return {'name': MobileNetV3.__name__, 'bn': self.get_bn_param(), 'first_conv': self.first_conv.config, 'blocks': [block.config for block in self.blocks], 'final_expand_layer': self.final_expand_layer.config, 'feature_mix_layer': self.feature_mix_layer.config, 'classifier': self.classifier.config}
    # NOTE(review): takes no self -- presumably intended to be called on the
    # class (or decorated @staticmethod in the original) -- confirm.
    def build_from_config(config):
        """Reconstruct a MobileNetV3 from a ``config()``-style dict."""
        first_conv = set_layer_from_config(config['first_conv'])
        final_expand_layer = set_layer_from_config(config['final_expand_layer'])
        feature_mix_layer = set_layer_from_config(config['feature_mix_layer'])
        classifier = set_layer_from_config(config['classifier'])
        blocks = []
        for block_config in config['blocks']:
            blocks.append(MobileInvertedResidualBlock.build_from_config(block_config))
        net = MobileNetV3(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)
        # Restore BN parameters if present, else use the defaults.
        if ('bn' in config):
            net.set_bn_param(**config['bn'])
        else:
            net.set_bn_param(momentum=0.1, eps=0.001)
        return net
    def zero_last_gamma(self):
        """Zero the final BN weight of each residual block that has an
        identity shortcut, so the residual branch starts at zero."""
        for m in self.modules():
            if isinstance(m, MobileInvertedResidualBlock):
                if (isinstance(m.mobile_inverted_conv, MBInvertedConvLayer) and isinstance(m.shortcut, IdentityLayer)):
                    m.mobile_inverted_conv.point_linear.bn.weight.data.zero_()
def starts_stops_to_index(starts, stops):
    """Expand parallel start/stop arrays into a flat list of indices.

    For each pair, contributes ``range(start, stop)`` when the span is
    non-empty; an empty or inverted span contributes the single index
    *start* (preserving the original behavior for degenerate spans).

    Improvements over the original: iterates the pairs with ``zip`` instead
    of indexing by position, and expands each span with ``extend(range(...))``
    instead of an inner append loop.
    """
    toindex = []
    for start, stop in zip(starts, stops):
        if stop > start:
            toindex.extend(range(start, stop))
        else:
            # Degenerate span: keep one placeholder index, as callers expect.
            toindex.append(start)
    return toindex
class Partition7(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 
'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[23]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]']
# Scope names of free tensors (parameters/buffers) captured from the traced
# model; empty for this pipeline stage (all weights live inside the layers).
TENSORS = []
def __init__(self, layers, tensors, device='cuda:7'):
    """Build this pipeline-stage module from traced T5 layers.

    layers: mapping from traced scope name -> nn.Module for every entry in
        LAYER_SCOPES; each is registered under the short alias ``l_<idx>``.
    tensors: mapping from scope name -> tensor for every entry in TENSORS;
        nn.Parameter entries become parameters ``p_<i>``, others buffers ``b_<i>``.
    device: device this stage is pinned to (default 'cuda:7' — stage placement
        chosen by the partitioner).
    """
    super().__init__()
    # Register each traced layer under a stable short name l_<idx>.
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    self.device = torch.device(device)
    # Two flat positional inputs: hidden states and the attention bias.
    self.input_structure = [1, 1]
    # Maps the short alias back to the original state-dict key prefix so
    # state_dict()/load_state_dict() can translate names.
    self.lookup = {'l_0': 'encoder.block.21.layer.0.layer_norm', 'l_1': 'encoder.block.21.layer.0.SelfAttention.q', 'l_2': 'encoder.block.21.layer.0.SelfAttention.k', 'l_3': 'encoder.block.21.layer.0.SelfAttention.v', 'l_4': 'encoder.block.21.layer.0.SelfAttention.o', 'l_5': 'encoder.block.21.layer.0.dropout', 'l_6': 'encoder.block.21.layer.1.layer_norm', 'l_7': 'encoder.block.21.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.21.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.21.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.21.layer.1.dropout', 'l_11': 'encoder.block.22.layer.0.layer_norm', 'l_12': 'encoder.block.22.layer.0.SelfAttention.q', 'l_13': 'encoder.block.22.layer.0.SelfAttention.k', 'l_14': 'encoder.block.22.layer.0.SelfAttention.v', 'l_15': 'encoder.block.22.layer.0.SelfAttention.o', 'l_16': 'encoder.block.22.layer.0.dropout', 'l_17': 'encoder.block.22.layer.1.layer_norm', 'l_18': 'encoder.block.22.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.22.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.22.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.22.layer.1.dropout', 'l_22': 'encoder.block.23.layer.0.layer_norm', 'l_23': 'encoder.block.23.layer.0.SelfAttention.q', 'l_24': 'encoder.block.23.layer.0.SelfAttention.k', 'l_25': 'encoder.block.23.layer.0.SelfAttention.v', 'l_26': 'encoder.block.23.layer.0.SelfAttention.o', 'l_27': 'encoder.block.23.layer.0.dropout', 'l_28': 'encoder.block.23.layer.1.layer_norm', 'l_29': 'encoder.block.23.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.23.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.23.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.23.layer.1.dropout', 'l_33': 'encoder.final_layer_norm', 'l_34': 'encoder.dropout'}
    self.to(self.device)
def forward(self, *args):
    """Machine-generated forward for encoder blocks 21-23 plus the final
    layer norm/dropout of a partitioned T5ForConditionalGeneration.

    args flattens to (x0, x1): x0 = hidden states, x1 = attention position
    bias. Returns a 1-tuple with the encoder output. Variable names t_* are
    aggressively reused by the tracer; do not reorder statements.
    Assumes 32 heads x 128 head-dim = 4096 hidden size (hard-coded below).
    """
    (x0, x1) = unflatten(args, self.input_structure)
    # ----- block 21, layer 0: self-attention (l_0 .. l_5) -----
    t_0 = self.l_0(x0)
    t_1 = self.l_1(t_0)
    t_2 = self.l_2(t_0)
    t_3 = self.l_3(t_0)
    # t_0 is recycled to hold the batch size from here on.
    t_0 = t_0.shape
    t_0 = t_0[slice(None, 2, None)]
    t_0 = t_0[0]
    # Split q/k/v into (batch, heads, seq, head_dim).
    t_1 = t_1.view(t_0, (- 1), 32, 128)
    t_1 = t_1.transpose(1, 2)
    t_2 = t_2.view(t_0, (- 1), 32, 128)
    t_2 = t_2.transpose(1, 2)
    t_3 = t_3.view(t_0, (- 1), 32, 128)
    t_3 = t_3.transpose(1, 2)
    t_2 = t_2.transpose(3, 2)
    t_2 = torch.matmul(t_1, t_2)
    # Add position bias, softmax in float32 for stability, cast back.
    t_2 += x1
    t_1 = t_2.float()
    t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
    t_2 = t_1.type_as(t_2)
    t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
    t_3 = torch.matmul(t_2, t_3)
    t_3 = t_3.transpose(1, 2)
    t_3 = t_3.contiguous()
    t_0 = t_3.view(t_0, (- 1), 4096)
    t_0 = self.l_4(t_0)
    t_3 = self.l_5(t_0)
    # Residual connection around the attention sub-layer.
    t_3 = (x0 + t_3)
    # Tuple shuffling mirrors T5's (output, present, position_bias) plumbing.
    t_0 = (t_0, None, x1)
    t_2 = t_0[0]
    t_3 = (t_3,)
    t_0 = t_0[slice(1, None, None)]
    t_0 = (t_3 + t_0)
    t_3 = t_0[slice(None, 2, None)]
    t_1 = t_3[0]
    # ----- block 21, layer 1: feed-forward (l_6 .. l_10) -----
    t_4 = self.l_6(t_1)
    t_3 = t_3[1]
    t_0 = t_0[slice(2, None, None)]
    t_4 = self.l_7(t_4)
    t_4 = torch.nn.functional.relu(t_4, inplace=False)
    t_4 = self.l_8(t_4)
    t_4 = self.l_9(t_4)
    t_4 = self.l_10(t_4)
    t_4 = (t_1 + t_4)
    t_3 = (t_4, t_3)
    t_0 = (t_3 + t_0)
    # ----- block 22, layer 0: self-attention (l_11 .. l_16) -----
    t_3 = t_0[slice(None, 2, None)]
    t_3 = t_3[0]
    t_4 = self.l_11(t_3)
    t_0 = t_0[2]
    t_1 = self.l_12(t_4)
    t_5 = self.l_13(t_4)
    t_6 = self.l_14(t_4)
    t_4 = t_4.shape
    t_4 = t_4[slice(None, 2, None)]
    t_4 = t_4[0]
    t_1 = t_1.view(t_4, (- 1), 32, 128)
    t_1 = t_1.transpose(1, 2)
    t_5 = t_5.view(t_4, (- 1), 32, 128)
    t_5 = t_5.transpose(1, 2)
    t_6 = t_6.view(t_4, (- 1), 32, 128)
    t_6 = t_6.transpose(1, 2)
    t_5 = t_5.transpose(3, 2)
    t_5 = torch.matmul(t_1, t_5)
    t_5 += t_0
    t_1 = t_5.float()
    t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
    t_5 = t_1.type_as(t_5)
    t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False)
    t_6 = torch.matmul(t_5, t_6)
    t_6 = t_6.transpose(1, 2)
    t_6 = t_6.contiguous()
    t_4 = t_6.view(t_4, (- 1), 4096)
    t_4 = self.l_15(t_4)
    t_6 = self.l_16(t_4)
    t_6 = (t_3 + t_6)
    t_0 = (t_4, None, t_0)
    t_4 = t_0[0]
    t_6 = (t_6,)
    t_0 = t_0[slice(1, None, None)]
    t_0 = (t_6 + t_0)
    t_6 = t_0[slice(None, 2, None)]
    t_3 = t_6[0]
    # ----- block 22, layer 1: feed-forward (l_17 .. l_21) -----
    t_5 = self.l_17(t_3)
    t_6 = t_6[1]
    t_0 = t_0[slice(2, None, None)]
    t_5 = self.l_18(t_5)
    t_5 = torch.nn.functional.relu(t_5, inplace=False)
    t_5 = self.l_19(t_5)
    t_5 = self.l_20(t_5)
    t_5 = self.l_21(t_5)
    t_5 = (t_3 + t_5)
    t_6 = (t_5, t_6)
    t_0 = (t_6 + t_0)
    # ----- block 23, layer 0: self-attention (l_22 .. l_27) -----
    t_6 = t_0[slice(None, 2, None)]
    t_6 = t_6[0]
    t_5 = self.l_22(t_6)
    t_0 = t_0[2]
    t_3 = self.l_23(t_5)
    t_1 = self.l_24(t_5)
    t_7 = self.l_25(t_5)
    t_5 = t_5.shape
    t_5 = t_5[slice(None, 2, None)]
    t_5 = t_5[0]
    t_3 = t_3.view(t_5, (- 1), 32, 128)
    t_3 = t_3.transpose(1, 2)
    t_1 = t_1.view(t_5, (- 1), 32, 128)
    t_1 = t_1.transpose(1, 2)
    t_7 = t_7.view(t_5, (- 1), 32, 128)
    t_7 = t_7.transpose(1, 2)
    t_1 = t_1.transpose(3, 2)
    t_1 = torch.matmul(t_3, t_1)
    t_1 += t_0
    t_3 = t_1.float()
    t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None)
    t_1 = t_3.type_as(t_1)
    t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False)
    t_7 = torch.matmul(t_1, t_7)
    t_7 = t_7.transpose(1, 2)
    t_7 = t_7.contiguous()
    t_5 = t_7.view(t_5, (- 1), 4096)
    t_5 = self.l_26(t_5)
    t_7 = self.l_27(t_5)
    t_7 = (t_6 + t_7)
    t_0 = (t_5, None, t_0)
    t_5 = t_0[0]
    t_7 = (t_7,)
    t_0 = t_0[slice(1, None, None)]
    t_0 = (t_7 + t_0)
    t_7 = t_0[slice(None, 2, None)]
    t_6 = t_7[0]
    # ----- block 23, layer 1: feed-forward (l_28 .. l_32) -----
    t_1 = self.l_28(t_6)
    t_7 = t_7[1]
    t_0 = t_0[slice(2, None, None)]
    t_1 = self.l_29(t_1)
    t_1 = torch.nn.functional.relu(t_1, inplace=False)
    t_1 = self.l_30(t_1)
    t_1 = self.l_31(t_1)
    t_1 = self.l_32(t_1)
    t_1 = (t_6 + t_1)
    t_7 = (t_1, t_7)
    t_0 = (t_7 + t_0)
    # ----- encoder final layer norm + dropout (l_33, l_34) -----
    t_7 = t_0[slice(None, 2, None)]
    t_7 = t_7[0]
    t_7 = self.l_33(t_7)
    t_0 = t_0[2]
    t_7 = self.l_34(t_7)
    return (t_7,)
def state_dict(self, *args, **kwargs):
    # Delegate to the module-level `state_dict` helper (the bare name inside a
    # method body resolves to the module global, not this method), so all
    # partitioned stages share one key-translating implementation.
    return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
    # Delegate to the module-level `load_state_dict` helper shared by stages.
    return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
    # Delegate to the module-level `named_parameters` helper shared by stages.
    return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
    # Delegate to the module-level `named_buffers` helper shared by stages.
    return named_buffers(self, *args, **kwargs)
def cpu(self):
    # Delegate to the module-level `cpu` helper (also updates self.device).
    return cpu(self)
def cuda(self, device=None):
    # Delegate to the module-level `cuda` helper (also updates self.device).
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegate to the module-level `to` helper (also updates self.device).
    return to(self, *args, **kwargs)
.parametrize('with_data', [pytest.param(True), pytest.param(False)])
.parametrize('language', [pytest.param('CPP'), pytest.param('Python')])
def test_map_with_tasklets(language: str, with_data: bool):
    """TaskletFusion must fuse exactly three tasklet pairs inside the map and
    preserve the numerical result of the reference implementation."""
    sdfg = _make_sdfg(language, with_data)
    sdfg.compile()
    sdfg.simplify()
    applied = sdfg.apply_transformations_repeated(TaskletFusion)
    assert applied == 3
    compiled = sdfg.compile()
    a = np.arange(1, N + 1, dtype=np_datatype)
    b = np.arange(1, M + 1, dtype=np_datatype)
    c = np.zeros((M,), dtype=np_datatype)
    compiled(A=a, B=b, C=c)
    expected = map_with_tasklets.f(a, b)
    assert np.allclose(c, expected)
def save_video(save_dir, file_name, frames, episode_id=0):
    """Save one episode's frames as individual PNG files.

    Parameters
    ----------
    save_dir : base output directory.
    file_name : prefix; frames go into ``<save_dir>/<file_name>_episode_<id>/``.
    frames : array of shape (num_frames, H, W, 3), uint8 RGB.
        Each frame is flipped vertically before saving — presumably frames
        are stored bottom-up (e.g. from an OpenGL buffer); confirm upstream.
    episode_id : integer suffix for the episode directory.
    """
    # The original name `filename` actually denoted a directory; renamed.
    episode_dir = os.path.join(save_dir, (file_name + '_episode_{}'.format(episode_id)))
    # exist_ok=True avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(episode_dir, exist_ok=True)
    num_frames = frames.shape[0]
    for i in range(num_frames):
        img = Image.fromarray(np.flipud(frames[i]), 'RGB')
        img.save(os.path.join(episode_dir, 'frame_{}.png'.format(i)))
class YahooAnswers(XiangZhangDataset):
    """Yahoo! Answers topic-classification dataset from the Xiang Zhang
    text-classification corpus collection; base class handles download/parsing."""
    # Directory name of the extracted archive.
    dirname = 'yahoo_answers_csv'
    # Column layout of the CSV files in the archive.
    columns = ['class_index', 'question_title', 'question_content', 'best_answer']
def register_Ns3LteUeNetDevice_methods(root_module, cls):
    """Register Python bindings (pybindgen) for ns3::LteUeNetDevice.

    Generated binding code: declares constructors, getters for the UE's
    protocol sub-objects (MAC/RRC/PHY/NAS), identity (IMSI/EARFCN/CSG) and
    carrier-aggregation accessors. Do not hand-edit signatures.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    # NetDevice::Send override.
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    # Accessors for the UE protocol stack components.
    cls.add_method('GetMac', 'ns3::Ptr< ns3::LteUeMac >', [], is_const=True)
    cls.add_method('GetRrc', 'ns3::Ptr< ns3::LteUeRrc >', [], is_const=True)
    cls.add_method('GetPhy', 'ns3::Ptr< ns3::LteUePhy >', [], is_const=True)
    cls.add_method('GetNas', 'ns3::Ptr< ns3::EpcUeNas >', [], is_const=True)
    cls.add_method('GetComponentCarrierManager', 'ns3::Ptr< ns3::LteUeComponentCarrierManager >', [], is_const=True)
    # UE identity / cell-selection attributes.
    cls.add_method('GetImsi', 'uint64_t', [], is_const=True)
    cls.add_method('GetDlEarfcn', 'uint32_t', [], is_const=True)
    cls.add_method('SetDlEarfcn', 'void', [param('uint32_t', 'earfcn')])
    cls.add_method('GetCsgId', 'uint32_t', [], is_const=True)
    cls.add_method('SetCsgId', 'void', [param('uint32_t', 'csgId')])
    cls.add_method('SetTargetEnb', 'void', [param('ns3::Ptr< ns3::LteEnbNetDevice >', 'enb')])
    cls.add_method('GetTargetEnb', 'ns3::Ptr< ns3::LteEnbNetDevice >', [])
    # Carrier-aggregation component-carrier map.
    cls.add_method('SetCcMap', 'void', [param('std::map< unsigned char, ns3::Ptr< ns3::ComponentCarrierUe > >', 'ccm')])
    cls.add_method('GetCcMap', 'std::map< unsigned char, ns3::Ptr< ns3::ComponentCarrierUe > >', [])
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    return
def test_broadcast_single_bool():
    """with_field must broadcast a scalar bool across the nested record array,
    and later field additions must not disturb previously broadcast fields."""
    records = ak.Array([[{'x': 0.1, 'y': 0.2, 'z': 0.3}, {'x': 0.4, 'y': 0.5, 'z': 0.6}]])
    with_flag = ak.operations.with_field(records, True, 'always_true')
    assert (to_list(with_flag.always_true) == [[True, True]])
    with_mask = ak.operations.with_field(with_flag, (records.x > 0.3), 'sometimes_true')
    assert (to_list(with_mask.always_true) == [[True, True]])
    assert (ak.operations.fields(with_mask) == ['x', 'y', 'z', 'always_true', 'sometimes_true'])
def overall_accuracy_calc(TP, POP):
    """Compute overall accuracy.

    TP: dict mapping class -> true-positive count.
    POP: total population (number of samples).
    Returns sum(TP) / POP, or the string 'None' when the value is undefined
    (e.g. POP == 0) — matching the library-wide convention for statistics.
    """
    try:
        total_correct = sum(TP.values())
        return total_correct / POP
    except Exception:
        return 'None'
class DiagPC(object):
    """Jacobi (diagonal) preconditioner shell for petsc4py: y = x / diag(A)."""
    def setUp(self, pc):
        # First operator of the PC is the matrix defining the preconditioner.
        A = pc.getOperators()[0]
        # Elementwise reciprocal of the diagonal — presumably petsc4py's Vec
        # supports scalar.__truediv__ dispatch here; confirm with the petsc4py
        # version in use.
        self.idiag = (1.0 / A.getDiagonal())
    def apply(self, pc, x, y):
        # y <- x (elementwise *) 1/diag(A), i.e. divide by A's diagonal.
        y.pointwiseMult(x, self.idiag)
class TestAlignments(object):
    """Tests for the Alignments tensorisation of word-alignment pairs.

    NOTE(review): source_words/target_words/aligns are presumably decorated
    with @pytest.fixture upstream — decorators appear lost in extraction.
    """
    def source_words(self):
        # Three sentences; the last is empty to exercise zero-length input.
        return [['a', 'c', 'b', 'c'], ['1', '3', '2', '2', '2'], []]
    def target_words(self):
        return [['c', 'z', 'b', 'c'], ['1', 'c'], ['2', '4']]
    def aligns(self, source_words, target_words):
        return Alignments(source_words, target_words)
    def test(self, aligns):
        # indices: per source token, target positions of matching words;
        # mask marks which index slots are valid.
        assert_tensor_equal(aligns.indices, [[[1, 3], [0, 0], [2, 0], [1, 3]], [[0, 0], [0, 0], [0, 0], [0, 0]], [[0, 0], [0, 0], [0, 0], [0, 0]]])
        assert_tensor_equal(aligns.mask, [[[1, 1], [0, 0], [1, 0], [1, 1]], [[1, 0], [0, 0], [0, 0], [0, 0]], [[0, 0], [0, 0], [0, 0], [0, 0]]])
    def test_split(self, aligns):
        # split() yields one item per source position (max source length = 4).
        items = aligns.split()
        assert (len(items) == 4)
        assert_tensor_equal(items[0].values, [[1, 3], [0, 0], [0, 0]])
        assert_tensor_equal(items[0].mask, [[1, 1], [1, 0], [0, 0]])
        assert_tensor_equal(items[2].values, [[2, 0], [0, 0], [0, 0]])
        assert_tensor_equal(items[2].mask, [[1, 0], [0, 0], [0, 0]])
def simplify(save_dir, save_name, nets, total, sup_config):
    """Collect per-architecture checkpoints into FULL and SIMPLIFY benchmark
    archives (NATS-Bench style).

    save_dir: Path containing 'raw-data-<hp>' checkpoint directories.
    save_name: base name of the produced pickle archives.
    nets: list of architecture strings, indexed 0..total-1.
    total: expected number of architectures (must equal len(nets)).
    sup_config: unused here — presumably consumed by callers/upstream; confirm.
    """
    dataloader_dict = {}
    # Only hyper-parameter setting '12' (12-epoch training) is archived.
    (hps, seeds) = (['12'], set())
    # Pass 1: discover seeds and sanity-check that every arch has a checkpoint.
    for hp in hps:
        sub_save_dir = (save_dir / 'raw-data-{:}'.format(hp))
        ckps = sorted(list(sub_save_dir.glob('arch-*-seed-*.pth')))
        seed2names = defaultdict(list)
        for ckp in ckps:
            # File names look like 'arch-000123-seed-777.pth'; parts[3] is the seed.
            parts = re.split('-|\\.', ckp.name)
            seed2names[parts[3]].append(ckp.name)
        print('DIR : {:}'.format(sub_save_dir))
        nums = []
        for (seed, xlist) in seed2names.items():
            seeds.add(seed)
            nums.append(len(xlist))
            print(' [seed={:}] there are {:} checkpoints.'.format(seed, len(xlist)))
        assert (len(nets) == total == max(nums)), 'there are some missed files : {:} vs {:}'.format(max(nums), total)
    print('{:} start simplify the checkpoint.'.format(time_string()))
    datasets = ('ninapro', 'darcyflow')
    # FULL keeps trained parameters; SIMPLIFY strips them to save space.
    full_save_dir = (save_dir / (save_name + '-FULL'))
    simple_save_dir = (save_dir / (save_name + '-SIMPLIFY'))
    full_save_dir.mkdir(parents=True, exist_ok=True)
    simple_save_dir.mkdir(parents=True, exist_ok=True)
    (arch2infos, evaluated_indexes) = (dict(), set())
    (end_time, arch_time) = (time.time(), AverageMeter())
    # Write meta files first so partially-built archives are still loadable.
    temp_final_infos = {'meta_archs': nets, 'total_archs': total, 'arch2infos': None, 'evaluated_indexes': set()}
    pickle_save(temp_final_infos, str((full_save_dir / 'meta.pickle')))
    pickle_save(temp_final_infos, str((simple_save_dir / 'meta.pickle')))
    # Pass 2: aggregate each architecture's checkpoints into info records.
    for index in tqdm(range(total)):
        arch_str = nets[index]
        hp2info = OrderedDict()
        full_save_path = (full_save_dir / '{:06d}.pickle'.format(index))
        simple_save_path = (simple_save_dir / '{:06d}.pickle'.format(index))
        for hp in hps:
            sub_save_dir = (save_dir / 'raw-data-{:}'.format(hp))
            ckps = [(sub_save_dir / 'arch-{:06d}-seed-{:}.pth'.format(index, seed)) for seed in seeds]
            ckps = [x for x in ckps if x.exists()]
            if (len(ckps) == 0):
                raise ValueError('Invalid data : index={:}, hp={:}'.format(index, hp))
            arch_info = account_one_arch(index, arch_str, ckps, datasets, dataloader_dict)
            hp2info[hp] = arch_info
        hp2info = correct_time_related_info(index, hp2info)
        evaluated_indexes.add(index)
        to_save_data = OrderedDict({'12': hp2info['12'].state_dict()})
        pickle_save(to_save_data, str(full_save_path))
        # Drop the weights, then save the lightweight variant.
        for hp in hps:
            hp2info[hp].clear_params()
        to_save_data = OrderedDict({'12': hp2info['12'].state_dict()})
        pickle_save(to_save_data, str(simple_save_path))
        arch2infos[index] = to_save_data
        arch_time.update((time.time() - end_time))
        end_time = time.time()
        # NOTE(review): need_time is computed but not used in this chunk —
        # likely fed into a progress message upstream; confirm.
        need_time = '{:}'.format(convert_secs2time((arch_time.avg * ((total - index) - 1)), True))
    print('{:} {:} done.'.format(time_string(), save_name))
    # Final combined archive, compressed (pickle_save presumably emits .pbz2).
    final_infos = {'meta_archs': nets, 'total_archs': total, 'arch2infos': arch2infos, 'evaluated_indexes': evaluated_indexes}
    save_file_name = (save_dir / '{:}.pickle'.format(save_name))
    pickle_save(final_infos, str(save_file_name))
    # Embed the archive's md5 into the published file/directory names.
    hd5sum = get_md5_file((str(save_file_name) + '.pbz2'))
    hd5_file_name = (save_dir / '{:}-{:}.pickle.pbz2'.format(NATS_TSS_BASE_NAME, hd5sum))
    shutil.move((str(save_file_name) + '.pbz2'), hd5_file_name)
    print('Save {:} / {:} architecture results into {:} -> {:}.'.format(len(evaluated_indexes), total, save_file_name, hd5_file_name))
    hd5_full_save_dir = (save_dir / '{:}-{:}-full'.format(NATS_TSS_BASE_NAME, hd5sum))
    hd5_simple_save_dir = (save_dir / '{:}-{:}-simple'.format(NATS_TSS_BASE_NAME, hd5sum))
    shutil.move(full_save_dir, hd5_full_save_dir)
    shutil.move(simple_save_dir, hd5_simple_save_dir)
def pandas_data_to_tetrad(df: DataFrame, int_as_cont=False):
    """Convert a pandas DataFrame into a Tetrad (Java, via JPype) BoxDataSet.

    Float columns become continuous variables; all other columns are treated
    as discrete and their values are integer-encoded. With int_as_cont=True,
    int8..int64/uint8..uint64 columns are also treated as continuous.
    Returns a td.BoxDataSet backed by an Int/Double/Mixed data box depending
    on whether the frame is all-discrete, all-continuous, or mixed.
    """
    dtypes = ['float16', 'float32', 'float64']
    if int_as_cont:
        # 2**3 .. 2**6 -> int8/16/32/64 and unsigned counterparts.
        for i in range(3, 7):
            dtypes.append(f'int{(2 ** i)}')
            dtypes.append(f'uint{(2 ** i)}')
    cols = df.columns
    # Anything not in the continuous dtype list is discrete.
    discrete_cols = [col for col in cols if (df[col].dtypes not in dtypes)]
    # Encode each discrete column's values as 0..k-1 in first-seen order.
    category_map = {col: {val: i for (i, val) in enumerate(df[col].unique())} for col in discrete_cols}
    df = df.replace(category_map)
    values = df.values
    (n, p) = df.shape
    variables = util.ArrayList()
    for col in cols:
        if (col in discrete_cols):
            categories = util.ArrayList()
            for category in category_map[col]:
                categories.add(str(category))
            variables.add(td.DiscreteVariable(str(col), categories))
        else:
            variables.add(td.ContinuousVariable(str(col)))
    # Choose the backing box: all discrete / all continuous / mixed.
    if (len(discrete_cols) == len(cols)):
        databox = td.IntDataBox(n, p)
    elif (len(discrete_cols) == 0):
        databox = td.DoubleDataBox(n, p)
    else:
        databox = td.MixedDataBox(variables, n)
    # Fill cell-by-cell; Tetrad boxes are (row, col) addressed.
    for (col, var) in enumerate(values.T):
        for (row, val) in enumerate(var):
            databox.set(row, col, val)
    return td.BoxDataSet(databox, variables)
def init_pretrained_weights(model, key=''):
    """Download (if necessary) the ImageNet-pretrained checkpoint for ``key``
    and return the model's state dict updated with all compatible weights.

    Only checkpoint entries whose name (after stripping a DataParallel
    'module.' prefix) and tensor shape match the current model are copied;
    the rest are discarded. NOTE(review): the merged dict is *returned*, not
    loaded into ``model`` here — the caller is expected to call
    ``model.load_state_dict`` on the result; confirm call sites.
    """
    import os
    import gdown
    from collections import OrderedDict
    import logging
    logger = logging.getLogger(__name__)

    def _get_torch_home():
        # Mirror torch.hub's cache resolution: $TORCH_HOME, then
        # $XDG_CACHE_HOME/torch, then ~/.cache/torch.
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(os.getenv(ENV_TORCH_HOME, os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    # exist_ok=True replaces the legacy try/except errno.EEXIST dance.
    os.makedirs(model_dir, exist_ok=True)
    filename = (key + '_imagenet.pth')
    cached_file = os.path.join(model_dir, filename)
    if (not os.path.exists(cached_file)):
        logger.info(f"Pretrain model don't exist, downloading from {model_urls[key]}")
        # In distributed runs only the main process downloads.
        if comm.is_main_process():
            gdown.download(model_urls[key], cached_file, quiet=False)
    # Barrier must be reached by every process, whether or not it downloaded.
    comm.synchronize()
    state_dict = torch.load(cached_file, map_location=torch.device('cpu'))
    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    (matched_layers, discarded_layers) = ([], [])
    for (k, v) in state_dict.items():
        if k.startswith('module.'):
            # Strip the DataParallel prefix from checkpoints saved with DP.
            k = k[7:]
        if ((k in model_dict) and (model_dict[k].size() == v.size())):
            new_state_dict[k] = v
            matched_layers.append(k)
        else:
            discarded_layers.append(k)
    model_dict.update(new_state_dict)
    return model_dict
def cleanup(ax):
    """Strip a matplotlib Axes down to a minimal look: hide all spines and
    tick marks while keeping bottom/left tick labels visible."""
    for side in ('top', 'bottom', 'right', 'left'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    # Use booleans: modern matplotlib (>= 3.x) rejects the legacy
    # 'on'/'off' strings that were passed here before.
    ax.tick_params(axis='both', which='both', bottom=False, top=False,
                   labelbottom=True, left=False, right=False, labelleft=True)
def register_Ns3SimpleOfdmWimaxPhy_methods(root_module, cls):
    """Register Python bindings (pybindgen) for ns3::SimpleOfdmWimaxPhy.

    Generated binding code: constructors, public PHY configuration/trace
    accessors, Send/StartReceive entry points, and the private virtual
    Do* hooks of the WimaxPhy interface. Do not hand-edit signatures.
    """
    cls.add_constructor([param('ns3::SimpleOfdmWimaxPhy const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('char *', 'tracesPath')])
    cls.add_method('ActivateLoss', 'void', [param('bool', 'loss')])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    cls.add_method('DoAttach', 'void', [param('ns3::Ptr< ns3::WimaxChannel >', 'channel')], is_virtual=True)
    cls.add_method('GetBandwidth', 'uint32_t', [], is_const=True)
    cls.add_method('GetNoiseFigure', 'double', [], is_const=True)
    cls.add_method('GetPhyType', 'ns3::WimaxPhy::PhyType', [], is_const=True, is_virtual=True)
    cls.add_method('GetTxPower', 'double', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Rx/Tx trace notifications.
    cls.add_method('NotifyRxBegin', 'void', [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    cls.add_method('NotifyRxDrop', 'void', [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    cls.add_method('NotifyRxEnd', 'void', [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    cls.add_method('NotifyTxBegin', 'void', [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    cls.add_method('NotifyTxDrop', 'void', [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    cls.add_method('NotifyTxEnd', 'void', [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    # Two Send overloads: burst-level and SendParams-based (virtual).
    cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::PacketBurst >', 'burst'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction')])
    cls.add_method('Send', 'void', [param('ns3::SendParams *', 'params')], is_virtual=True)
    cls.add_method('SetBandwidth', 'void', [param('uint32_t', 'BW')])
    cls.add_method('SetNoiseFigure', 'void', [param('double', 'nf')])
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::PacketBurst >, ns3::Ptr< ns3::WimaxConnection >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    cls.add_method('SetSNRToBlockErrorRateTracesPath', 'void', [param('char *', 'tracesPath')])
    cls.add_method('SetTxPower', 'void', [param('double', 'txPower')])
    cls.add_method('StartReceive', 'void', [param('uint32_t', 'burstSize'), param('bool', 'isFirstBlock'), param('uint64_t', 'frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'rxPower'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    # Private virtual hooks of the WimaxPhy base interface.
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoGetDataRate', 'uint32_t', [param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetFrameDuration', 'ns3::Time', [param('uint8_t', 'frameDurationCode')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetFrameDurationCode', 'uint8_t', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetGValue', 'double', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetNfft', 'uint16_t', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetNrBytes', 'uint64_t', [param('uint32_t', 'symbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetNrSymbols', 'uint64_t', [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetRtg', 'uint16_t', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetSamplingFactor', 'double', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetSamplingFrequency', 'double', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetTransmissionTime', 'ns3::Time', [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetTtg', 'uint16_t', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoSetDataRates', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoSetPhyParameters', 'void', [], visibility='private', is_virtual=True)
    return
(scope='function', autouse=True)
def scope_function():
    """Per-test generator fixture body for nnabla tests: reset global nnabla
    state before each test and restore the default context afterwards.

    NOTE(review): upstream this is presumably decorated with
    @pytest.fixture(scope='function', autouse=True) — the decorator prefix
    appears lost in extraction (see the stray '(scope=...)' line above).
    """
    nn.set_auto_forward(False)
    nn.clear_parameters()
    nn.graph_def.reset_default_graph()
    # Remember the context active before the test runs.
    ctx = nn.get_current_context()
    (yield)
    # Restore it as the default after the test finishes.
    nn.set_default_context(ctx)
_request
def s3_etag(url, proxies=None):
    """Return the ETag of the S3 object addressed by ``url`` (s3://bucket/key),
    optionally routing requests through ``proxies``."""
    (bucket_name, object_key) = split_s3_path(url)
    resource = boto3.resource('s3', config=Config(proxies=proxies))
    s3_object = resource.Object(bucket_name, object_key)
    return s3_object.e_tag
def save_results(model, train_results, dev_results, test_results, results_fname):
    """Write a single-row CSV summarising the model's hyper-parameters and its
    train/dev/test loss and accuracy."""
    header = ['n_classes', 'embedding_size', 'hidden_size', 'nlayers', 'dropout_p',
              'train_loss', 'dev_loss', 'test_loss',
              'train_acc', 'dev_acc', 'test_acc']
    row = [model.n_classes, model.embedding_size, model.hidden_size, model.nlayers,
           model.dropout_p,
           train_results['loss'], dev_results['loss'], test_results['loss'],
           train_results['acc'], dev_results['acc'], test_results['acc']]
    util.write_csv(results_fname, [header, row])
def test_is_failing(test_case_chromosome):
    """A chromosome whose last execution result reports test exceptions must
    be considered failing."""
    chromosome = test_case_chromosome
    # Stub an execution result that claims exceptions occurred.
    result = MagicMock(ExecutionResult)
    result.has_test_exceptions.return_value = True
    chromosome.set_last_execution_result(result)
    assert chromosome.is_failing()
def test_xml_dataset():
    """Instantiating an XMLDataset subclass whose CLASSES is None must fail
    the dataset's internal assertion."""
    cfg = {'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
           'img_prefix': 'data/VOCdevkit/VOC2007/',
           'pipeline': [{'type': 'LoadImageFromFile'}]}
    base_dataset = DATASETS.get('XMLDataset')

    class _NoClassesXMLDataset(base_dataset):
        CLASSES = None

    with pytest.raises(AssertionError):
        _NoClassesXMLDataset(**cfg)
class GlobalFeatureImportance(ExplanationBase):
    """Container for global feature-importance scores with matplotlib and
    plotly rendering. Scores are stored as parallel 'features'/'scores' lists.

    NOTE(review): `self._s` (feature-name truncation) is presumably defined on
    ExplanationBase; confirm.
    """
    def __init__(self):
        super().__init__()
        # {'features': [...], 'scores': [...]} once add() has been called.
        self.explanations = {}
    def add(self, feature_names, importance_scores, sort=False, **kwargs):
        """Store feature names with their importance scores; with sort=True,
        order by descending absolute score."""
        scores = list(zip(feature_names, importance_scores))
        if sort:
            scores = sorted(scores, key=(lambda x: abs(x[(- 1)])), reverse=True)
        self.explanations = {'features': [s[0] for s in scores], 'scores': [s[1] for s in scores]}
    def get_explanations(self):
        # Returns the raw {'features', 'scores'} dict.
        return self.explanations
    def plot(self, num_features=20, truncate_long_features=True, **kwargs):
        """Render a horizontal bar chart of the top `num_features` scores
        (green positive, red negative); returns the matplotlib figure."""
        import matplotlib.pyplot as plt
        (fig, axes) = plt.subplots(1, 1)
        exp = self.get_explanations()
        # Sort ascending by |score| so the largest bars end up at the top.
        feat_scores = sorted(list(zip([f'{(self._s(f) if truncate_long_features else f)} ' for f in exp['features']], exp['scores'])), key=(lambda x: abs(x[1])))
        if (num_features is not None):
            feat_scores = feat_scores[(- num_features):]
        fnames = [f for (f, s) in feat_scores]
        scores = [s for (f, s) in feat_scores]
        colors = [('green' if (x > 0) else 'red') for x in scores]
        positions = (np.arange(len(scores)) + 0.5)
        plt.sca(axes)
        plt.barh(positions, scores, align='center', color=colors)
        axes.yaxis.set_ticks_position('right')
        plt.yticks(positions, fnames, ha='right')
        plt.title(f'Global Feature Importance')
        return fig
    def _plotly_figure(self, num_features=20, truncate_long_features=True, **kwargs):
        # Plotly counterpart of plot(); shared by plotly_plot/ipython_plot.
        import plotly.express as px
        exp = self.explanations
        title = f'Global Feature Importance'
        feat_scores = sorted(list(zip([f'{(self._s(f) if truncate_long_features else f)}' for f in exp['features']], exp['scores'])), key=(lambda x: abs(x[1])))
        if (num_features is not None):
            feat_scores = feat_scores[(- num_features):]
        fnames = [f for (f, s) in feat_scores]
        scores = [s for (f, s) in feat_scores]
        fig = px.bar(y=fnames, x=scores, orientation='h', labels={'x': 'Importance scores', 'y': 'Features'}, title=title, color_discrete_map={True: '#008B8B', False: '#DC143C'})
        return fig
    def plotly_plot(self, num_features=20, truncate_long_features=True, **kwargs):
        # Wrap the plotly figure for the dashboard UI.
        return DashFigure(self._plotly_figure(num_features=num_features, truncate_long_features=truncate_long_features, **kwargs))
    def ipython_plot(self, num_features=20, truncate_long_features=True, **kwargs):
        # Render inline in a notebook.
        import plotly
        plotly.offline.iplot(self._plotly_figure(num_features=num_features, truncate_long_features=truncate_long_features, **kwargs))
    def from_dict(cls, d):
        # NOTE(review): takes `cls` — presumably @classmethod upstream; the
        # decorator appears lost in extraction.
        exp = GlobalFeatureImportance()
        exp.explanations = d['explanations']
        return exp
def _has_route_to_root(criteria, key, all_keys, connected):
if (key in connected):
return True
if (key not in criteria):
return False
for p in criteria[key].iter_parent():
try:
pkey = all_keys[id(p)]
except KeyError:
continue
if (pkey in connected):
connected.add(key)
return True
if _has_route_to_root(criteria, pkey, all_keys, connected):
connected.add(key)
return True
return False |
def tensor2img(img):
    """Convert the first image of a batched tensor in [-1, 1] to an HWC uint8
    RGB array; single-channel inputs are replicated to three channels."""
    arr = img[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # Grayscale -> fake RGB by repeating the channel.
        arr = np.tile(arr, (3, 1, 1))
    arr = np.transpose(arr, (1, 2, 0))
    # Map [-1, 1] -> [0, 255]; no clamping, matching the original behaviour.
    arr = (arr + 1) / 2.0 * 255.0
    return arr.astype(np.uint8)
class ResNeXt(nn.Module):
    """ResNeXt-style classifier for small images (e.g. 32x32 CIFAR inputs).

    Three stages of grouped-bottleneck Blocks followed by a 512-unit dense
    head; batch-norm layers can be disabled via bn=False (Identity is used).
    """

    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10, in_ch=3, in_dim=32, bn=True):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64
        self.bn = bn
        self.conv1 = nn.Conv2d(in_ch, self.in_planes, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(self.in_planes) if bn else nn.Identity()
        # Stage 1 keeps the resolution; stages 2 and 3 halve it (stride 2),
        # hence the in_dim // 4 spatial size feeding the dense head.
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        self.linear1 = nn.Linear(self.in_planes * (in_dim // 4) ** 2, 512)
        self.bn_dense = nn.BatchNorm1d(512) if bn else nn.Identity()
        self.linear2 = nn.Linear(512, num_classes)

    def _make_layer(self, num_blocks, stride):
        """Build one stage: first block applies `stride`, the rest stride 1.
        Mutates self.in_planes / self.bottleneck_width to track Block's
        channel expansion for subsequent blocks and stages."""
        blocks = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            blocks.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, block_stride, bn=self.bn))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
            self.bottleneck_width *= Block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            h = stage(h)
        h = Flatten()(h)
        h = F.relu(self.bn_dense(self.linear1(h)))
        return self.linear2(h)
class IntersphinxCache():
    """Memoise sphinx.ext.intersphinx.fetch_inventory across builds.

    On construction, the real fetch_inventory is saved and replaced by this
    instance's caching wrapper; inventories are keyed by (uri, inv).
    """
    def __init__(self):
        self.inventories = {}
        # Keep the genuine implementation, then monkey-patch ourselves in.
        self.real_fetch_inventory = sphinx.ext.intersphinx.fetch_inventory
        sphinx.ext.intersphinx.fetch_inventory = self.fetch_inventory
    def fetch_inventory(self, app, uri, inv):
        """Return the inventory for (uri, inv), fetching it on first use only."""
        cache_key = (uri, inv)
        if cache_key not in self.inventories:
            self.inventories[cache_key] = self.real_fetch_inventory(app, uri, inv)
        return self.inventories[cache_key]
def known_nicknames():
    """Return all transformer nicknames plus the literal 'transformer',
    ordered longest-first.

    Longest-first ordering lets callers match the most specific nickname
    before any shorter name that could be a substring of it.
    """
    # dict.values() replaces the redundant `list(v for k, v in d.items())`.
    nicknames = list(TRANSFORMER_NICKNAMES.values())
    nicknames.append('transformer')
    # Stable descending-length sort; equivalent to key=lambda x: -len(x)
    # (Python's sort keeps equal-length names in their original order even
    # with reverse=True).
    nicknames.sort(key=len, reverse=True)
    return nicknames
def test_is_invertible_module_wrapped():
    """is_invertible_module must reject wrappers around non-invertible or
    numerically-wrong modules and accept a genuine additive coupling."""
    X = torch.zeros(1, 10, 10, 10)
    # A plain Conv2d has no inverse -> not invertible even when wrapped.
    assert (not is_invertible_module(InvertibleModuleWrapper(torch.nn.Conv2d(10, 10, kernel_size=(1, 1))), test_input_shape=X.shape))
    # An additive coupling is invertible by construction.
    fn = InvertibleModuleWrapper(AdditiveCoupling(SubModule(), implementation_bwd=(- 1), implementation_fwd=(- 1)))
    assert is_invertible_module(fn, test_input_shape=X.shape)
    # A module whose 'inverse' does not actually undo forward must be rejected.
    class FakeInverse(torch.nn.Module):
        def forward(self, x):
            return (x * 4)
        def inverse(self, y):
            return (y * 8)
    assert (not is_invertible_module(InvertibleModuleWrapper(FakeInverse()), test_input_shape=X.shape))
_task('denoising')
class DenoisingTask(FairseqTask):
def add_args(parser):
parser.add_argument('data', help='path to data directory')
parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for dataset')
parser.add_argument('--sample-break-mode', default='complete_doc', type=str, help='mode for breaking sentence')
parser.add_argument('--mask', default=0.0, type=float, help='fraction of words/subwords that will be masked')
parser.add_argument('--mask-random', default=0.0, type=float, help='instead of using [MASK], use random token this often')
parser.add_argument('--insert', default=0.0, type=float, help='insert this percentage of additional random tokens')
parser.add_argument('--permute', default=0.0, type=float, help='take this proportion of subwords and permute them')
parser.add_argument('--rotate', default=0.5, type=float, help='rotate this proportion of inputs')
parser.add_argument('--poisson-lambda', default=3.0, type=float, help='randomly shuffle sentences for this proportion of inputs')
parser.add_argument('--permute-sentences', default=0.0, type=float, help='shuffle this proportion of sentences in all inputs')
parser.add_argument('--mask-length', default='subword', type=str, choices=['subword', 'word', 'span-poisson'], help='mask length to choose')
parser.add_argument('--replace-length', default=(- 1), type=int, help='when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
self.mask_idx = self.dictionary.add_symbol('<mask>')
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
if (not hasattr(args, 'shuffle_instance')):
args.shuffle_instance = False
return cls(args, dictionary)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
paths = self.args.data.split(os.pathsep)
assert (len(paths) > 0)
data_path = paths[(epoch % len(paths))]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(split_path, self.dictionary, self.args.dataset_impl, combine=combine)
if (dataset is None):
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
dataset = StripTokenDataset(dataset, self.dictionary.eos())
dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 2), pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, document_sep_len=0)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, self.source_dictionary.eos())
mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary) if (self.args.mask_length != 'subword') else None)
self.datasets[split] = DenoisingDataset(dataset, dataset.sizes, self.dictionary, self.mask_idx, mask_whole_words, shuffle=self.args.shuffle_instance, seed=self.seed, args=self.args)
logger.info('Split: {0}, Loaded {1} samples of denoising_dataset'.format(split, len(self.datasets[split])))
def max_positions(self):
return (self.args.max_source_positions, self.args.max_target_positions)
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
    """The task's (shared) target vocabulary; same dictionary as the source.

    Declared as a property for attribute-style access, mirroring
    ``source_dictionary``.
    """
    return self.dictionary
.unit
.convert
def test_filter_on_extension_with_predicate():
    """Files matched by the predicate are dropped even when the extension matches."""
    candidates = ['file_one.fits', 'file_two.fits', 'file_three.exclude']

    def predicate(filename):
        return filename == candidates[1]

    result = convert.filter_on_extension(candidates, ['fits'], predicate)
    assert result == candidates[:1]
def less_equal_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward pass for less_equal: the comparison is non-differentiable,
    so no gradient flows to any argument."""
    n_args = len(grad_inputs) + len(inputs)
    return [None for _ in range(n_args)]
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac], debug=True)
def test_print_matrix_fstring():
    """Exercise f-string printing of matrix/vector fields from Taichi code."""
    x = ti.Matrix.field(2, 3, dtype=ti.f32, shape=())
    y = ti.Vector.field(3, dtype=ti.f32, shape=3)
    # NOTE(review): `func` looks like it should be a @ti.kernel (the test's
    # own decorator on the line above appears stripped too) — confirm before
    # relying on device-side printing semantics.
    def func(k: ti.f32):
        x[None][(0, 0)] = (- 1.0)
        y[2] += 1.0
        print(f'hello {x[None]} world!')
        print(f'{(y[2] * k)} {(x[None] / k)} {y[2]}')
    func(233.3)
    # Flush asynchronous device-side prints before the test ends.
    ti.sync()
class GraphSAINT(GraphSamplingBase):
    """GraphSAINT-style GNN trained on random-walk-sampled subgraphs.

    Builds a stack of SAGE-style convolutions and a random-walk sampler based
    train loader. When ``use_norm`` is set, GraphSAINT's edge/node
    normalization coefficients are applied and 'add' aggregation is used;
    otherwise plain 'mean' aggregation.
    """

    def __init__(self, args, data, train_idx, processed_dir):
        super(GraphSAINT, self).__init__(args, data, train_idx, processed_dir)
        self.use_norm = args.use_norm
        self.dropout = args.dropout
        self.args = args
        # Pick the conv implementation by gnn_type.
        if (args.gnn_type == 'gnn'):
            base_gnnconv = SAGEConv
        elif (args.gnn_type == 'mlp'):
            base_gnnconv = SAGEConvMLP
        else:
            # NOTE(review): unknown gnn_type values silently fall back to SAGEConv.
            base_gnnconv = SAGEConv
        # num_layers convs: num_feats -> hidden -> ... -> hidden -> num_classes.
        self.convs = torch.nn.ModuleList()
        self.convs.append(base_gnnconv(self.num_feats, self.dim_hidden))
        for _ in range((self.num_layers - 2)):
            self.convs.append(base_gnnconv(self.dim_hidden, self.dim_hidden))
        self.convs.append(base_gnnconv(self.dim_hidden, self.num_classes))
        (row, col) = self.edge_index = data.edge_index
        # Per-edge weight 1/deg(col) as used by GraphSAINT's estimators.
        data.edge_weight = (1.0 / degree(col, data.num_nodes)[col])
        self.train_loader = RWSampler(data, self.batch_size, args.walk_length, num_steps=args.num_steps, save_dir=self.save_dir, sample_coverage=args.sample_coverage, num_workers=0)
        self.reset_parameters()
        self.saved_args = vars(args)
        # Normalized training requires 'add' aggregation; otherwise use 'mean'.
        aggr = ('add' if self.use_norm else 'mean')
        self.set_aggr(aggr)

    def reset_parameters(self):
        """Re-initialize all conv layers."""
        for conv in self.convs:
            conv.reset_parameters()

    def set_aggr(self, aggr):
        """Set the message aggregation mode ('add' or 'mean') on every conv."""
        for conv in self.convs:
            conv.aggr = aggr

    def forward(self, x, edge_index, edge_weight=None):
        # ReLU + dropout between layers; raw logits from the last layer.
        for (i, conv) in enumerate(self.convs):
            x = conv(x, edge_index, edge_weight)
            if (i != (len(self.convs) - 1)):
                x = F.relu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)
        return x

    def train_net(self, input_dict):
        """Train one epoch over sampled subgraphs.

        Returns:
            (mean batch loss, correct predictions / effective train size).
        """
        device = input_dict['device']
        optimizer = input_dict['optimizer']
        loss_op = input_dict['loss_op']
        total_loss = total_correct = 0
        for batch in tqdm(self.train_loader):
            batch = batch.to(device)
            if (batch.train_mask.sum() == 0):
                # Sampled subgraph contains no training nodes; skip it.
                continue
            optimizer.zero_grad()
            if self.use_norm:
                # GraphSAINT edge normalization.
                edge_weight = (batch.edge_norm * batch.edge_weight)
                out = self(batch.x, batch.edge_index, edge_weight)
            else:
                out = self(batch.x, batch.edge_index)
            if isinstance(loss_op, torch.nn.NLLLoss):
                out = F.log_softmax(out, dim=(- 1))
                if self.use_norm:
                    # Node-norm-weighted NLL restricted to training nodes.
                    loss = F.nll_loss(out, batch.y, reduction='none')
                    loss = (loss * batch.node_norm)[batch.train_mask].sum()
                else:
                    loss = loss_op(out[batch.train_mask], batch.y[batch.train_mask])
            else:
                # Multi-label case: cast targets to the output dtype.
                loss = loss_op(out[batch.train_mask], batch.y[batch.train_mask].type_as(out))
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.parameters(), 5)
            optimizer.step()
            total_loss += float(loss.item())
            if isinstance(loss_op, torch.nn.NLLLoss):
                total_correct += int(out.argmax(dim=(- 1)).eq(batch.y).sum())
            else:
                total_correct += int(out.eq(batch.y).sum())
        # For multi-label losses, accuracy is counted per (node, class) pair.
        train_size = (self.train_size if isinstance(loss_op, torch.nn.NLLLoss) else (self.train_size * self.num_classes))
        return ((total_loss / len(self.train_loader)), (total_correct / train_size))
def createLabelImage(annotation, encoding, outline=None):
    """Rasterize a polygon annotation into a label image.

    Args:
        annotation: object with imgWidth/imgHeight and a list of `objects`,
            each carrying a `label` string and a `polygon` point list.
        encoding: 'ids', 'trainIds', or 'color' — selects the pixel value
            drawn per label ('color' produces an RGBA image, the others 'L').
        outline: optional outline value forwarded to ImageDraw.polygon.

    Returns:
        A PIL Image, or None when `encoding` is unknown.
    """
    size = (annotation.imgWidth, annotation.imgHeight)
    # Background value for the chosen encoding.
    if (encoding == 'ids'):
        background = name2label['unlabeled'].id
    elif (encoding == 'trainIds'):
        background = name2label['unlabeled'].trainId
    elif (encoding == 'color'):
        background = name2label['unlabeled'].color
    else:
        print("Unknown encoding '{}'".format(encoding))
        return None
    # Only the color encoding needs an RGBA canvas; ids fit in a single band.
    mode = 'RGBA' if (encoding == 'color') else 'L'
    labelImg = Image.new(mode, size, background)
    drawer = ImageDraw.Draw(labelImg)
    for obj in annotation.objects:
        label = obj.label
        polygon = obj.polygon
        # 'carGroup' etc. fall back to the base label when unknown.
        if ((not (label in name2label)) and label.endswith('group')):
            label = label[:(- len('group'))]
        if (not (label in name2label)):
            printError("Label '{}' not known.".format(label))
        if (name2label[label].id < 0):
            # Negative ids mark labels that must not be drawn.
            continue
        # Pixel value for this label under the chosen encoding (encoding was
        # validated above, so exactly one branch assigns val).
        if (encoding == 'ids'):
            val = name2label[label].id
        elif (encoding == 'trainIds'):
            val = name2label[label].trainId
        elif (encoding == 'color'):
            val = name2label[label].color
        try:
            # ImageDraw.polygon defaults outline to None, so passing it
            # through unconditionally is equivalent to the old two-branch call.
            drawer.polygon(polygon, fill=val, outline=outline)
        except Exception:
            # Narrowed from a bare except; report context, then propagate.
            print('Failed to draw polygon with label {}'.format(label))
            raise
    return labelImg
class EllipticEU(BuiltinFunction):
    """Sage symbolic function ``elliptic_eu(u, m)``.

    Jacobi-form incomplete elliptic integral of the second kind, mapped to
    Maxima's ``elliptic_eu``.
    """

    def __init__(self):
        BuiltinFunction.__init__(self, 'elliptic_eu', nargs=2, conversions=dict(maxima='elliptic_eu'))

    def _eval_(self, u, m):
        # No symbolic simplification: the call always stays unevaluated.
        pass

    def _evalf_(self, u, m, parent=None, algorithm=None):
        """Numerically evaluate via mpmath in the requested parent ring."""
        R = (parent or s_parent(u))
        return _mpmath_utils_call(elliptic_eu_f, u, m, parent=R)

    def _derivative_(self, u, m, diff_param):
        """Derivative w.r.t. u (diff_param 0) or m (diff_param 1), in terms
        of Jacobi elliptic functions."""
        from sage.functions.jacobi import jacobi, jacobi_am
        if (diff_param == 0):
            return (sqrt((((- m) * (jacobi('sn', u, m) ** Integer(2))) + Integer(1))) * jacobi('dn', u, m))
        elif (diff_param == 1):
            return ((((Integer(1) / Integer(2)) * (elliptic_eu(u, m) - elliptic_f(jacobi_am(u, m), m))) / m) - ((((Integer(1) / Integer(2)) * sqrt((((- m) * (jacobi('sn', u, m) ** Integer(2))) + Integer(1)))) * ((((m * jacobi('sn', u, m)) * jacobi('cn', u, m)) - ((m - Integer(1)) * u)) - (elliptic_eu(u, m) * jacobi('dn', u, m)))) / ((m - Integer(1)) * m)))

    def _print_latex_(self, u, m):
        # LaTeX form E(u; m).
        return ('E(%s;%s)' % (latex(u), latex(m)))
class CodeVisitor(ast.NodeVisitor):
    """AST visitor that rewrites additions to subtractions, logs assignments
    and name references, and instruments every function with an entry
    ``print('calling func: <name>')`` statement."""

    def visit_BinOp(self, node):
        # Replace `+` with `-` in place; the tree holds the same node object.
        if isinstance(node.op, ast.Add):
            node.op = ast.Sub()
        self.generic_visit(node)

    def visit_Assign(self, node):
        print(('Assign %s' % node.value))
        self.generic_visit(node)

    def visit_Name(self, node):
        print('Name:', node.id)
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        # FunctionDef.name is a plain str — the previous `node.name.op`
        # raised AttributeError on every function.
        print(('Function Name:%s' % node.name))
        self.generic_visit(node)
        # Python 3 removed the ast.Print statement node; emit an equivalent
        # expression statement calling the print() builtin instead.
        func_log_stmt = ast.Expr(
            value=ast.Call(
                func=ast.Name(id='print', ctx=ast.Load()),
                args=[ast.Constant(value=('calling func: %s' % node.name))],
                keywords=[],
            )
        )
        # Fill in lineno/col_offset so the instrumented tree compiles.
        ast.fix_missing_locations(func_log_stmt)
        node.body.insert(0, func_log_stmt)
class BertNer(BertForTokenClassification):
    """BERT token classifier that compacts hidden states before classification.

    Positions flagged in ``valid_ids`` (presumably the first sub-token of each
    word — confirm against the data pipeline) are moved to the front of each
    sequence; remaining positions stay zero.
    """

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, valid_ids=None, device=None):
        # [0] selects the last hidden states: (batch, seq_len, hidden).
        sequence_output = self.bert(input_ids, token_type_ids, attention_mask, head_mask=None)[0]
        (batch_size, max_len, feat_dim) = sequence_output.shape
        # NOTE(review): the caller-supplied `device` is only honored when CUDA
        # is available; otherwise the buffer is forced to CPU — confirm intended.
        valid_output = torch.zeros(batch_size, max_len, feat_dim, dtype=torch.float32, device=(device if torch.cuda.is_available() else 'cpu'))
        # Compact each sequence: copy rows where valid_ids == 1 to the front.
        for i in range(batch_size):
            jj = (- 1)
            for j in range(max_len):
                if (valid_ids[i][j].item() == 1):
                    jj += 1
                    valid_output[i][jj] = sequence_output[i][j]
        sequence_output = self.dropout(valid_output)
        logits = self.classifier(sequence_output)
        return logits
class langchain_openai_llm():
    """Thin wrapper around a LangChain OpenAI LLM with a pass-through prompt."""

    def __init__(self, llm_name):
        openai.api_key = OPENAI_API_KEY
        # Identity template: the caller's prompt string is used verbatim.
        self.prompt_temp = PromptTemplate(input_variables=['prompt'], template='{prompt}')
        self.llm_name = llm_name

    def run(self, prompt, temperature=0.9, stop=None, max_tokens=128):
        """Run the prompt through the model and return the completion text.

        Args:
            prompt: user prompt string.
            temperature: sampling temperature.
            stop: stop sequences; defaults to ['\\n'] (the previous code
                accepted `stop` but ignored it, always passing ['\\n']).
                The default is created per call to avoid a shared mutable
                default argument.
            max_tokens: completion length cap.
        """
        if stop is None:
            stop = ['\n']
        llm = OpenAI(model=self.llm_name, temperature=temperature, stop=stop, max_tokens=max_tokens)
        chain = LLMChain(llm=llm, prompt=self.prompt_temp)
        return chain.run(prompt)
def AddParameterUpdate(model):
    """Append plain-SGD parameter-update ops to a caffe2 model.

    Uses a step learning-rate schedule (decay by gamma every `stepsize`
    iterations). base_lr is negative — presumably because WeightedSum *adds*
    LR * grad, so descent needs a negative rate; confirm against the caffe2
    optimizer convention.
    """
    ITER = model.Iter('iter')
    LR = model.LearningRate(ITER, 'LR', base_lr=(- 1e-08), policy='step', stepsize=10000, gamma=0.999)
    ONE = model.param_init_net.ConstantFill([], 'ONE', shape=[1], value=1.0)
    for param in model.params:
        param_grad = model.param_to_grad[param]
        # In-place: param := 1.0 * param + LR * param_grad
        model.WeightedSum([param, ONE, param_grad, LR], param)
def _smallest_size_at_least(height, width, smallest_side):
    """Compute a resized (height, width) whose shorter side equals
    `smallest_side`, preserving aspect ratio (TF1 graph ops)."""
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
    height_f = tf.to_float(height)
    width_f = tf.to_float(width)
    side_f = tf.to_float(smallest_side)
    # Scale relative to whichever dimension is smaller.
    scale = tf.cond(
        tf.greater(height_f, width_f),
        lambda: side_f / width_f,
        lambda: side_f / height_f,
    )
    new_height = tf.to_int32(height_f * scale)
    new_width = tf.to_int32(width_f * scale)
    return (new_height, new_width)
def assert_graphql(schema):
    """Sanity-check a GraphQL schema: exactly 4 operations, none a mutation.

    The previous version defined an unused inner `filter_operations` helper
    (dead code, removed) and called `get_all_operations()` twice.
    """
    operations = list(schema.get_all_operations())
    assert (len(operations) == 4)
    for operation in operations:
        # Each operation is a result wrapper; unwrap before inspecting.
        assert (not operation.ok().definition.is_mutation)
def get_training_config(config_path, model_name, dataset):
    """Load the YAML training config and merge global defaults with any
    dataset/model-specific overrides; the model name is recorded in the
    returned mapping."""
    with open(config_path, 'r') as conf:
        full_config = yaml.load(conf, Loader=yaml.FullLoader)
    base_config = full_config['global']
    overrides = full_config[dataset][model_name]
    # Overrides win over the global defaults when present.
    if overrides is None:
        specific_config = base_config
    else:
        specific_config = {**base_config, **overrides}
    specific_config['model_name'] = model_name
    return specific_config
def build_transformer_decoder(cfg, in_channels, mask_classification=True):
    """Instantiate the transformer decoder named in the M2FP config via the
    decoder registry."""
    decoder_name = cfg.MODEL.M2FP.TRANSFORMER_DECODER_NAME
    decoder_cls = TRANSFORMER_DECODER_REGISTRY.get(decoder_name)
    return decoder_cls(cfg, in_channels, mask_classification)
def optimal_state_change(state_tensor, action_tensor, lens, delta, kappa, max_action_state_distance=500):
    """Thin wrapper around the compiled `_lookforthechange_ops` kernel.

    Both tensors are made contiguous before the call — the native op
    presumably requires contiguous memory; TODO confirm against the extension.
    """
    return _lookforthechange_ops.optimal_state_change(state_tensor.contiguous(), action_tensor.contiguous(), lens, delta, kappa, max_action_state_distance)
def test_adaptive_padding():
    """AdaptivePadding pads inputs so the kernel/stride tiles them exactly,
    for both 'same' and 'corner' modes; non-string padding must raise."""
    for padding in ('same', 'corner'):
        # Both spatial dims padded up to multiples of 16.
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        adap_pool = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        out = adap_pool(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))
        input = torch.rand(1, 1, 16, 17)
        out = adap_pool(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))
        # Small 2x2 kernel: round odd sizes up to even.
        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (12, 14))
        # Stride larger than the input: already tiles, no padding added.
        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (10, 13))
        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (21, 21))
        # A (4,5) kernel with dilation 2 has the same effective extent as a
        # (7,9) kernel with dilation 1, so both must pad identically.
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        dilation_out = adap_pad(input)
        assert ((dilation_out.shape[2], dilation_out.shape[3]) == (16, 21))
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        kernel79_out = adap_pad(input)
        assert ((kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21))
        assert (kernel79_out.shape == dilation_out.shape)
    # Only string padding modes are legal.
    with pytest.raises(AssertionError):
        AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=1)
_module()
class ABIConvertor(AttnConvertor):
    """Label convertor for ABINet: each target sequence ends with <EOS>, and
    the padded variant is a <BOS>-prefixed copy padded (or truncated) to
    ``max_seq_len`` with ``padding_idx``."""

    def str2tensor(self, strings):
        """Convert text strings into target tensors.

        Returns:
            dict with 'targets' (list of 1-D LongTensors ending in end_idx)
            and 'padded_targets' (N x max_seq_len LongTensor).
        """
        assert utils.is_type_list(strings, str)

        tensors = []
        padded_targets = []
        for index in self.str2idx(strings):
            # Truncate to leave room for <EOS>, then append it.
            target = torch.LongTensor(index[:self.max_seq_len - 1] + [self.end_idx])
            tensors.append(target)
            # <BOS> + target, then pad/truncate to exactly max_seq_len.
            src_target = torch.cat([torch.LongTensor([self.start_idx]), target])
            padded = torch.full((self.max_seq_len,), self.padding_idx, dtype=torch.long)
            char_num = src_target.size(0)
            if char_num > self.max_seq_len:
                padded = src_target[:self.max_seq_len]
            else:
                padded[:char_num] = src_target
            padded_targets.append(padded)

        return {
            'targets': tensors,
            'padded_targets': torch.stack(padded_targets, 0).long(),
        }
def pytest_configure(config):
    """Register the custom `slow` marker so pytest does not warn about it."""
    marker_line = 'slow: marks test as slow (deselect with \'-m "not slow"\')'
    config.addinivalue_line('markers', marker_line)
def __compute_auc_roc(y, loss_mean, loss_max, loss_top6_mean, scores_top6_max_prob, scores_top6_min_logprob, scores_top6_max_entropy, plot_graph=False, plot_histogram=False):
    """Compute (and optionally plot) the AUC-ROC for every uncertainty metric
    against the labels `y`, in a fixed reporting order."""
    named_metrics = [
        (loss_mean, 'loss_mean'),
        (loss_max, 'loss_max'),
        (loss_top6_mean, 'loss_top6_mean'),
        (scores_top6_max_prob, 'scores_top6_max_prob'),
        (scores_top6_min_logprob, 'scores_top6_min_logprob'),
        (scores_top6_max_entropy, 'scores_top6_max_entropy'),
    ]
    for metric, name in named_metrics:
        __compute_auc_roc_for_metric(y=y, metric=metric, metric_name_str=name, plot_graph=plot_graph, plot_histogram=plot_histogram)
class Finalize(Transition):
    """Parser transition that wraps the single remaining constituent under the
    given top-level label(s), completing the parse."""

    def __init__(self, *label):
        self.label = tuple(label)

    def update_state(self, state, model):
        """Pop the last constituent and re-emit it under self.label."""
        node_stack = state.constituents
        children = [node_stack.value]
        node_stack = node_stack.pop()
        return (state.word_position, node_stack, (self.label, children), CloseConstituent)

    def is_legal(self, state, model):
        # Legal only at the very end: no words pending, exactly one
        # constituent left, and the parse not already finished.
        return (state.empty_word_queue() and state.has_one_constituent() and (not state.finished(model)))

    def short_name(self):
        return 'Finalize'

    def __repr__(self):
        return ('Finalize(%s)' % ','.join(self.label))

    def __eq__(self, other):
        if (self is other):
            return True
        return isinstance(other, Finalize) and (other.label == self.label)

    def __hash__(self):
        # 53 salts the hash so Finalize(x) differs from other transitions on x.
        return hash((53, self.label))
class GlobalMaxPool(nn.AdaptiveMaxPool2d):
    """Adaptive max pooling defaulting to a 1x1 output (global max pooling).

    Extra positional/keyword arguments are accepted for interface
    compatibility with sibling pooling layers and ignored.
    """

    def __init__(self, output_size=1, *args, **kwargs):
        super(GlobalMaxPool, self).__init__(output_size)
def reset_nan_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, val=0):
    """Backward pass for reset_nan — not implemented.

    Raises:
        NotImplementedError: always. The previous stub indexed
        grad_inputs[0]/inputs[0] into dead locals first, which could raise
        IndexError on empty lists instead of the intended error.
    """
    raise NotImplementedError('reset_nan_backward is not implemented.')
class TrainArgs(alpaca.TrainArgs):
    """Alpaca training arguments extended with LoRA fine-tuning options."""
    # LoRA adapter configuration (rank, alpha, target modules, ...).
    # NOTE(review): a class-level LoraConfig() instance is shared unless the
    # base class is a dataclass/pydantic model that copies defaults — confirm.
    lora: LoraConfig = LoraConfig()
    # Local path to save the merged (base + LoRA) HF checkpoint, if any.
    merged_hf_save_path: Optional[str] = None
    # Hub repo id to upload the merged checkpoint to, if any.
    merged_hf_upload: Optional[str] = None
def get_command(id_):
    """Resolve a job id of the form '<command-key>$$$<checkpoint-path>' into a
    runnable shell command.

    Builds the full table of dataset x model-config x split evaluation
    commands (bart at several source lengths, LED at several windows) and
    returns the prepared command for the requested key.
    """
    os.environ['DEBUG'] = os.environ.get('DEBUG', 'false')
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    commands_dict = {}
    # id_ encodes both the command key and the checkpoint path.
    split_id = id_.split('$$$')
    checkpoint_path = split_id[1]
    id_ = split_id[0]
    num_gpus = 1
    # Per-model argument bundles; FB_BART_* / ALLEN_AI_* are module-level config.
    fb_256_bart_args = [f'--max_source_length 256', f'--max_target_length {FB_BART_MAX_LEN}', f'--fp16 {FB_BART_FP16}', f'--per_device_eval_batch_size {FB_BART_per_device_eval_batch_size}']
    fb_512_bart_args = [f'--max_source_length 512', f'--max_target_length {FB_BART_MAX_LEN}', f'--fp16 {FB_BART_FP16}', f'--per_device_eval_batch_size {FB_BART_per_device_eval_batch_size}']
    fb_1024_bart_args = [f'--max_source_length {FB_BART_MAX_LEN}', f'--max_target_length {FB_BART_MAX_LEN}', f'--fp16 {FB_BART_FP16}', f'--per_device_eval_batch_size {FB_BART_per_device_eval_batch_size}']
    allenai_led_args = [f'--attention_window {ALLEN_AI_ATTENTION_WINDOW}', f'--max_target_length {ALLEN_AI_MAX_TARGET_LEN}', f'--fp16 {ALLEN_AI_FP16}', f'--per_device_eval_batch_size {ALLEN_AI_per_device_eval_batch_size}']
    # torch.distributed launcher is only needed for multi-GPU runs.
    distributed_str = (f'-m torch.distributed.run --nproc_per_node={num_gpus}' if (num_gpus > 1) else '')
    for dataset in ['qasper', 'narrative_qa', 'gov_report', 'summ_screen_fd', 'qmsum', 'contract_nli', 'quality', 'quality_difficult']:
        base_args = [f'python {distributed_str} src/run.py', f'configs/datasets/{dataset}.json', f'--model_name_or_path {checkpoint_path}', '--m configs/no_metrics.json', '--logging_steps 10', '--preprocessing_num_workers 1', '--predict_with_generate True', '--drop_duplicates_in_eval True', '--num_beams 1']
        if (dataset == 'narrative_qa'):
            base_args.append('--trim_very_long_strings')
        # One validation ('--do_eval') and one test ('--do_predict') entry
        # per model configuration.
        commands_dict[f'{dataset}_256-bart_validation'] = ((base_args + fb_256_bart_args) + ['--do_eval True'])
        commands_dict[f'{dataset}_256-bart_test'] = ((base_args + fb_256_bart_args) + ['--do_predict True'])
        commands_dict[f'{dataset}_512-bart_validation'] = ((base_args + fb_512_bart_args) + ['--do_eval True'])
        commands_dict[f'{dataset}_512-bart_test'] = ((base_args + fb_512_bart_args) + ['--do_predict True'])
        commands_dict[f'{dataset}_1024-bart_validation'] = ((base_args + fb_1024_bart_args) + ['--do_eval True'])
        commands_dict[f'{dataset}_1024-bart_test'] = ((base_args + fb_1024_bart_args) + ['--do_predict True'])
        commands_dict[f'{dataset}_led-1024_validation'] = ((base_args + allenai_led_args) + ['--do_eval True', '--global_attention_first_token True', '--max_source_length 1024'])
        commands_dict[f'{dataset}_led-1024_test'] = ((base_args + allenai_led_args) + ['--do_predict True', '--global_attention_first_token True', '--max_source_length 1024'])
        commands_dict[f'{dataset}_led-4096_validation'] = ((base_args + allenai_led_args) + ['--do_eval True', '--global_attention_first_token True', '--max_source_length 4096'])
        commands_dict[f'{dataset}_led-4096_test'] = ((base_args + allenai_led_args) + ['--do_predict True', '--global_attention_first_token True', '--max_source_length 4096'])
        commands_dict[f'{dataset}_led-16384_validation'] = ((base_args + allenai_led_args) + ['--do_eval True', '--global_attention_first_token True', '--max_source_length 16384'])
        commands_dict[f'{dataset}_led-16384_test'] = ((base_args + allenai_led_args) + ['--do_predict True', '--global_attention_first_token True', '--max_source_length 16384'])
    command_parts = commands_dict[id_]
    return prep_command(command_parts)
.torch
def test_bert_validation_dataset_getitem(sequential_dataset):
    """Check item 2 of a Bert4RecValidationDataset with max length 8:
    padding/token masks and the padded ground-truth/train label tensors."""
    batch = Bert4RecValidationDataset(sequential_dataset, sequential_dataset, sequential_dataset, 8)[2]
    assert (batch.query_id.item() == 2)
    # Presumably left-padded to length 8 with the real tokens at the tail —
    # confirm against the dataset implementation.
    assert all((batch.padding_mask == torch.tensor([0, 0, 0, 0, 0, 0, 1, 1], dtype=torch.bool)))
    # Only the position before the last token is masked for prediction.
    assert all((batch.tokens_mask == torch.tensor([0, 0, 0, 0, 0, 0, 1, 0], dtype=torch.bool)))
    # Labels padded with -1 (ground truth) and -2 (train history).
    assert all((batch.ground_truth == torch.tensor([1, (- 1), (- 1), (- 1), (- 1), (- 1)])))
    assert all((batch.train == torch.tensor([1, (- 2), (- 2), (- 2), (- 2), (- 2)])))
def merge_models_nodes(inner_model_node: 'BaseNode', outer_graph: 'Graph', inner_graph: 'Graph') -> 'List[BaseNode]':
    """Build the node list of a merged graph.

    Combines the outer graph's nodes with the inner graph's nodes, dropping
    the inner graph's input nodes and the outer node that represented the
    inner model (it is replaced by the inner graph's contents).

    Annotations are quoted (lazy) so the function can be imported without the
    type names being resolvable at definition time; list(...) already copies,
    so the former copy.copy(list(...)) was redundant.

    Args:
        inner_model_node: node of the outer graph that stands for the inner model.
        outer_graph: graph containing ``inner_model_node``.
        inner_graph: graph whose contents replace ``inner_model_node``.

    Returns:
        New list of merged nodes; neither graph's node collection is mutated.
    """
    merged = list(outer_graph.nodes)
    merged.extend(inner_graph.nodes)
    for input_node in inner_graph.get_inputs():
        merged.remove(input_node)
    merged.remove(inner_model_node)
    return merged
def mis_resblock(x_init, z, channels, use_bias=True, sn=False, scope='mis_resblock'):
    """Residual block conditioned on a latent code z (TF1 graph code).

    z is broadcast over the spatial dims of x_init and concatenated after
    each instance-normalized 3x3 conv; two 1x1 convs then mix it back down
    to `channels`. The block output is added to the input (residual).
    """
    with tf.variable_scope(scope):
        # Tile z from (B, C_z) to (B, H, W, C_z) to match x_init spatially.
        z = tf.reshape(z, shape=[(- 1), 1, 1, z.shape[(- 1)]])
        z = tf.tile(z, multiples=[1, x_init.shape[1], x_init.shape[2], 1])
        with tf.variable_scope('mis1'):
            x = conv(x_init, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn, scope='conv3x3')
            x = instance_norm(x)
            # Inject the latent code along the channel axis.
            x = tf.concat([x, z], axis=(- 1))
            x = conv(x, (channels * 2), kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='conv1x1_0')
            x = relu(x)
            x = conv(x, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='conv1x1_1')
            x = relu(x)
        with tf.variable_scope('mis2'):
            x = conv(x, channels, kernel=3, stride=1, pad=1, pad_type='reflect', use_bias=use_bias, sn=sn, scope='conv3x3')
            x = instance_norm(x)
            x = tf.concat([x, z], axis=(- 1))
            x = conv(x, (channels * 2), kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='conv1x1_0')
            x = relu(x)
            x = conv(x, channels, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope='conv1x1_1')
            x = relu(x)
        # Residual connection.
        return (x + x_init)
def write_wavs(model, inputs, output_dir, external_vocoder=None):
    """Run ONNX TTS inference, write wav files, and print timing/RTF stats.

    If `external_vocoder` is None the model graph emits waveforms directly;
    otherwise `model` emits mel spectrograms that `external_vocoder` (a second
    ONNX session) converts to audio. Files are written as output_<i>.wav at
    22050 Hz, PCM_24.
    """
    if (external_vocoder is None):
        print('The provided model has the vocoder embedded in the graph.\nGenerating waveform directly')
        t0 = perf_counter()
        (wavs, wav_lengths) = model.run(None, inputs)
        infer_secs = (perf_counter() - t0)
        mel_infer_secs = vocoder_infer_secs = None
    else:
        print('[] Generating mel using Matcha')
        mel_t0 = perf_counter()
        (mels, mel_lengths) = model.run(None, inputs)
        mel_infer_secs = (perf_counter() - mel_t0)
        print('Generating waveform from mel using external vocoder')
        vocoder_inputs = {external_vocoder.get_inputs()[0].name: mels}
        vocoder_t0 = perf_counter()
        wavs = external_vocoder.run(None, vocoder_inputs)[0]
        vocoder_infer_secs = (perf_counter() - vocoder_t0)
        wavs = wavs.squeeze(1)
        # 256 is presumably the vocoder hop length — confirm against the model.
        wav_lengths = (mel_lengths * 256)
        infer_secs = (mel_infer_secs + vocoder_infer_secs)
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    for (i, (wav, wav_length)) in enumerate(zip(wavs, wav_lengths)):
        output_filename = output_dir.joinpath(f'output_{(i + 1)}.wav')
        # Trim padding beyond the true waveform length.
        audio = wav[:wav_length]
        print(f'Writing audio to {output_filename}')
        sf.write(output_filename, audio, 22050, 'PCM_24')
    # Real-time factors relative to total generated audio at 22050 Hz.
    wav_secs = (wav_lengths.sum() / 22050)
    print(f'Inference seconds: {infer_secs}')
    print(f'Generated wav seconds: {wav_secs}')
    rtf = (infer_secs / wav_secs)
    if (mel_infer_secs is not None):
        mel_rtf = (mel_infer_secs / wav_secs)
        print(f'Matcha RTF: {mel_rtf}')
    if (vocoder_infer_secs is not None):
        vocoder_rtf = (vocoder_infer_secs / wav_secs)
        print(f'Vocoder RTF: {vocoder_rtf}')
    print(f'Overall RTF: {rtf}')
def get_strategy():
    """Return a TPUStrategy when a TPU is reachable, else TF's default strategy."""
    try:
        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
        print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])
        tf.config.experimental_connect_to_cluster(tpu)
        tf.tpu.experimental.initialize_tpu_system(tpu)
        strategy = tf.distribute.experimental.TPUStrategy(tpu)
    except ValueError as e:
        # TPUClusterResolver raises ValueError when no TPU is configured.
        print(e)
        print('No TPU detected')
        tpu = None
        strategy = tf.distribute.get_strategy()
    return strategy
class SymmetricFunctionAlgebra_elementary(multiplicative.SymmetricFunctionAlgebra_multiplicative):
    """The elementary basis ('e') realization of the symmetric functions."""

    def __init__(self, Sym):
        # Register this basis on the symmetric-function tower as 'elementary'/'e'.
        classical.SymmetricFunctionAlgebra_classical.__init__(self, Sym, 'elementary', 'e')

    def _dual_basis_default(self):
        """The default dual basis of e is the forgotten basis 'f'."""
        return self.dual_basis(scalar=None, prefix='f', basis_name='forgotten')

    def coproduct_on_generators(self, i):
        """Coproduct on e_i: sum over j of e_j tensor e_{i-j}."""
        def P(i):
            # Partition [i] for i > 0, the empty partition for i == 0.
            return (Partition([i]) if i else Partition([]))
        T = self.tensor_square()
        return T.sum_of_monomials(((P(j), P((i - j))) for j in range((i + 1))))

    class Element(classical.SymmetricFunctionAlgebra_classical.Element):
        def omega(self):
            """Image under the omega involution (swaps e_lambda and h_lambda)."""
            e = self.parent()
            h = e.realization_of().h()
            return e(h._from_element(self))
        omega_involution = omega

        def verschiebung(self, n):
            """Apply the n-th Verschiebung operator, computed in the e basis."""
            parent = self.parent()
            e_coords_of_self = self.monomial_coefficients().items()
            # Keep only partitions whose parts are all divisible by n; divide
            # the parts by n and attach the sign (-1)^(|lam| - |lam|/n).
            dct = {Partition([(i // n) for i in lam]): (((- 1) ** (sum(lam) - (sum(lam) // n))) * coeff) for (lam, coeff) in e_coords_of_self if all((((i % n) == 0) for i in lam))}
            result_in_e_basis = parent._from_dict(dct)
            return parent(result_in_e_basis)

        def expand(self, n, alphabet='x'):
            """Expand in n variables; e_k vanishes whenever k > n."""
            condition = (lambda part: (max(part) > n))
            return self._expand(condition, n, alphabet)

        def principal_specialization(self, n=infinity, q=None):
            """Principal specialization x_i -> q^i (stable version for n = infinity)."""
            from sage.combinat.q_analogues import q_binomial
            def get_variable(ring, name):
                # Obtain a fresh polynomial generator `name` over `ring`,
                # refusing names already present in the base ring.
                try:
                    ring(name)
                except TypeError:
                    from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
                    return PolynomialRing(ring, name).gen()
                else:
                    raise ValueError(('the variable %s is in the base ring, pass it explicitly' % name))
            if (q is None):
                q = get_variable(self.base_ring(), 'q')
            if (q == 1):
                if (n == infinity):
                    raise ValueError('the stable principal specialization at q=1 is not defined')
                # e_k -> binomial(n, k) at q = 1.
                f = (lambda partition: prod((binomial(n, part) for part in partition)))
            elif (n == infinity):
                f = (lambda partition: prod((((q ** binomial(part, 2)) / prod(((1 - (q ** i)) for i in range(1, (part + 1))))) for part in partition)))
            else:
                # Finite n: q-binomial coefficients.
                f = (lambda partition: prod((((q ** binomial(part, 2)) * q_binomial(n, part, q=q)) for part in partition)))
            return self.parent()._apply_module_morphism(self, f, q.parent())

        def exponential_specialization(self, t=None, q=1):
            """Exponential specialization; at q = 1, e_n maps to t^n / n!."""
            from sage.combinat.q_analogues import q_factorial
            def get_variable(ring, name):
                # Same fresh-generator helper as in principal_specialization.
                try:
                    ring(name)
                except TypeError:
                    from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
                    return PolynomialRing(ring, name).gen()
                else:
                    raise ValueError(('the variable %s is in the base ring, pass it explicitly' % name))
            if (q == 1):
                if (t is None):
                    t = get_variable(self.base_ring(), 't')
                def f(partition):
                    # Product over parts of t^part / part!.
                    n = 0
                    m = 1
                    for part in partition:
                        n += part
                        m *= factorial(part)
                    return ((t ** n) / m)
                return self.parent()._apply_module_morphism(self, f, t.parent())
            # Generic q: create missing variables in a compatible parent.
            if ((q is None) and (t is None)):
                q = get_variable(self.base_ring(), 'q')
                t = get_variable(q.parent(), 't')
            elif (q is None):
                q = get_variable(t.parent(), 'q')
            elif (t is None):
                t = get_variable(q.parent(), 't')
            def f(partition):
                n = 0
                m = 1
                for part in partition:
                    n += part
                    m *= ((q ** binomial(part, 2)) / q_factorial(part, q=q))
                return ((t ** n) * m)
            return self.parent()._apply_module_morphism(self, f, t.parent())
def build_save_graph(nlp, data_root, split, max_len):
    """Parse every ScanRefer description of `split` into a graph and cache it.

    Each annotation is parsed with `build_graph(nlp, text)` (optionally
    truncated to `max_len` tokens) and saved under
    <data_root>/features/<split>/graph/ with a name encoding scene, object,
    annotation id and, when set, the truncation length.

    Fixes: the JSON file handle from a bare open() was never closed (now a
    `with` block); the exists()==False check is replaced by
    os.makedirs(exist_ok=True); the duplicated max_len branches are merged.
    """
    ann_path = os.path.join(data_root, 'ScanRefer', 'ScanRefer_filtered_' + split + '.json')
    with open(ann_path) as f:
        scanrefer = json.load(f)
    out_dir = os.path.join(data_root, 'features', split, 'graph')
    os.makedirs(out_dir, exist_ok=True)
    for data in tqdm(scanrefer):
        scene_id = data['scene_id']
        object_id = int(data['object_id'])
        ann_id = int(data['ann_id'])
        token = data['token']
        # Optionally truncate the description before parsing.
        words = token[:max_len] if max_len is not None else token
        graph = build_graph(nlp, ' '.join(words))
        # Filename layout matches the original string concatenation exactly.
        stem = f'{scene_id}_{str(object_id).zfill(3)}_{str(ann_id).zfill(3)}'
        if max_len is not None:
            stem += f'_max_len_{str(max_len).zfill(3)}'
        torch.save(graph, os.path.join(out_dir, stem + '.pth'))
class EmailReplyPlayer(RecipePlayer):
    """Scripted player for the email-reply task: click the sender's message,
    hit reply, type the reply body, then send."""

    def __init__(self, state):
        fields = state.fields
        # Locate the inbox entry whose visible text matches the target sender.
        sender_elements = [
            element
            for element in state.dom_elements
            if element.text == fields['by'] and element.ref in EMAIL_SENDER_REFS
        ]
        actions = [
            A.MiniWoBElementClick(sender_elements[0]),
            (EMAIL_REPLY_REF, None),
            (EMAIL_BODY_REF, fields['message']),
            (EMAIL_SEND_REF, None),
        ]
        super(EmailReplyPlayer, self).__init__(actions)
class FirstResBlockDiscriminator(nn.Module):
    """First residual block of the discriminator: a conv-ReLU-conv main path
    plus a 1x1 bypass, both followed by the same average-pool downsample."""

    def __init__(self, in_ch, out_ch, stride=1):
        super().__init__()
        self.model = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, 1, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, 1, padding=1),
        )
        # Downsample only when a stride is requested; identity otherwise.
        if stride != 1:
            self.downsample = nn.AvgPool2d(2, stride=stride, padding=0)
        else:
            self.downsample = nn.Sequential()
        # 1x1 projection only when the channel count changes.
        if in_ch != out_ch:
            self.bypass = nn.Conv2d(in_ch, out_ch, 1, 1, padding=0)
        else:
            self.bypass = nn.Sequential()

    def forward(self, x):
        main = self.downsample(self.model(x))
        shortcut = self.downsample(self.bypass(x))
        return main + shortcut
class InitialBlock(nn.Module):
    """ENet-style initial block: concatenation of a strided 3x3 conv (with
    out_channels - in_channels maps) and a 3x3 max-pool (in_channels maps),
    followed by batch norm and PReLU."""

    def __init__(self, in_channels, out_channels):
        super(InitialBlock, self).__init__()
        # The conv branch supplies the extra channels so the concat with the
        # pooled input has exactly out_channels.
        self.conv = nn.Conv2d(in_channels, (out_channels - in_channels),
                              kernel_size=3, stride=2, padding=1, bias=False)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.PReLU()

    def forward(self, x):
        merged = torch.cat([self.conv(x), self.pool(x)], dim=1)
        return self.relu(self.bn(merged))
def instantiate_models(args, verbose=True):
    """Build the model configured in args for the current task (CL or SEG).

    Raises:
        ValueError: for unknown task names or unsupported model names.
    """
    p = Dict2Obj(args.model)
    if (args.task == constants.CL):
        # Classification task: plain LeNet5 or the SSL variant with extra knobs.
        if (p.name_model == constants.LENET5):
            model = models_cl.__dict__[p.name_model](num_classes=args.num_classes)
        elif (p.name_model == constants.SOTASSL):
            model = models_cl.__dict__[p.name_model](num_classes=args.num_classes, dropoutnetssl=p.dropoutnetssl, modalities=p.modalities, kmax=p.kmax, kmin=p.kmin, alpha=p.alpha, dropout=p.dropout)
        else:
            raise ValueError('Unsupported model name: {}.'.format(p.name_model))
    elif (args.task == constants.SEG):
        # Segmentation task: only the hybrid backbone+classifier model is supported.
        if (p.name_model == 'hybrid_model'):
            model = models_seg.__dict__[p.name_model](num_classes=args.num_classes, num_masks=args.num_masks, backbone=p.backbone, pretrained=p.pre_trained, modalities=p.modalities, kmax=p.kmax, kmin=p.kmin, alpha=p.alpha, dropout=p.dropout, backbone_dropout=p.backbone_dropout, freeze_classifier=args.freeze_classifier, base_width=p.base_width, leak=p.leak)
        else:
            raise ValueError('Unknown model name for SEG task: {}'.format(p.name_model))
    else:
        raise ValueError('Unknown task {}.'.format(args.task))
    if verbose:
        # NOTE(review): this interpolates the full model repr into the message;
        # possibly p.name_model was intended — confirm.
        print('`{}` was successfully instantiated. Nbr.params: {} .... [OK]'.format(model, count_nb_params(model)))
    return model
class SharedMLP(nn.Sequential):
    """Per-point MLP built from pointwise Conv2d layers.

    `args` lists channel sizes; layer i maps args[i] -> args[i+1]. When both
    `first` and `preact` are set, BN and activation are suppressed for
    layer 0 only (a preactivation stack starts without them).
    """

    def __init__(self, args: List[int], *, bn: bool=False, activation=nn.ReLU(inplace=True), preact: bool=False, first: bool=False, name: str='', instance_norm: bool=False):
        super(SharedMLP, self).__init__()
        for i in range((len(args) - 1)):
            # bn/activation are disabled exactly when first and preact and i == 0.
            self.add_module((name + 'layer{}'.format(i)), Conv2d(args[i], args[(i + 1)], bn=(((not first) or (not preact) or (i != 0)) and bn), activation=(activation if ((not first) or (not preact) or (i != 0)) else None), preact=preact, instance_norm=instance_norm))
_module
class ContrastiveHead(nn.Module):
    """Contrastive (InfoNCE-style) loss head.

    The positive score (column 0 after concatenation) must beat all negative
    scores under a temperature-scaled cross-entropy.

    Args:
        temperature: softmax temperature the logits are divided by.
    """

    def __init__(self, temperature=0.1):
        super(ContrastiveHead, self).__init__()
        self.criterion = nn.CrossEntropyLoss()
        self.temperature = temperature

    def forward(self, pos, neg):
        """Compute the contrastive loss.

        Args:
            pos: (N, 1) positive-pair scores.
            neg: (N, K) negative-pair scores.

        Returns:
            dict with a single entry 'loss'.
        """
        N = pos.size(0)
        logits = torch.cat((pos, neg), dim=1)
        logits /= self.temperature
        # Target class is 0 for every row (the positive column). Allocate the
        # labels on the input's device instead of the previous hard-coded
        # .cuda(), which crashed on CPU-only machines.
        labels = torch.zeros((N,), dtype=torch.long, device=pos.device)
        losses = dict()
        losses['loss'] = self.criterion(logits, labels)
        return losses
def main():
    """Print paired `infer && eval` shell commands for every (att, fixed, step)
    combination of the nl2code random-ablation sweep.

    (The former all_commands/all_eval_commands accumulators were never read
    and have been removed.)
    """
    for att, fixed in itertools.product((0, 1, 2, 3), (['init'], ['data', 'model'])):
        # Checkpoints every 1000 steps starting at 1100, plus the final step.
        steps = (list(range(1100, 40000, 1000)) + [40000])
        for step in steps:
            infer_command = (
                'python infer.py --config configs/spider-/nl2code-0428-random.jsonnet '
                '--logdir logdirs/-random '
                '--config-args "{{fixed: {fixed}, att: {att}}}" '
                '--output __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl '
                '--step {step} --section val --beam-size 1'
            ).format(step=step, fixed=fixed, att=att)
            eval_command = (
                'python eval.py --config configs/spider-/nl2code-0428-random.jsonnet '
                '--logdir logdirs/-random '
                '--config-args "{{fixed: {fixed}, att: {att}}}" '
                '--inferred __LOGDIR__/infer-val-step{step:05d}-bs1.jsonl '
                '--output __LOGDIR__/eval-val-step{step:05d}-bs1.jsonl '
                '--section val'
            ).format(step=step, fixed=fixed, att=att)
            print('{} && {}'.format(infer_command, eval_command))
class NameAxisLayer(_ConcatInputLayer):
    """RETURNN layer that (re)names dynamic axes by attaching DimensionTags.

    `axis` / `description` may be single values or parallel lists; a
    description of None removes the corresponding size placeholder instead.
    """
    layer_class = 'name_axis'

    def __init__(self, axis, description, **kwargs):
        super(NameAxisLayer, self).__init__(**kwargs)
        from returnn.tf.layers.base import LayerBase
        batch_dim = LayerBase.get_recent_layer().get_batch_info().dim
        # Broadcast scalar / length-1 dyn sizes to the full batch so every
        # sequence carries a size tensor tagged with the right DimensionTag.
        for (i, dyn_size) in self.output.size_placeholder.items():
            if ((len(dyn_size.shape) == 0) or (dyn_size.shape[0] == 1)):
                dim_tag = DimensionTag.get_tag_from_size_tensor(dyn_size)
                new_dyn_size = tf.broadcast_to(dyn_size, [batch_dim])
                dim_tag.set_tag_on_size_tensor(new_dyn_size)
                dim_tag.dyn_size = new_dyn_size
                self.output.size_placeholder[i] = new_dyn_size

    def get_out_data_from_opts(cls, name, axis, description, sources, **kwargs):
        """Compute the output Data template with renamed/removed axes.

        NOTE(review): the first parameter is `cls` — presumably this was a
        @classmethod whose decorator got stripped; confirm before calling.
        """
        data = Data.get_common_data([s.output for s in sources])
        data = data.copy(name=('%s_output' % name))
        # Normalize axis/description to parallel lists.
        if (not isinstance(axis, (list, tuple))):
            axis = [axis]
        if (not isinstance(description, (list, tuple))):
            description = [description]
        assert (len(axis) == len(description))
        for (ax, descr) in zip(axis, description):
            if isinstance(ax, int):
                # Integer axes are interpreted in batch-major layout.
                data = data.copy_as_batch_major()
            if (isinstance(ax, str) and ('|' in ax)):
                # 'a|b|c': use the first axis description that resolves.
                possible_axes = ax.split('|')
                found_ax = None
                for possible_ax in possible_axes:
                    try:
                        found_ax = data.get_axis_from_description(possible_ax)
                        break
                    except:
                        continue
                assert (found_ax is not None), ('%r: axis %r not found in %r' % (cls, ax, data))
                ax = found_ax
            if (isinstance(ax, str) and (len(ax) >= 3) and (ax[(- 2)] == '+')):
                # 'name+K': resolve 'name', then offset the axis index by K.
                ax_offset = int(ax[(- 1)])
                ax = ax[:(- 2)]
            else:
                ax_offset = 0
            ax = (data.get_axis_from_description(ax, allow_int=True) + ax_offset)
            ax_wo_batch = data.get_batch_axis_excluding_batch(ax)
            if (descr is None):
                # No description: drop the dynamic size for this axis.
                del data.size_placeholder[ax_wo_batch]
            else:
                if (ax_wo_batch in data.size_placeholder):
                    dyn_size = tf.identity(data.size_placeholder[ax_wo_batch])
                else:
                    # Static axis: materialize its size as a constant tensor.
                    assert (data.batch_shape[ax] is not None)
                    dyn_size = tf.constant(data.batch_shape[ax], shape=(1,))
                from returnn.tf.util.basic import DimensionTag
                tag = DimensionTag(description=descr, kind=DimensionTag.Types.Time)
                data.size_placeholder[ax_wo_batch] = dyn_size
                tag.set_tag_on_size_tensor(dyn_size)
        return data
# Run the module self-test, skipping the (slow) hardware-synthesis step.
_test(run_synthesis=False)
def test_axpy_unroll_mixed():
    """Vector-add SDFG with mixed memory placement: x on DDR bank 0,
    y and z each split across HBM banks 0..1."""
    def build():
        return create_vadd_sdfg('axpy_mixed')

    bank_assignment = [
        ('x', 'DDR', '0'),
        ('y', 'HBM', '0:2'),
        ('z', 'HBM', '0:2'),
    ]
    compiled, transformed = _exec_hbmtransform(build, bank_assignment)
    validate_vadd_sdfg(compiled, [2, 20])
    return transformed
class LayerNorm1d(nn.LayerNorm):
    """LayerNorm over the channel dimension of ``(N, C, L)`` inputs.

    ``nn.LayerNorm`` normalizes the trailing dimension, so the input is
    transposed to ``(N, L, C)`` for normalization and transposed back.
    """

    def __init__(self, num_channels, **kwargs):
        # Extra keyword args are accepted for signature compatibility with
        # other norm layers but intentionally not forwarded.
        super().__init__(num_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        channels_last = x.permute(0, 2, 1)
        normed = F.layer_norm(
            channels_last, self.normalized_shape, self.weight, self.bias, self.eps
        )
        return normed.permute(0, 2, 1).contiguous()
def main(argv=None):
    """Compute feature interactions with the configured method, evaluate the
    interaction-removal performance curve, and write the results to CSV.

    Optionally trains the interaction model first (``FLAGS.train_interaction_model``).
    Raises ``ValueError`` for an unrecognized ``FLAGS.interaction_type``.
    """
    set_up_environment(visible_devices=FLAGS.visible_devices)
    x_train, y_train, x_test, y_test, spec_df = get_data(FLAGS.dataset)

    if FLAGS.train_interaction_model:
        train_interaction_model(x_train, y_train, x_test, y_test)
    model, random_weights = load_interaction_model()

    valid_types = ('integrated_hessians', 'expected_hessians', 'hessians', 'hessians_times_inputs', 'shapley_sampling', 'contextual_decomposition', 'neural_interaction_detection')
    if FLAGS.interaction_type not in valid_types:
        raise ValueError('Invalid interaction type `{}`'.format(FLAGS.interaction_type))

    print('Evaluating {}'.format(FLAGS.interaction_type))
    print('Getting interactions')
    # Train/test baselines are swapped: each split is explained against the other.
    compute_interactions = return_interaction_function(FLAGS.interaction_type)
    interactions_train = compute_interactions(model, x_train, baseline=x_test)
    interactions_test = compute_interactions(model, x_test, baseline=x_train)

    mean_performances, sd_performances = get_performance_curve(x_train, x_test, model, spec_df, interactions_train, interactions_test, random_weights)

    curve_len = len(mean_performances)
    results = pd.DataFrame({
        'interaction_type': [FLAGS.interaction_type] * curve_len,
        'mean_perf': mean_performances,
        'sd_perf': sd_performances,
        'num_interactions_removed': np.arange(curve_len),
    })
    out_dir = 'results_random_draw' if FLAGS.use_random_draw else 'results'
    results.to_csv('{}/{}_{}.csv'.format(out_dir, FLAGS.dataset, FLAGS.interaction_type))
class Evaluator():
    """Single-image PIFu evaluator.

    Builds the geometry network (HGPIFuNet) and, when a checkpoint path is
    configured, the color network (ResBlkPIFuNet); reconstructs meshes from
    masked input images via ``load_image`` followed by ``eval``.
    """

    def __init__(self, opt, projection_mode='orthogonal'):
        """
        :param opt: parsed options namespace (paths, load size, gpu id, ...)
        :param projection_mode: camera projection mode passed to HGPIFuNet
        """
        self.opt = opt
        self.load_size = self.opt.loadSize
        # Input preprocessing: resize, convert to tensor, normalize to [-1, 1].
        self.to_tensor = transforms.Compose([transforms.Resize(self.load_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        cuda = (torch.device(('cuda:%d' % opt.gpu_id)) if torch.cuda.is_available() else torch.device('cpu'))
        netG = HGPIFuNet(opt, projection_mode).to(device=cuda)
        print('Using Network: ', netG.name)
        if opt.load_netG_checkpoint_path:
            netG.load_state_dict(torch.load(opt.load_netG_checkpoint_path, map_location=cuda))
        if (opt.load_netC_checkpoint_path is not None):
            print('loading for net C ...', opt.load_netC_checkpoint_path)
            netC = ResBlkPIFuNet(opt).to(device=cuda)
            netC.load_state_dict(torch.load(opt.load_netC_checkpoint_path, map_location=cuda))
        else:
            # No color checkpoint: eval() falls back to geometry-only meshes.
            netC = None
        # Results layout: <results_path>/<name>/; created up front.
        os.makedirs(opt.results_path, exist_ok=True)
        os.makedirs(('%s/%s' % (opt.results_path, opt.name)), exist_ok=True)
        # Persist the full option set next to the results for reproducibility.
        opt_log = os.path.join(opt.results_path, opt.name, 'opt.txt')
        with open(opt_log, 'w') as outfile:
            outfile.write(json.dumps(vars(opt), indent=2))
        self.cuda = cuda
        self.netG = netG
        self.netC = netC

    def load_image(self, image_path, mask_path):
        """Load an RGB image and its mask, and pack them into the sample dict
        consumed by ``eval``.

        :param image_path: path to the RGB image file
        :param mask_path: path to the single-channel mask image
        :return: dict with the (1, C, H, W) masked image tensor, a fixed
          orthographic calibration matrix, the mask, and the reconstruction
          bounding-box corners ``b_min`` / ``b_max``
        """
        img_name = os.path.splitext(os.path.basename(image_path))[0]
        # Reconstruction volume bounds in normalized coordinates.
        B_MIN = np.array([(- 1), (- 1), (- 1)])
        B_MAX = np.array([1, 1, 1])
        # Orthographic projection; the (1, 1) = -1 entry flips the y axis
        # (image coordinates grow downward).
        projection_matrix = np.identity(4)
        projection_matrix[(1, 1)] = (- 1)
        calib = torch.Tensor(projection_matrix).float()
        mask = Image.open(mask_path).convert('L')
        mask = transforms.Resize(self.load_size)(mask)
        mask = transforms.ToTensor()(mask).float()
        image = Image.open(image_path).convert('RGB')
        image = self.to_tensor(image)
        # Zero out background pixels so only the subject reaches the network.
        image = (mask.expand_as(image) * image)
        return {'name': img_name, 'img': image.unsqueeze(0), 'calib': calib.unsqueeze(0), 'mask': mask.unsqueeze(0), 'b_min': B_MIN, 'b_max': B_MAX}

    def eval(self, data, use_octree=False):
        """Reconstruct a mesh for one sample dict (from ``load_image``) and
        save it as an OBJ file under the results directory.

        :param data: sample dict produced by ``load_image``
        :param use_octree: if True, pass octree-accelerated sampling through
          to the mesh-generation helpers
        """
        opt = self.opt
        with torch.no_grad():
            self.netG.eval()
            if self.netC:
                self.netC.eval()
            save_path = ('%s/%s/result_%s.obj' % (opt.results_path, opt.name, data['name']))
            # Colored mesh when the color network is loaded, otherwise geometry only.
            if self.netC:
                gen_mesh_color(opt, self.netG, self.netC, self.cuda, data, save_path, use_octree=use_octree)
            else:
                gen_mesh(opt, self.netG, self.cuda, data, save_path, use_octree=use_octree)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.