code stringlengths 101 5.91M |
|---|
def get_mlp(num_input_channels, hidden_channels, num_output_channels, activation, log_softmax_outputs=False):
    """Build a fully-connected network as an ``nn.Sequential``.

    Every width in ``hidden_channels`` contributes a ``nn.Linear`` followed by
    a fresh ``activation()`` instance; a final ``nn.Linear`` maps the last
    hidden width to ``num_output_channels``.  When ``log_softmax_outputs`` is
    True, a ``nn.LogSoftmax(dim=1)`` is appended so the network emits
    log-probabilities instead of raw logits.
    """
    widths = [num_input_channels] + list(hidden_channels)
    modules = []
    for in_width, out_width in zip(widths[:-1], widths[1:]):
        modules.extend((nn.Linear(in_width, out_width), activation()))
    modules.append(nn.Linear(widths[-1], num_output_channels))
    if log_softmax_outputs:
        modules.append(nn.LogSoftmax(dim=1))
    return nn.Sequential(*modules)
class UpTransition(nn.Module):
    """V-Net style decoder stage: upsample, concatenate the encoder skip
    connection, then refine with a conv stack and a residual addition."""

    def __init__(self, inChans, outChans, nConvs, elu, dropout=False):
        super(UpTransition, self).__init__()
        # Transposed conv doubles spatial resolution and outputs outChans//2
        # channels; the skip connection supplies the other half after concat.
        self.up_conv = nn.ConvTranspose3d(inChans, (outChans // 2), kernel_size=2, stride=2)
        self.bn1 = torch.nn.InstanceNorm3d((outChans // 2))
        self.do1 = passthrough  # identity unless dropout is enabled below
        self.do2 = nn.Dropout3d()  # always applied to the skip input
        self.relu1 = ELUCons(elu, (outChans // 2))
        self.relu2 = ELUCons(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        # nConvs convolutions operating on the concatenated outChans channels.
        self.ops = _make_nConv(outChans, nConvs, elu)

    def forward(self, x, skipx):
        # Optional dropout on the decoder input; always-on dropout on skip.
        out = self.do1(x)
        skipxdo = self.do2(skipx)
        # Upsample, normalise, activate.
        out = self.relu1(self.bn1(self.up_conv(out)))
        # Channel-concatenate with the skip, refine, then residual-add the
        # concatenated tensor back in before the final activation.
        xcat = torch.cat((out, skipxdo), 1)
        out = self.ops(xcat)
        out = self.relu2(torch.add(out, xcat))
        return out
class PseLTae(nn.Module):
    """Pixel-Set Encoder + Lightweight Temporal Attention Encoder classifier.

    Pipeline: PixelSetEncoder embeds each date's pixel set spatially, LTAE
    attends over the temporal sequence of embeddings, and an MLP decoder
    produces per-class logits.
    """

    def __init__(self, input_dim=10, mlp1=[10, 32, 64], pooling='mean_std', mlp2=[128, 128], with_extra=True, extra_size=4, n_head=16, d_k=8, d_model=256, mlp3=[256, 128], dropout=0.2, T=1000, mlp4=[128, 64, 32], num_classes=20, max_temporal_shift=100):
        super(PseLTae, self).__init__()
        if with_extra:
            # Copy before mutating so the caller's (mutable default) list is
            # not altered; the first mlp2 layer is widened to also take the
            # extra non-spectral features.
            mlp2 = deepcopy(mlp2)
            mlp2[0] += extra_size
        self.spatial_encoder = PixelSetEncoder(input_dim, mlp1=mlp1, pooling=pooling, mlp2=mlp2, with_extra=with_extra, extra_size=extra_size)
        self.temporal_encoder = LTAE(in_channels=mlp2[(- 1)], n_head=n_head, d_k=d_k, d_model=d_model, n_neurons=mlp3, dropout=dropout, T=T, max_temporal_shift=max_temporal_shift)
        self.decoder = get_decoder(mlp4, num_classes)

    def forward(self, pixels, mask, positions, extra, return_feats=False):
        """Return class logits; optionally also the temporal features."""
        spatial_feats = self.spatial_encoder(pixels, mask, extra)
        temporal_feats = self.temporal_encoder(spatial_feats, positions)
        logits = self.decoder(temporal_feats)
        if return_feats:
            return (logits, temporal_feats)
        else:
            return logits

    def param_ratio(self):
        """Print the trainable-parameter split between the three sub-modules
        and return the total count."""
        total = get_ntrainparams(self)
        s = get_ntrainparams(self.spatial_encoder)
        t = get_ntrainparams(self.temporal_encoder)
        c = get_ntrainparams(self.decoder)
        print('TOTAL TRAINABLE PARAMETERS : {}'.format(total))
        print('RATIOS: Spatial {:5.1f}% , Temporal {:5.1f}% , Classifier {:5.1f}%'.format(((s / total) * 100), ((t / total) * 100), ((c / total) * 100)))
        return total
class BaseGraph():
    """Base container for a directed simple graph.

    Edge weights live in ``_raw_e_dict`` keyed by ``(src, dst)``; self-loop
    weights live separately in ``_raw_selfloop_dict`` keyed by vertex id.
    Derived structures are memoised in ``self.cache`` and invalidated via
    ``_clear_cache()`` after every structural mutation.

    NOTE(review): ``v``, ``e``, ``num_v``, ``num_e`` and ``vars_for_DL`` are
    read elsewhere in this class as plain attributes (e.g. ``self.num_v``
    inside ``e``), so ``@property`` decorators were almost certainly stripped
    from this copy; several methods below also have empty bodies. Confirm
    against the upstream source.
    """

    def __init__(self, num_v: int, e_list: Optional[Union[(List[int], List[List[int]])]]=None, e_weight: Optional[Union[(float, List[float])]]=None, extra_selfloop: bool=False, device: torch.device=torch.device('cpu')):
        # num_v: vertex count; extra_selfloop: when True, every vertex carries
        # an implicit unit-weight self-loop in addition to explicit ones.
        # NOTE(review): e_list / e_weight are accepted but never inserted here
        # -- the upstream __init__ presumably calls add_edges; confirm.
        assert (isinstance(num_v, int) and (num_v > 0)), 'num_v should be a positive integer'
        self.clear()
        self.device = device
        self._num_v = num_v
        self._has_extra_selfloop = extra_selfloop

    def __repr__(self) -> str:
        """Human-readable summary (body not present in this copy)."""

    def state_dict(self) -> Dict[(str, Any)]:
        """Serialisable snapshot of the graph (body not present in this copy)."""

    def save(self, file_path: Union[(str, Path)]):
        """Persist the graph to disk (body not present in this copy)."""

    def load(file_path: Union[(str, Path)]):
        """Load a graph from disk; likely a @staticmethod upstream (body not present)."""

    def draw(self, **kwargs):
        """Visualise the graph (body not present in this copy)."""

    def clear(self):
        """Drop all edges, self-loops and cached derived structures."""
        self._raw_e_dict = {}
        self._raw_selfloop_dict = {}
        self._has_extra_selfloop = False
        self._clear_cache()

    def _clear_cache(self):
        # Invalidate every memoised derived structure.
        self.cache = {}

    def clone(self):
        """Deep copy of the graph (body not present in this copy)."""

    def to(self, device: torch.device):
        """Move every cached deep-learning tensor to `device`; returns self."""
        self.device = device
        for v in self.vars_for_DL:
            if (self.cache.get(v, None) is not None):
                self.cache[v] = self.cache[v].to(device)
        return self

    def _format_edges(self, e_list: Union[(List[int], List[List[int]])], e_weight: Optional[Union[(float, List[float])]]=None) -> Tuple[(List[List[int]], List[float])]:
        """Normalise edge input to ``(list_of_pairs, list_of_weights)``.

        Accepts a single ``[src, dst]`` pair or a list of pairs; a missing
        weight list defaults every edge to 1.0.  Raises AssertionError when a
        vertex index is out of range.
        """
        if (e_list is None):
            return ([], [])
        # A bare [src, dst] pair is promoted to a one-element edge list, and
        # a scalar weight to a one-element weight list.
        if (isinstance(e_list[0], int) and (len(e_list) == 2)):
            e_list = [e_list]
            if (e_weight is not None):
                e_weight = [e_weight]
        assert (np.array(e_list).max() < self._num_v), 'Vertex index out of range'
        if (e_weight is None):
            e_weight = ([1.0] * len(e_list))
        return (e_list, e_weight)

    def from_state_dict(state_dict: dict):
        """Build a graph from a state dict; likely a @staticmethod upstream (body not present)."""

    def from_adj_list(num_v: int, adj_list: List[List[int]], extra_selfloop: bool=False) -> 'BaseGraph':
        """Build a graph from an adjacency list; likely a @staticmethod upstream (body not present)."""

    def add_edges(self, e_list: Union[(List[int], List[List[int]])], e_weight: Optional[Union[(float, List[float])]]=None, merge_op: str='mean'):
        """Add edges, merging duplicates with `merge_op` ('mean'/'max'/'sum').

        NOTE(review): iterates e_list/e_weight directly -- upstream probably
        runs _format_edges first; as written a None or scalar e_weight would
        fail in zip. Confirm.
        """
        for ((src, dst), w) in zip(e_list, e_weight):
            self._add_edge(src, dst, w, merge_op)
        self._clear_cache()

    def _add_edge(self, src: int, dst: int, w: float, merge_op: str):
        """Insert one edge or self-loop, merging with any existing weight."""
        if (merge_op == 'mean'):
            merge_func = (lambda x, y: ((x + y) / 2))
        elif (merge_op == 'max'):
            merge_func = (lambda x, y: max(x, y))
        elif (merge_op == 'sum'):
            merge_func = (lambda x, y: (x + y))
        else:
            raise ValueError(f'Unknown edge merge operation: {merge_op}.')
        if (src == dst):
            # Self-loops are stored apart from ordinary edges.
            if (src in self._raw_selfloop_dict):
                self._raw_selfloop_dict[src] = merge_func(self._raw_selfloop_dict[src], w)
            else:
                self._raw_selfloop_dict[src] = w
        elif ((src, dst) in self._raw_e_dict):
            self._raw_e_dict[(src, dst)] = merge_func(self._raw_e_dict[(src, dst)], w)
        else:
            self._raw_e_dict[(src, dst)] = w
        self._clear_cache()

    def remove_edges(self, e_list: Union[(List[int], List[List[int]])]):
        """Remove the given edges (body not present in this copy)."""

    def _remove_edge(self, v_a: int, v_b: int):
        """Remove one edge or self-loop; silently ignores absent edges."""
        if ((v_a == v_b) and (v_a in self._raw_selfloop_dict)):
            del self._raw_selfloop_dict[v_a]
        elif ((v_a, v_b) in self._raw_e_dict):
            del self._raw_e_dict[(v_a, v_b)]
        self._clear_cache()

    def add_extra_selfloop(self):
        """Enable the implicit unit-weight self-loop on every vertex."""
        self._has_extra_selfloop = True
        self._clear_cache()

    def remove_extra_selfloop(self):
        """Disable the implicit self-loops."""
        self._has_extra_selfloop = False
        self._clear_cache()

    def remove_selfloop(self):
        """Drop both explicit and implicit self-loops."""
        self._raw_selfloop_dict.clear()
        self.remove_extra_selfloop()
        self._clear_cache()

    def drop_edges(self, drop_rate: float, ord: str='uniform'):
        """Randomly drop edges (body not present in this copy)."""

    def v(self) -> List[int]:
        """List of vertex ids; likely a @property upstream."""
        return list(range(self.num_v))

    def e(self) -> Tuple[(List[List[int]], List[float])]:
        """(edge list, weight list) including self-loops; likely a @property.

        The result is cached under key 'e' until the structure mutates.
        """
        if (self.cache.get('e', None) is None):
            e_list = [(src_idx, dst_idx) for (src_idx, dst_idx) in self._raw_e_dict.keys()]
            w_list = list(self._raw_e_dict.values())
            e_list.extend([(v_idx, v_idx) for v_idx in self._raw_selfloop_dict.keys()])
            w_list.extend(list(self._raw_selfloop_dict.values()))
            if self._has_extra_selfloop:
                # Implicit unit-weight loops are appended last, one per vertex.
                e_list.extend(((v_idx, v_idx) for v_idx in range(self.num_v)))
                w_list.extend(([1.0] * self.num_v))
            self.cache['e'] = (e_list, w_list)
        return self.cache['e']

    def num_v(self) -> int:
        """Vertex count; likely a @property upstream (read as ``self.num_v``)."""
        return self._num_v

    def num_e(self) -> int:
        """Edge count including explicit and implicit self-loops; likely a @property."""
        cnt = 0
        cnt += len(self._raw_e_dict)
        cnt += len(self._raw_selfloop_dict)
        if self._has_extra_selfloop:
            cnt += self.num_v
        return cnt

    def vars_for_DL(self) -> List[str]:
        """Cache keys that hold device tensors; likely a @property (body not present)."""

    def A(self) -> torch.Tensor:
        """Sparse adjacency matrix; likely a @property (body not present)."""

    def smoothing(self, X: torch.Tensor, L: torch.Tensor, lamb: float) -> torch.Tensor:
        """One Laplacian smoothing step: ``X + lamb * (L @ X)`` (sparse matmul)."""
        return (X + (lamb * torch.sparse.mm(L, X)))

    def v2v(self, X: torch.Tensor, aggr: str='mean', e_weight: Optional[torch.Tensor]=None):
        """Vertex-to-vertex message passing (body not present in this copy)."""
class T5DenseGatedActDense(nn.Module):
    """T5 gated feed-forward block: ``wo(dropout(GELU(wi_0(x)) * wi_1(x)))``.

    All three projections are bias-free, matching the T5 architecture.
    """

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        """Apply the gated feed-forward transform; shape is preserved."""
        gate = self.act(self.wi_0(hidden_states))
        value = self.wi_1(hidden_states)
        gated = self.dropout(gate * value)
        return self.wo(gated)
def write_mot_results(filename, results, data_type='mot'):
    """Write tracking results to `filename` in MOT or KITTI text format.

    Args:
        filename: output path; a falsy value silently skips writing.
        results: iterable of (frame_id, tlwhs, track_ids) triples, where
            tlwhs are (x, y, w, h) boxes and negative track ids are skipped.
        data_type: 'mot'/'mcmot'/'lab' for MOTChallenge CSV, 'kitti' for
            KITTI tracking format.

    Raises:
        ValueError: for an unknown `data_type`.
    """
    if not filename:
        return
    path = os.path.dirname(filename)
    # Guard against '' (bare filename) and use exist_ok to avoid the
    # check-then-create race of the original exists()/makedirs() pair.
    if path:
        os.makedirs(path, exist_ok=True)
    if data_type in ('mot', 'mcmot', 'lab'):
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as f:
        for (frame_id, tlwhs, track_ids) in results:
            if data_type == 'kitti':
                frame_id -= 1  # KITTI frame ids are 0-based
            for (tlwh, track_id) in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                (x1, y1, w, h) = tlwh
                (x2, y2) = (x1 + w, y1 + h)
                # `results` carries no per-box confidence; emit a constant
                # score for the KITTI template (previously a KeyError, since
                # {score} was never supplied to format()).
                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)
                f.write(line)
    logger.info('Save results to {}'.format(filename))
def _A2B(arithmetic_tensor):
    """Convert a 3-party replicated arithmetic share into a binary share.

    Only valid for world size 3.  Each party masks its contribution with
    fresh pseudo-random zero shares (PRZS) and the parties jointly evaluate
    a binary circuit (extract_msb) over the two resulting binary sharings.
    """
    assert (comm.get().get_world_size() == 3)
    rank = comm.get().get_rank()
    size = arithmetic_tensor.size()
    device = arithmetic_tensor.device
    # Fresh pseudo-random zero sharings used to mask the arithmetic input.
    (z1, z2) = (BinarySharedTensor.PRZS(size, device=device).share, BinarySharedTensor.PRZS(size, device=device).share)
    # x1 is this party's own share; x2 is the neighbour's replicated share.
    (x1, x2) = (arithmetic_tensor.share, resharing.replicate_shares(arithmetic_tensor.share))
    if (rank == 0):
        # Party 0 injects (x1 + x2) under mask z1.
        b1 = BinarySharedTensor.from_shares((z1 ^ (x1 + x2)), src=rank)
        b2 = BinarySharedTensor.from_shares(z2, src=rank)
    elif (rank == 1):
        # Party 1 injects x1 under mask z2.
        b1 = BinarySharedTensor.from_shares(z1, src=rank)
        b2 = BinarySharedTensor.from_shares((z2 ^ x1), src=rank)
    else:
        # Party 2 contributes only its zero-share masks.
        b1 = BinarySharedTensor.from_shares(z1, src=rank)
        b2 = BinarySharedTensor.from_shares(z2, src=rank)
    # NOTE(review): extract_msb presumably sums b1 and b2 with a binary adder
    # and returns the most-significant bit -- confirm against the circuit
    # module upstream.
    binary_tensor = circuit.extract_msb(b1, b2)
    binary_tensor.encoder = arithmetic_tensor.encoder
    return binary_tensor
_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for MarianTokenizer (sentencepiece-backed, no Rust variant)."""

    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False  # Marian has no fast/Rust tokenizer
    test_sentencepiece = True

    def setUp(self):
        """Write a tiny vocab + sentencepiece models into tmpdirname so the
        mixin can instantiate tokenizers from a local directory."""
        super().setUp()
        vocab = ['</s>', '<unk>', 'This', 'is', 'a', 't', 'est', 'G', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, (save_dir / VOCAB_FILES_NAMES['vocab']))
        save_json(mock_tokenizer_config, (save_dir / VOCAB_FILES_NAMES['tokenizer_config_file']))
        # The same sample sentencepiece model serves both source and target.
        if (not (save_dir / VOCAB_FILES_NAMES['source_spm']).exists()):
            copyfile(SAMPLE_SP, (save_dir / VOCAB_FILES_NAMES['source_spm']))
            copyfile(SAMPLE_SP, (save_dir / VOCAB_FILES_NAMES['target_spm']))
        # Round-trip once so save_pretrained artefacts exist on disk.
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        """Fresh tokenizer instance from the fixture directory."""
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input/expected-decoded pair used by the shared mixin tests."""
        return ('This is a test', 'This is a test')

    def test_convert_token_and_id(self):
        """Token <-> id conversion is consistent for a known special token."""
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """Vocab ordering and size match the fixture vocabulary."""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[(- 1)], '<pad>')
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        """Pretrained en-de tokenizer produces known ids and can round-trip
        through save_pretrained/from_pretrained."""
        en_de_tokenizer = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de')
        batch = en_de_tokenizer(['I am a small frog'], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob('*')]
        self.assertIn('source.spm', contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        """Truncation caps output at the model max length (512)."""
        tok = self.get_tokenizer()
        batch = tok([('I am a small frog' * 1000), 'I am a small frog'], padding=True, truncation=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        """Padding only pads to the longest sequence in the batch."""
        tok = self.get_tokenizer()
        batch_smaller = tok(['I am a tiny frog', 'I am a small frog'], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    def test_tokenizer_integration(self):
        """Golden-output check against a pinned Helsinki-NLP revision."""
        expected_encoding = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='Helsinki-NLP/opus-mt-en-de', revision='1a8c2263da11e68e50938f97e10cd57820bd504c', decode_kwargs={'use_source_tokenizer': True})

    def test_tokenizer_integration_seperate_vocabs(self):
        """Separate source/target vocab checkpoint: ids differ per side and
        decoding the target ids recovers the target text."""
        tokenizer = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs')
        source_text = 'Tama on testi'
        target_text = 'This is a test'
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)
        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)
        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
class TFRobertaPreTrainedModel():
    """Import-guard placeholder: any use raises via ``requires_tf`` when
    TensorFlow is not installed.

    NOTE(review): in the upstream dummy-object pattern ``from_pretrained`` is
    a classmethod and the class carries a backends attribute; decorators
    appear stripped from this copy -- confirm.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def main():
    """Halide tutorial lesson 11: cross-compile a 'brighter' pipeline for
    several targets and sanity-check the emitted object-file headers.

    Returns 0 on success, -1 when an expected object file is missing.
    Raises Exception when a generated object file has an unexpected header.
    """
    brighter = Func('brighter')
    (x, y) = (Var('x'), Var('y'))
    offset = Param(UInt(8))
    input = ImageParam(UInt(8), 2)
    args = [input, offset]
    brighter[(x, y)] = (input[(x, y)] + offset)
    brighter.vectorize(x, 16).parallel(y)
    # Host build for the machine running this script.
    brighter.compile_to_file('lesson_11_host', args, 'lesson_11_host')
    create_android = True
    create_windows = True
    create_ios = True
    if create_android:
        target = Target()
        target.os = TargetOS.Android
        target.arch = TargetArch.ARM
        target.bits = 32
        arm_features = []
        target.set_features(arm_features)
        brighter.compile_to_file('lesson_11_arm_32_android', args, 'lesson_11_arm_32_android', target)
    if create_windows:
        target = Target()
        target.os = TargetOS.Windows
        target.arch = TargetArch.X86
        target.bits = 64
        target.set_features([TargetFeature.AVX, TargetFeature.SSE41])
        brighter.compile_to_file('lesson_11_x86_64_windows', args, 'lesson_11_x86_64_windows', target)
    if create_ios:
        target = Target()
        target.os = TargetOS.IOS
        target.arch = TargetArch.ARM
        target.bits = 32
        target.set_features([TargetFeature.ARMv7s])
        brighter.compile_to_file('lesson_11_arm_32_ios', args, 'lesson_11_arm_32_ios', target)
    if create_android:
        # ELF identification: 0x7f 'E' 'L' 'F', then 32-bit class, little
        # endian, header version 1.
        arm_32_android_magic = [127, ord('E'), ord('L'), ord('F'), 1, 1, 1]
        length = len(arm_32_android_magic)
        try:
            # `with` guarantees the handle closes even if read fails; the
            # original leaked it on the early-return path.
            with open('lesson_11_arm_32_android.o', 'rb') as f:
                header_bytes = f.read(length)
        except OSError:
            print('Android object file not generated')
            return (- 1)
        header = list(unpack(('B' * length), header_bytes))
        if (header != arm_32_android_magic):
            print([(a == b) for (a, b) in zip(header, arm_32_android_magic)])
            # The unreachable `return -1` that followed this raise was removed.
            raise Exception('Unexpected header bytes in 32-bit arm object file.')
    if create_windows:
        # COFF machine type 0x8664 (x86-64) as two little-endian bytes.
        win_64_magic = [100, 134]
        try:
            with open('lesson_11_x86_64_windows.obj', 'rb') as f:
                header_bytes = f.read(2)
        except OSError:
            print('Windows object file not generated')
            return (- 1)
        header = list(unpack(('B' * 2), header_bytes))
        if (header != win_64_magic):
            raise Exception('Unexpected header bytes in 64-bit windows object file.')
    if create_ios:
        # 32-bit Mach-O header: MH_MAGIC (0xFEEDFACE), CPU_TYPE_ARM (12),
        # CPU_SUBTYPE_ARM_V7S (11), MH_OBJECT (1).  The magic constant was
        # missing in the corrupted copy (`[, 12, 11, 1]` -- a syntax error).
        arm_32_ios_magic = [0xFEEDFACE, 12, 11, 1]
        try:
            with open('lesson_11_arm_32_ios.o', 'rb') as f:
                header_bytes = f.read((4 * 4))
        except OSError:
            print('ios object file not generated')
            return (- 1)
        header = list(unpack(('I' * 4), header_bytes))
        if (header != arm_32_ios_magic):
            raise Exception('Unexpected header bytes in 32-bit arm ios object file.')
    print('Success!')
    return 0
class linear():
    """Linear-in-parameters mean model: mean(X) = Psi(X) @ params + bias.

    `basis` must expose an `nbasis` attribute giving the number of basis
    functions; `params` defaults to the zero vector of that length.
    """

    def __init__(self, basis, params=None, bias=None):
        self.basis = basis
        self.nbasis = basis.nbasis
        # BUG FIX: the original stored the initial params as
        # `self._init_params`, shadowing the _init_params *method* on every
        # instance; keep the value under a distinct name instead.
        self._initial_params = params
        self.bias = bias
        self.params = params
        if (params is None):
            self.params = np.zeros(self.nbasis)
        self.nparams = self.nbasis

    def get_mean(self, X, Psi=None, params=None, bias=None):
        """Return Psi @ params + bias.

        Any of Psi/params/bias not supplied falls back to the stored values;
        Psi is computed from X via self.get_basis when absent.
        """
        if (params is None):
            params = np.copy(self.params)
        if (bias is None):
            # BUG FIX: np.copy(None) produced an object array that broke the
            # addition below; treat a missing bias as 0.
            bias = 0 if (self.bias is None) else np.copy(self.bias)
        if (Psi is None):
            Psi = self.get_basis(X)
        return (Psi.dot(params) + bias)

    def set_params(self, params):
        self.params = params

    def set_bias(self, bias):
        self.bias = bias

    def _init_params(self, params):
        """Reset params; None restores the all-zero default.

        BUG FIX: the original fell through and overwrote the default with
        None; the branches are now exclusive.
        """
        if (params is None):
            self.params = np.zeros(self.nbasis)
        else:
            self.params = params

    def _init_bias(self, bias):
        """Reset bias; None restores the default of 0 (same fix as above)."""
        if (bias is None):
            self.bias = 0
        else:
            self.bias = bias
_REGISTRY.register()
def resnet50_ms_l123(pretrained=True, **kwargs):
    """ResNet-50 backbone with MixStyle inserted after layer1, layer2 and
    layer3 (domain-generalisation variant).

    NOTE(review): the bare `_REGISTRY.register()` call above this function in
    the file looks like a stripped `@BACKBONE_REGISTRY.register()` decorator
    -- confirm against the upstream dassl source.  `**kwargs` is accepted but
    unused here.
    """
    from dassl.modeling.ops import MixStyle
    model = ResNet(block=Bottleneck, layers=[3, 4, 6, 3], ms_class=MixStyle, ms_layers=['layer1', 'layer2', 'layer3'])
    if pretrained:
        # Loads ImageNet weights; MixStyle modules carry no parameters.
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
def add_stage(inplanes, outplanes, innerplanes, nblocks, dilation=1, stride_init=2):
    """Assemble one ResNet stage of `nblocks` residual blocks.

    Only the first block downsamples with `stride_init`; every later block
    uses stride 1 and takes `outplanes` as its input width.

    Returns:
        (stage, outplanes): the stage as an ``nn.Sequential`` plus the output
        channel count, so callers can chain stages.
    """
    blocks = []
    for block_idx in range(nblocks):
        in_width = inplanes if block_idx == 0 else outplanes
        block_stride = stride_init if block_idx == 0 else 1
        blocks.append(add_residual_block(in_width, outplanes, innerplanes, dilation, block_stride))
    return (nn.Sequential(*blocks), outplanes)
def chars_token_ratio(dataset, tokenizer, nb_examples=400):
    """Estimate the average number of characters per token over a sample.

    Iterates at most `nb_examples` items of `dataset` (zip with a range
    truncates the stream) and totals characters vs. tokens.

    NOTE(review): raises ZeroDivisionError when no tokens are produced at all
    (e.g. an empty dataset) -- confirm callers guarantee non-empty input.
    """
    (total_characters, total_tokens) = (0, 0)
    for (_, example) in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples):
        text = prepare_sample_text(example)
        total_characters += len(text)
        if tokenizer.is_fast:
            # Fast tokenizers expose tokens() on the returned encoding.
            total_tokens += len(tokenizer(text).tokens())
        else:
            total_tokens += len(tokenizer.tokenize(text))
    return (total_characters / total_tokens)
def one_hot_from_names(class_name_or_list, batch_size=1):
    """Map class name(s) to ImageNet one-hot vectors via WordNet synsets.

    Returns None when a name has no noun synset at all.  Requires nltk with
    the wordnet corpus downloaded.
    """
    try:
        from nltk.corpus import wordnet as wn
    except ImportError:
        raise ImportError('You need to install nltk to use this function')
    if (not isinstance(class_name_or_list, (list, tuple))):
        class_name_or_list = [class_name_or_list]
    else:
        # A list of names implies at least one vector per name.
        batch_size = max(batch_size, len(class_name_or_list))
    classes = []
    for class_name in class_name_or_list:
        # WordNet lemmas use underscores instead of spaces.
        class_name = class_name.replace(' ', '_')
        original_synsets = wn.synsets(class_name)
        # Keep nouns only.
        original_synsets = list(filter((lambda s: (s.pos() == 'n')), original_synsets))
        if (not original_synsets):
            return None
        # Prefer a synset that maps directly into the ImageNet label set.
        possible_synsets = list(filter((lambda s: (s.offset() in IMAGENET)), original_synsets))
        if possible_synsets:
            classes.append(IMAGENET[possible_synsets[0].offset()])
        else:
            # Fall back to direct hypernyms/hyponyms of any noun synset.
            # NOTE(review): if this fallback also finds nothing, the name is
            # silently skipped (no else branch) and fewer classes than names
            # are returned -- confirm this is intended.
            possible_synsets = sum([(s.hypernyms() + s.hyponyms()) for s in original_synsets], [])
            possible_synsets = list(filter((lambda s: (s.offset() in IMAGENET)), possible_synsets))
            if possible_synsets:
                classes.append(IMAGENET[possible_synsets[0].offset()])
    return one_hot_from_int(classes, batch_size=batch_size)
class DatasetMetafeatures(object):
    """Named collection of metafeature values for one dataset, serialisable
    to/from the ARFF format."""

    def __init__(self, dataset_name, metafeature_values):
        # metafeature_values: mapping of metafeature name -> MetaFeatureValue
        # (load() below builds a list instead -- see the note there).
        self.dataset_name = dataset_name
        self.metafeature_values = metafeature_values

    def _get_arff(self):
        """Build the ARFF-dict representation (relation/attributes/data)."""
        output = dict()
        output['relation'] = ('metafeatures_%s' % self.dataset_name)
        output['description'] = ''
        output['attributes'] = [('name', 'STRING'), ('type', 'STRING'), ('fold', 'NUMERIC'), ('repeat', 'NUMERIC'), ('value', 'NUMERIC'), ('time', 'NUMERIC'), ('comment', 'STRING')]
        output['data'] = []
        # Sorted for a deterministic row order.
        for key in sorted(self.metafeature_values):
            output['data'].append(self.metafeature_values[key].to_arff_row())
        return output

    def dumps(self):
        """Return the ARFF-dict (not a string, despite the name)."""
        return self._get_arff()

    def dump(self, path_or_filehandle):
        """Write the ARFF representation to a path or an open file handle."""
        output = self._get_arff()
        if isinstance(path_or_filehandle, str):
            with open(path_or_filehandle, 'w') as fh:
                arff.dump(output, fh)
        else:
            arff.dump(output, path_or_filehandle)

    def load(cls, path_or_filehandle):
        """Read an ARFF file back into a DatasetMetafeatures instance.

        NOTE(review): takes `cls` and ends with `cls(...)` -- clearly a
        @classmethod whose decorator was stripped from this copy.  Also note
        it builds metafeature_values as a *list*, which is inconsistent with
        the dict-style access used by _get_arff/keys/__getitem__; confirm
        upstream.
        """
        if isinstance(path_or_filehandle, str):
            with open(path_or_filehandle) as fh:
                input = arff.load(fh)
        else:
            input = arff.load(path_or_filehandle)
        dataset_name = input['relation'].replace('metafeatures_', '')
        metafeature_values = []
        for item in input['data']:
            mf = MetaFeatureValue(*item)
            metafeature_values.append(mf)
        return cls(dataset_name, metafeature_values)

    def __repr__(self, verbosity=0):
        """Multi-line summary; verbosity 0 hides helper values, >=1 adds
        timings, >1 adds comments."""
        repr = StringIO()
        repr.write(('Metafeatures for dataset %s\n' % self.dataset_name))
        for name in self.metafeature_values:
            if ((verbosity == 0) and (self.metafeature_values[name].type_ != 'METAFEATURE')):
                continue
            if (verbosity == 0):
                repr.write((' %s: %s\n' % (str(name), str(self.metafeature_values[name].value))))
            elif (verbosity >= 1):
                repr.write((' %s: %10s (%10fs)\n' % (str(name), str(self.metafeature_values[name].value)[:10], self.metafeature_values[name].time)))
            if ((verbosity > 1) and self.metafeature_values[name].comment):
                repr.write((' %s\n' % self.metafeature_values[name].comment))
        return repr.getvalue()

    def keys(self):
        return self.metafeature_values.keys()

    def __getitem__(self, item):
        return self.metafeature_values[item]
def config_qimname(cfg, i):
    """Path of the i-th query image: dir_images/<qimlist[i]><qext>."""
    filename = cfg['qimlist'][i] + cfg['qext']
    return os.path.join(cfg['dir_images'], filename)
def next_quad_double_solution(vrblvl=0):
    """Advance to and return the next quad-double-precision solution from
    the PHCpack solution container.

    Calls PHCpack job 508 through the shared-library entry point to move the
    cursor, then fetches the current solution.  `vrblvl` > 0 enables verbose
    tracing of the FFI call.
    """
    if (vrblvl > 0):
        print('in next_quad_double_solution ...')
    phc = get_phcfun()
    # a-index 1 selects the solution cursor; b and c are unused outputs.
    aidx = pointer(c_int32(1))
    bbb = pointer(c_int32(0))
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> next_quad_double_solution calls phc', end='')
    retval = phc(508, aidx, bbb, ccc, vrb)
    # NOTE(review): retval is printed but never checked; a nonzero value
    # presumably signals failure -- confirm against the phcpy API.
    if (vrblvl > 0):
        print(', return value :', retval)
    sol = get_next_quad_double_solution(1, vrblvl)
    return sol
class VarDict(object):
    """Dict wrapper with interchangeable attribute and item access.

    All state lives in the backing ``my_dict``; attribute reads/writes and
    item reads/writes are routed through the same two low-level helpers.
    """

    def _setattr_(obj, key, val):
        # Low-level write straight into the backing dict (no __setattr__).
        obj.my_dict[key] = val

    def _getattr_(obj, key):
        # Low-level read; raises KeyError for unknown keys.
        return obj.my_dict[key]

    def __init__(self, dict=None):
        # Write through __dict__ directly so __setattr__ is not triggered
        # before the backing store exists.
        self.__dict__['my_dict'] = {}
        if dict:
            for name, value in dict.items():
                self.__setattr__(name, value)

    def __setitem__(self, key, value):
        VarDict._setattr_(self, key, value)

    def __getitem__(self, key):
        return VarDict._getattr_(self, key)

    def __setattr__(self, key, value):
        # Attribute writes become item writes.
        self.__setitem__(key, value)

    def __getattr__(self, key):
        # Attribute reads fall back to item reads (KeyError on miss).
        return self.__getitem__(key)

    def __contains__(self, key):
        return (key in self.my_dict)

    def __str__(self):
        pairs = sorted(self.my_dict.items())
        return '\n'.join('{0}:{1}'.format(name, value) for name, value in pairs)

    def get(self, key, value):
        """Return self[key] if present, else the supplied default."""
        if key in self.my_dict:
            return self.__getitem__(key)
        return value

    def set(self, key, value, func=(lambda x: x)):
        """Store func(current-or-default) under key and return it."""
        val = func(self.get(key, value))
        self.__setattr__(key, val)
        return val

    def to_dict(self):
        """Expose the backing dict (shared reference, not a copy)."""
        return self.my_dict

    def add(self, dict):
        """Merge every (key, value) pair of `dict` into this VarDict."""
        for name, value in dict.items():
            self.__setattr__(name, value)
class ImageFeatureToTensor(Preprocessing):
    """Preprocessing step that converts an ImageFeature to a tensor.

    NOTE(review): this Python class is only a thin handle -- the conversion
    itself is presumably implemented by the backing JVM class selected via
    ``bigdl_type`` ('float'/'double'); confirm against the Scala side.
    """

    def __init__(self, bigdl_type='float'):
        super(ImageFeatureToTensor, self).__init__(bigdl_type)
_flax
class FlaxElectraModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Flax ELECTRA model tests driven by the shared FlaxModelTesterMixin.

    NOTE(review): the bare ``_flax`` line above this class in the file looks
    like a stripped ``@require_flax`` decorator -- confirm upstream.
    """

    test_head_masking = True
    # Guarded so the module still imports when flax is absent.
    all_model_classes = ((FlaxElectraModel, FlaxElectraForMaskedLM, FlaxElectraForPreTraining, FlaxElectraForTokenClassification, FlaxElectraForQuestionAnswering, FlaxElectraForMultipleChoice, FlaxElectraForSequenceClassification) if is_flax_available() else ())

    def setUp(self):
        self.model_tester = FlaxElectraModelTester(self)

    def test_model_from_pretrained(self):
        """Every model class loads from its pretrained checkpoint and runs a
        forward pass on a minimal (1, 1) input."""
        for model_class_name in self.all_model_classes:
            # The MLM head pairs with the generator checkpoint; everything
            # else uses the discriminator.
            if (model_class_name == FlaxElectraForMaskedLM):
                model = model_class_name.from_pretrained('google/electra-small-generator')
            else:
                model = model_class_name.from_pretrained('google/electra-small-discriminator')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
def partial_match_score(truth: List[Rationale], pred: List[Rationale], thresholds: List[float]) -> List[Dict[(str, Any)]]:
    """Score predicted rationales against annotations by token-level IOU.

    For every (document, class) key, each predicted span gets the best IOU of
    its token range against any truth span for that key.  At each threshold a
    prediction counts as a true positive when its best IOU >= threshold.

    Returns:
        One dict per threshold with micro- and macro-averaged precision,
        recall and F1.
    """
    ann_to_rat = _keyed_rationale_from_list(truth)
    pred_to_rat = _keyed_rationale_from_list(pred)
    # Per-key counts used as precision/recall denominators.
    num_classifications = {k: len(v) for (k, v) in pred_to_rat.items()}
    num_truth = {k: len(v) for (k, v) in ann_to_rat.items()}
    ious = defaultdict(dict)
    # Union of keys so predictions without truths (and vice versa) still count.
    for k in (set(ann_to_rat.keys()) | set(pred_to_rat.keys())):
        for p in pred_to_rat.get(k, []):
            best_iou = 0.0
            for t in ann_to_rat.get(k, []):
                # IOU over the token index ranges [start_token, end_token).
                num = len((set(range(p.start_token, p.end_token)) & set(range(t.start_token, t.end_token))))
                denom = len((set(range(p.start_token, p.end_token)) | set(range(t.start_token, t.end_token))))
                iou = (0 if (denom == 0) else (num / denom))
                if (iou > best_iou):
                    best_iou = iou
            ious[k][p] = best_iou
    scores = []
    for threshold in thresholds:
        threshold_tps = dict()
        for (k, vs) in ious.items():
            threshold_tps[k] = sum((int((x >= threshold)) for x in vs.values()))
        # Micro: pool counts over all keys; guard the all-empty case with 0.
        micro_r = ((sum(threshold_tps.values()) / sum(num_truth.values())) if (sum(num_truth.values()) > 0) else 0)
        micro_p = ((sum(threshold_tps.values()) / sum(num_classifications.values())) if (sum(num_classifications.values()) > 0) else 0)
        micro_f1 = _f1(micro_r, micro_p)
        # Macro: average the per-key ratios.
        macro_rs = list((((threshold_tps.get(k, 0.0) / n) if (n > 0) else 0) for (k, n) in num_truth.items()))
        macro_ps = list((((threshold_tps.get(k, 0.0) / n) if (n > 0) else 0) for (k, n) in num_classifications.items()))
        macro_r = ((sum(macro_rs) / len(macro_rs)) if (len(macro_rs) > 0) else 0)
        macro_p = ((sum(macro_ps) / len(macro_ps)) if (len(macro_ps) > 0) else 0)
        macro_f1 = _f1(macro_r, macro_p)
        scores.append({'threshold': threshold, 'micro': {'p': micro_p, 'r': micro_r, 'f1': micro_f1}, 'macro': {'p': macro_p, 'r': macro_r, 'f1': macro_f1}})
    return scores
_registry(op_types='ReduceMax, ReduceMin')
class ReduceMinMaxOperator(Operator):
    """Quantization handler for ONNX ReduceMax / ReduceMin nodes.

    NOTE(review): the bare ``_registry(op_types='ReduceMax, ReduceMin')``
    line above this class looks like a stripped registration decorator --
    confirm upstream.
    """

    def __init__(self, onnx_quantizer, onnx_node):
        super(ReduceMinMaxOperator, self).__init__(onnx_quantizer, onnx_node)

    def quantize_check(self):
        """Quantize only when the node's input is a valid quantizable weight."""
        node = self.node
        if (not self.quantizer.is_valid_quantize_weight(node.input[0])):
            return False
        return True

    def quantize(self):
        """Quantize input 0 (and optionally the outputs) and mark the node
        with the '_quant' name suffix used by convert_check below."""
        node = self.node
        # direct_int8: reuse the input's quantization parameters directly,
        # since Reduce* only selects among input values.
        self.quantizer.quantize_inputs(self.node, [0], direct_int8=True)
        if ((not self.disable_qdq_for_node_output) or (self.quantizer.mode != 'qdq')):
            self.quantizer.quantize_outputs(self.node, direct_int8=True)
        node.name = (node.name + '_quant')

    def convert_check(self, convert_format):
        """Node is convertible only in 'static' format, once quantize() has
        tagged it and it is connected to the graph."""
        node = self.node
        assert (convert_format in ['static']), "convert format for {} should be in ['static']".format(node.op_type)
        parents = self.quantizer.model.get_parents(node)
        children = self.quantizer.model.get_children(node)
        if (((len(children) == 0) and (len(parents) == 0)) or (not node.name.endswith('_quant'))):
            return False
        return True

    def convert(self, convert_format):
        """Fold the surrounding DequantizeLinear/QuantizeLinear pair so the
        Reduce op consumes and produces int8 tensors directly."""
        node = self.node
        parents = self.quantizer.model.get_parents(node)
        children = self.quantizer.model.get_children(node)
        # Only fold when a DQ parent and a Q child are both present.
        if (any([(i.op_type == 'DequantizeLinear') for i in parents]) and any([(i.op_type == 'QuantizeLinear') for i in children])):
            for parent in parents:
                if (parent.op_type == 'DequantizeLinear'):
                    # Bypass the DQ: take its quantized input directly.
                    self.node.input[0] = parent.input[0]
                    self.quantizer.remove_nodes.append(parents[0])
                    break
            for child in children:
                if (child.op_type == 'QuantizeLinear'):
                    # Drop the Q and rewire its consumers to the node's
                    # quantized output name.
                    self.quantizer.remove_nodes.append(child)
                    self.quantizer.model.replace_input_of_all_nodes(child.output[0], (node.output[0] + '_quantized'))
            node.output[0] = (node.output[0] + '_quantized')
def train_sr(X_train, X_test, y_train, y_test, common_name_model, problemtype, classes, default_features, transform_model, modeldir, settings):
    """Train a suite of scikit-learn regressors and keep the best one.

    Each candidate is fit on (X_train, y_train), scored with 6-fold
    cross_val_predict on (X_test, y_test), and pickled into a temporary
    folder.  Results are printed as a table and written to an .xlsx sheet;
    the model with the lowest mean absolute error is renamed to
    '<common_name_model>.pickle' and copied into modeldir.

    Args:
        X_train, X_test, y_train, y_test: train/test feature and target splits.
        common_name_model: basename for the exported pickle/xlsx files.
        problemtype, classes, default_features, transform_model, settings:
            accepted for interface compatibility; unused here.
        modeldir: directory where the final model and spreadsheet are saved.

    Returns:
        (model_name, model_dir, files): pickle filename, directory it was
        saved in, and the list of produced files.
    """
    modeltypes = list()
    pickle_names = list()
    explained_variances = list()
    mean_absolute_errors = list()
    mean_squared_errors = list()
    median_absolute_errors = list()
    r2_scores = list()
    print(modeldir)
    os.chdir(modeldir)
    foldername = (common_name_model + '_temp')
    tempdir = ((os.getcwd() + '/') + foldername)
    try:
        os.mkdir(foldername)
        os.chdir(foldername)
    except Exception:
        # Stale temp folder from a previous run: wipe it and start clean.
        shutil.rmtree(foldername)
        os.mkdir(foldername)
        os.chdir(foldername)

    def try_model(modeltype, error_label, pickle_name, make_estimator, scale=False):
        # Fit one candidate, cross-validate it, and pickle it.  The estimator
        # is built inside the try so constructor errors (e.g. kwargs removed
        # in newer sklearn versions) are also caught; any failure is reported
        # and the candidate is skipped so the sweep continues.
        nonlocal explained_variances, mean_absolute_errors, mean_squared_errors, median_absolute_errors, r2_scores
        try:
            estimator = make_estimator()
            if scale:
                # Scale-sensitive models (SGD) get standardized features.
                scaler = StandardScaler()
                scaler.fit(X_train)
                Xtr = scaler.transform(X_train)
                Xte = scaler.transform(X_test)
            else:
                Xtr = X_train
                Xte = X_test
            estimator.fit(Xtr, y_train)
            predictions = cross_val_predict(estimator, Xte, y_test, cv=6)
            with open(pickle_name, 'wb') as f:
                pickle.dump(estimator, f)
            (explained_variances, mean_absolute_errors, mean_squared_errors, median_absolute_errors, r2_scores) = update_list(y_test, predictions, explained_variances, mean_absolute_errors, mean_squared_errors, median_absolute_errors, r2_scores)
            modeltypes.append(modeltype)
            pickle_names.append(pickle_name)
        except Exception:
            print('error - %s' % error_label)

    # NOTE: some constructors below use arguments (e.g. normalize=True,
    # penalty='l1' with the default solver) that newer sklearn versions
    # reject; those candidates then simply report an error and are skipped.
    try_model('linear regression', 'ORDINARY LEAST SQUARES', 'ols.pickle', lambda: linear_model.LinearRegression())
    try_model('ridge regression', 'RIDGE REGRESSION', 'ridge.pickle', lambda: linear_model.Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True))
    try_model('LASSO', 'LASSO', 'lasso.pickle', lambda: linear_model.Lasso(alpha=0.1))
    # FIX: the original passed the undefined name `ytest` to update_list here,
    # so the elastic net candidate always failed silently.
    try_model('elastic net', 'ELASTIC NET', 'enet.pickle', lambda: linear_model.ElasticNet())
    try_model('Least angle regression (LARS)', 'LARS', 'lars.pickle', lambda: linear_model.Lars(n_nonzero_coefs=1))
    try_model('LARS lasso', 'LARS LASSO', 'lars_lasso.pickle', lambda: linear_model.LassoLars())
    try_model('orthogonal matching pursuit (OMP)', 'ORTHOGONAL MATCHING PURSUIT (OMP)', 'omp.pickle', lambda: linear_model.OrthogonalMatchingPursuit())
    try_model('logistic regression', 'LOGISTIC REGRESSION', 'lr.pickle', lambda: linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-06))
    try_model('stochastic gradient descent (SGD)', 'STOCHASTIC GRADIENT DESCENT', 'sgd.pickle', lambda: linear_model.SGDRegressor(), scale=True)
    try_model('perceptron', 'MLP REGRESSOR', 'nn.pickle', lambda: MLPRegressor(solver='lbfgs'))
    try_model('passive-agressive algorithm', 'PASSIVE-AGGRESSIVE', 'pa_regr.pickle', lambda: linear_model.PassiveAggressiveRegressor(random_state=0))
    try_model('RANSAC', 'RANSAC', 'ransac.pickle', lambda: linear_model.RANSACRegressor())
    try_model('Theil-Sen', 'THEILSEN', 'theilsen.pickle', lambda: linear_model.TheilSenRegressor(random_state=42))
    try_model('huber regression', 'HUBER', 'huber.pickle', lambda: linear_model.HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100))
    try_model('polynomial (linear regression)', 'POLYNOMIAL', 'poly_lr.pickle', lambda: Pipeline([('poly', PolynomialFeatures(degree=5, include_bias=False)), ('linreg', LinearRegression(normalize=True))]))

    os.chdir(modeldir)
    print('\n\n')
    print('RESULTS: \n')
    table = BeautifulTable()
    table.column_headers = ['model type', 'R^2 score', 'Mean Absolute Errors']
    print(len(modeltypes))
    print(len(r2_scores))
    print(len(mean_absolute_errors))
    for i in range(len(modeltypes)):
        table.append_row([modeltypes[i], str(r2_scores[i]), str(mean_absolute_errors[i])])
    print(table)
    filename = (common_name_model + '.xlsx')
    workbook = xlsxwriter.Workbook(filename)
    worksheet = workbook.add_worksheet()
    worksheet.write('A1', 'Model type')
    worksheet.write('B1', 'R^2 score')
    worksheet.write('C1', 'Explained Variances')
    worksheet.write('D1', 'Mean Absolute Errors')
    worksheet.write('E1', 'Mean Squared Log Errors')
    worksheet.write('F1', 'Median Absolute Errors')
    mae = mean_absolute_errors
    for i in range(len(mae)):
        # 'n/a' marks a metric update_list could not compute; use a large
        # sentinel so that model is never selected as best.
        mae[i] = 10 if (mae[i] == 'n/a') else float(mae[i])
    minval = np.amin(mae)
    ind = mae.index(minval)
    print('%s has the lowest mean absolute error (%s)' % (modeltypes[ind], str(minval)))
    os.chdir(tempdir)
    newname = (common_name_model + '.pickle')
    print('saving file to disk (%s)...' % newname)
    # FIX: index into the pickles actually written (pickle_names is aligned
    # with modeltypes); the original indexed a fixed 15-entry varnames list,
    # which selected the wrong file whenever any earlier model had failed.
    os.rename(pickle_names[ind], newname)
    shutil.copy(((os.getcwd() + '/') + newname), ((modeldir + '/') + newname))
    os.chdir(modeldir)
    shutil.rmtree(foldername)
    for i in range(len(modeltypes)):
        try:
            worksheet.write(('A' + str((i + 2))), str(modeltypes[i]))
            worksheet.write(('B' + str((i + 2))), str(r2_scores[i]))
            worksheet.write(('C' + str((i + 2))), str(explained_variances[i]))
            worksheet.write(('D' + str((i + 2))), str(mean_absolute_errors[i]))
            worksheet.write(('F' + str((i + 2))), str(median_absolute_errors[i]))
        except Exception:
            pass
    workbook.close()
    files = list()
    files.append((common_name_model + '.xlsx'))
    files.append((common_name_model + '.pickle'))
    model_name = (common_name_model + '.pickle')
    model_dir = os.getcwd()
    return (model_name, model_dir, files)
def param_grad_or_zeros(param):
    """Return the detached gradient of *param*, or a zero tensor of the same
    shape/dtype when no gradient has been accumulated yet."""
    grad = param.grad
    if grad is None:
        return th.zeros_like(param)
    return grad.data.detach()
def sufficient_expertise(df):
    """Print the percentage of 'Yes' answers to the sufficient-expertise
    question for each expert validator (EV1, EV2) and their average."""
    def pct_yes(column):
        # fraction of 'Yes' responses, expressed as a percentage
        return (df[column] == 'Yes').mean() * 100
    ev_1 = pct_yes('Sufficient Expertise?_EV_1')
    ev_2 = pct_yes('Sufficient Expertise?_EV_2')
    print('EV1:', round(ev_1, 1))
    print('EV2:', round(ev_2, 1))
    print('Average:', round(np.mean([ev_1, ev_2]), 1))
def _propagate_qconfig_recursively(model, prefix, op_qcfgs, qconfig_parent=None):
    """Walk the module tree and attach a qconfig to every submodule.

    A module whose dotted name appears in ``op_qcfgs`` gets its own qconfig,
    which then flows down to its descendants; otherwise the parent's qconfig
    is inherited.  DeQuantStub modules receive an explicit reduce-range
    MinMax observer config on torch >= 1.8.
    """
    for name, child in model.named_children():
        full_name = prefix + name
        child.qconfig = qconfig_parent
        qconfig_to_propagate = None
        if full_name in op_qcfgs:
            # An explicit per-op config wins and is propagated downward.
            child.qconfig = op_qcfgs[full_name]
            qconfig_to_propagate = child.qconfig
        elif type(child) == torch.quantization.DeQuantStub:
            if get_torch_version().release >= Version('1.8.0').release:
                child.qconfig = torch.quantization.QConfig(activation=torch.quantization.MinMaxObserver.with_args(reduce_range=REDUCE_RANGE), weight=torch.quantization.default_per_channel_weight_observer)
        _propagate_qconfig_recursively(child, full_name + '.', op_qcfgs, qconfig_to_propagate)
def load_dfs(d):
    """Flatten a {model_name: [yaml files]} mapping into one DataFrame with a
    row per file, indexed by the model name the file belongs to."""
    records = [load_yaml(path) for file_list in d.values() for path in file_list]
    frame = pd.json_normalize(records)
    # One index entry per file, repeating the owning model's name.
    frame.index = [f'{model}' for model, file_list in d.items() for _ in file_list]
    return frame
def mkdirs(Dataset_folder, csv_folder, classes, type_csv):
    """Create (and clean) the per-class dataset folder tree plus the csv folder.

    For each requested split and class an output folder is created; datasets
    whose root ends in '_nl' ("no labels") do not get the extra 'Label'
    subfolder.  Leftover .txt annotation files in target folders are removed
    so a rerun starts clean.

    Args:
        Dataset_folder: root folder of the dataset tree.
        csv_folder: folder that will hold the csv metadata files.
        classes: iterable of class names.
        type_csv: 'train'/'validation'/'test' for a single split, or 'all'.
    """
    def _prepare(split, class_name):
        # Build the target folder for this split/class, create it if missing,
        # and delete stale .txt files from previous runs.
        if Dataset_folder.endswith('_nl'):
            folder = os.path.join(Dataset_folder, split, class_name)
        else:
            folder = os.path.join(Dataset_folder, split, class_name, 'Label')
        if not os.path.exists(folder):
            os.makedirs(folder)
        for leftover in [f for f in os.listdir(folder) if f.endswith('.txt')]:
            os.remove(os.path.join(folder, leftover))

    directory_list = ['train', 'validation', 'test']
    if not (type_csv == 'all'):
        for class_name in classes:
            _prepare(type_csv, class_name)
    else:
        # FIX: the original built the 'Label' path in BOTH arms of the '_nl'
        # check in this branch, so '_nl' (no-label) datasets wrongly got Label
        # folders; now consistent with the single-split branch.
        for directory in directory_list:
            for class_name in classes:
                _prepare(directory, class_name)
    if not os.path.exists(csv_folder):
        os.makedirs(csv_folder)
def numpyImageToTensor(image):
    """Convert an HxWxC numpy image to a CxHxW float32 torch tensor."""
    chw = image.transpose((2, 0, 1))
    return torch.from_numpy(chw).type(torch.float)
class ARUMCell(RNNCell):
    """Associative Rotational Unit of Memory (ARUM) RNN cell.

    The cell state concatenates a flattened (hidden_size x hidden_size)
    associative rotation matrix with the hidden vector h, so the state is
    hidden_size * (hidden_size + 1) wide.  Each step composes a new rotation
    into the matrix and applies it to h before the gated update.
    """
    def __init__(self, hidden_size, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None, T_norm=None, eps=1e-12, use_zoneout=False, zoneout_keep_h=0.9, use_layer_norm=False, is_training=False, lambda_pow=0):
        """Create the cell.

        Args:
            hidden_size: dimensionality of the hidden vector h.
            activation: candidate nonlinearity (defaults to relu).
            reuse: variable-scope reuse flag.
            kernel_initializer: initializer for the input embedding
                (defaults to an orthogonal initializer).
            bias_initializer: bias initializer (gate biases default to ones).
            T_norm: if not None, renormalize h to this L2 norm each step.
            eps: epsilon for the L2 normalization.
            use_zoneout: apply zoneout between old and new h.
            zoneout_keep_h: zoneout keep probability.
            use_layer_norm: layer-normalize the gates and candidate.
            is_training: training-mode flag (affects zoneout).
            lambda_pow: stored but not used in this implementation.
        """
        super(ARUMCell, self).__init__(_reuse=reuse)
        self._hidden_size = hidden_size
        self._activation = (activation or relu)
        self._T_norm = T_norm
        self._kernel_initializer = (kernel_initializer or aux.orthogonal_initializer(1.0))
        self._bias_initializer = bias_initializer
        self._eps = eps
        self._use_zoneout = use_zoneout
        self._zoneout_keep_h = zoneout_keep_h
        self._use_layer_norm = use_layer_norm
        self._is_training = is_training
        self._lambda_pow = lambda_pow
    # NOTE(review): TensorFlow's RNNCell API defines state_size/output_size as
    # @property; here they are plain methods — confirm callers invoke them with ().
    def state_size(self):
        # Flattened association matrix plus the hidden vector.
        return (self._hidden_size * (self._hidden_size + 1))
    def output_size(self):
        return self._hidden_size
    def call(self, inputs, state):
        """Run one recurrence step; returns (new_h, new_state)."""
        size_batch = tf.shape(state)[0]
        # Split the packed state into the flattened association matrix and h.
        (assoc_mem, state) = tf.split(state, [(self._hidden_size * self._hidden_size), self._hidden_size], 1)
        assoc_mem = tf.reshape(assoc_mem, [size_batch, self._hidden_size, self._hidden_size])
        with vs.variable_scope('gates'):
            bias_ones = self._bias_initializer
            if (self._bias_initializer is None):
                dtype = [a.dtype for a in [inputs, state]][0]
                # Bias of 1.0 keeps the update gate initially open.
                bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
            value = fully_connected(inputs=tf.concat([inputs, state], axis=1), num_outputs=(2 * self._hidden_size), activation_fn=None, biases_initializer=bias_ones, weights_initializer=aux.rum_ortho_initializer())
            # r parameterizes the rotation target; u is the update gate.
            (r, u) = array_ops.split(value=value, num_or_size_splits=2, axis=1)
            u = sigmoid(u)
            if self._use_layer_norm:
                concat = tf.concat([r, u], 1)
                concat = aux.layer_norm_all(concat, 2, self._hidden_size, 'LN_r_u')
                (r, u) = tf.split(concat, 2, 1)
        with vs.variable_scope('candidate'):
            # Embed the input, build a rotation from (x_emb, r), and compose
            # it into the accumulated associative memory.
            x_emb = fully_connected(inputs=inputs, num_outputs=self._hidden_size, activation_fn=None, biases_initializer=self._bias_initializer, weights_initializer=self._kernel_initializer)
            tmp_rotation = rotation_operator(x_emb, r, self._hidden_size)
            Rt = tf.matmul(assoc_mem, tmp_rotation)
            # Rotate the previous hidden state with the accumulated rotation.
            state_new = tf.reshape(tf.matmul(Rt, tf.reshape(state, [size_batch, self._hidden_size, 1])), [size_batch, self._hidden_size])
            if self._use_layer_norm:
                c = self._activation(aux.layer_norm((x_emb + state_new), 'LN_c'))
            else:
                c = self._activation((x_emb + state_new))
        # Standard gated interpolation between old state and candidate.
        new_h = ((u * state) + ((1 - u) * c))
        if (self._T_norm != None):
            # Time normalization: fix the L2 norm of h to T_norm.
            new_h = (tf.nn.l2_normalize(new_h, 1, epsilon=self._eps) * self._T_norm)
        if self._use_zoneout:
            new_h = aux.rum_zoneout(new_h, state, self._zoneout_keep_h, self._is_training)
        # Re-flatten the rotation matrix and pack it with h into one state.
        Rt = tf.reshape(Rt, [size_batch, (self._hidden_size * self._hidden_size)])
        new_state = tf.concat([Rt, new_h], 1)
        return (new_h, new_state)
    def zero_state(self, batch_size, dtype):
        """Initial state: identity association matrix and zero hidden vector."""
        e = tf.eye(self._hidden_size, batch_shape=[batch_size])
        e = tf.reshape(e, [batch_size, (self._hidden_size * self._hidden_size)])
        c = tf.zeros([batch_size, self._hidden_size], dtype=dtype)
        h = tf.concat([e, c], 1)
        return h
class SubsampleDataset(BaseWrapperDataset):
    """Randomly subsamples a wrapped dataset to ``size_ratio * len(dataset)``
    examples (without replacement), remapping all indices accordingly.

    Args:
        dataset: the dataset to wrap.
        size_ratio: fraction of examples to keep; must be < 1.
        shuffle: if True, ordered_indices() returns a random permutation
            (tie-broken by example size) instead of a purely size-sorted one.
    """
    def __init__(self, dataset, size_ratio, shuffle=False):
        super().__init__(dataset)
        assert (size_ratio < 1)
        # FIX: `self.shuffle` was read in ordered_indices() but never set,
        # raising AttributeError; expose it as a backward-compatible
        # constructor argument (default False keeps deterministic ordering).
        self.shuffle = shuffle
        self.actual_size = np.ceil((len(dataset) * size_ratio)).astype(int)
        self.indices = np.random.choice(list(range(len(self.dataset))), self.actual_size, replace=False)
        logger.info('subsampled dataset from {} to {} (ratio={})'.format(len(self.dataset), self.actual_size, size_ratio))
    def __getitem__(self, index):
        return self.dataset[self.indices[index]]
    def __len__(self):
        return self.actual_size
    def collater(self, samples):
        return self.dataset.collater(samples)
    # FIX: ordered_indices() below uses `self.sizes` attribute-style, so
    # `sizes` (and `name`, for API symmetry with the dataset protocol) must
    # be properties, not plain methods.
    @property
    def sizes(self):
        return self.dataset.sizes[self.indices]
    @property
    def name(self):
        return self.dataset.name
    def num_tokens(self, index):
        return self.dataset.num_tokens(self.indices[index])
    def size(self, index):
        return self.dataset.size(self.indices[index])
    def ordered_indices(self):
        """Return example indices ordered for batching (random or by size)."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # Secondary sort key: example sizes (primary key is last in lexsort).
        order.append(self.sizes)
        return np.lexsort(order)
    def prefetch(self, indices):
        self.dataset.prefetch(self.indices[indices])
_module()
class ResNet50(nn.Module):
    """Dilated ResNet-50 backbone (ImageNet-pretrained checkpoint) that
    exposes only the final stage's feature map."""
    def __init__(self, norm_type='sync_batchnorm'):
        super(ResNet50, self).__init__()
        pretrained = './pretrained/resnet50-imagenet.pth'
        backbone = ResNetBackbone(backbone='deepbase_resnet50_dilated8', pretrained=pretrained, norm_type=norm_type)
        # Stem = conv prefix + max-pool, followed by the four residual stages.
        self.stem = nn.Sequential(backbone.prefix, backbone.maxpool)
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4
    def init_weights(self, pretrained=None):
        # Weights are loaded from the checkpoint in __init__; nothing to do.
        pass
    def forward(self, x):
        feats = self.stem(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feats = stage(feats)
        # Single-element tuple keeps the multi-output backbone interface.
        return (feats,)
    def train(self, mode=True):
        # Plain delegation; kept explicit as a hook point (e.g. BN freezing).
        super(ResNet50, self).train(mode)
.skip(reason='treeinterpreter no longer maintained')
def test_that_tree_works():
    """TreeInterpreter's local explanations must reproduce treeinterpreter's
    raw per-feature contributions and bias on a diabetes RandomForest."""
    from treeinterpreter import treeinterpreter as ti
    dataset = load_diabetes()
    forest = RandomForestRegressor()
    X, y = dataset.data[:300], dataset.target[:300]
    feature_names = dataset.feature_names
    # Two held-out rows to explain.
    X_new = dataset.data[[300, 309]]
    y_new = dataset.target[[300, 309]]
    forest.fit(X, y)
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', 'Conversion of an array with ndim > 0 to a scalar is deprecated*')
        prediction, bias, contributions = ti.predict(forest, X_new)
    explainer = TreeInterpreter(forest, X, feature_names=feature_names)
    local_expl = explainer.explain_local(X_new, y_new)
    a_local_data = local_expl.data(key=0)
    # Names and scores must match the reference implementation exactly.
    for idx in range(len(feature_names)):
        assert feature_names[idx] == a_local_data['names'][idx]
        assert contributions[(0, idx)] == a_local_data['scores'][idx]
    assert a_local_data['extra']['names'][0] == 'Bias'
    assert a_local_data['extra']['scores'][0] == bias[0]
def main(args):
    """Train SLATE with slot attention from parsed CLI *args*.

    Seeds RNGs, builds the datamodule and model, optionally wires up a
    TensorBoard logger with LR/image/checkpoint callbacks, then runs
    PyTorch Lightning training.
    """
    print(args)
    set_random_seed(args.seed)
    # The metric to monitor is derived from the chosen evaluation mode.
    args.monitor = monitors[args.evaluate]
    datamodule = datamodules[args.dataset](args)
    model = SLATE(args)
    method = SlotAttentionMethod(model=model, datamodule=datamodule, args=args)
    method.hparams = args
    if args.is_logger_enabled:
        logger = pl_loggers.TensorBoardLogger(args.log_path, name=args.log_name)
        # Record the full hyperparameter set as a text entry in TensorBoard.
        arg_str_list = ['{}={}'.format(k, v) for (k, v) in vars(args).items()]
        arg_str = '__'.join(arg_str_list)
        log_dir = os.path.join(args.log_path, args.log_name)
        print(log_dir)
        logger.experiment.add_text('hparams', arg_str)
        callbacks = [LearningRateMonitor('step'), ImageLogCallback(), ModelCheckpoint(monitor=args.monitor, save_top_k=1, save_last=True, mode='max')]
    else:
        logger = False
        callbacks = []
    # 'ddp' only when multiple GPUs are requested; checkpoint resume is opt-in.
    trainer = Trainer(resume_from_checkpoint=(args.ckpt_path if args.load_from_ckpt else None), logger=logger, default_root_dir=args.log_path, accelerator=('ddp' if (args.gpus > 1) else None), num_sanity_val_steps=args.num_sanity_val_steps, gpus=args.gpus, max_steps=args.max_steps, max_epochs=args.max_epochs, log_every_n_steps=50, callbacks=callbacks, check_val_every_n_epoch=args.check_val_every_n_epoch, gradient_clip_val=args.grad_clip)
    trainer.fit(method)
def process_chain(chain: Chain, chain_id: str) -> Protein:
    """Convert a Biopython chain into a Protein, one entry per residue.

    Residue names missing from the 3-to-1 table map to 'X'; atoms outside the
    canonical atom-type table are skipped (their mask entries stay 0).
    """
    n_atom_types = residue_constants.atom_type_num
    aatype = []
    atom_positions = []
    atom_mask = []
    residue_index = []
    b_factors = []
    chain_ids = []
    for res in chain:
        shortname = residue_constants.restype_3to1.get(res.resname, 'X')
        pos = np.zeros((n_atom_types, 3))
        mask = np.zeros((n_atom_types,))
        res_b = np.zeros((n_atom_types,))
        for atom in res:
            if atom.name not in residue_constants.atom_types:
                continue
            slot = residue_constants.atom_order[atom.name]
            pos[slot] = atom.coord
            mask[slot] = 1.0
            res_b[slot] = atom.bfactor
        aatype.append(residue_constants.restype_order.get(shortname, residue_constants.restype_num))
        atom_positions.append(pos)
        atom_mask.append(mask)
        residue_index.append(res.id[1])
        b_factors.append(res_b)
        chain_ids.append(chain_id)
    return Protein(
        atom_positions=np.array(atom_positions),
        atom_mask=np.array(atom_mask),
        aatype=np.array(aatype),
        residue_index=np.array(residue_index),
        chain_index=np.array(chain_ids),
        b_factors=np.array(b_factors),
    )
def prefetch(tensor_dict, capacity):
    """Set up a background-filled FIFO queue of copies of *tensor_dict*.

    Registers a QueueRunner that keeps enqueueing the input tensors and logs
    the queue's fill fraction as a scalar summary.  Returns the queue;
    callers dequeue from it to decouple input production from consumption.
    """
    names = list(tensor_dict.keys())
    values = list(tensor_dict.values())
    queue = tf.PaddingFIFOQueue(capacity, dtypes=[t.dtype for t in values], shapes=[t.get_shape() for t in values], names=names, name='prefetch_queue')
    enqueue_op = queue.enqueue(tensor_dict)
    tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(queue, [enqueue_op]))
    fill_fraction = tf.to_float(queue.size()) * (1.0 / capacity)
    tf.summary.scalar('queue/%s/fraction_of_%d_full' % (queue.name, capacity), fill_fraction)
    return queue
def evaluate_boxes(json_dataset, all_boxes, output_dir, use_salt=True, cleanup=True, use_matlab=False):
    """Run PASCAL-VOC-style detection evaluation on *all_boxes*.

    Writes per-class VOC result files (optionally salted with a uuid so
    concurrent runs don't collide), runs the Python evaluation, optionally
    the MATLAB one, and — when *cleanup* is set — moves the result files into
    *output_dir*.  Always returns None.
    """
    salt = '_{}'.format(str(uuid.uuid4())) if use_salt else ''
    result_files = _write_voc_results_files(json_dataset, all_boxes, salt)
    _do_python_eval(json_dataset, salt, output_dir)
    if use_matlab:
        _do_matlab_eval(json_dataset, salt, output_dir)
    if cleanup:
        for path in result_files:
            # copy-then-remove == move across filesystems
            shutil.copy(path, output_dir)
            os.remove(path)
    return None
def _train():
    """End-to-end srez training: wire up inputs, model, losses and optimizers,
    then hand everything to the training loop."""
    sess, summary_writer = setup_tensorflow()
    all_filenames = prepare_dirs(delete_train_dir=True)
    # Shuffle before splitting so the held-out tail is a random sample.
    rn.shuffle(all_filenames)
    train_filenames = all_filenames[:-FLAGS.test_vectors]
    test_filenames = all_filenames[-FLAGS.test_vectors:]
    train_features, train_labels = srez_input.setup_inputs(sess, train_filenames, image_size=32, crop_size=128)
    test_features, test_labels = srez_input.setup_inputs(sess, test_filenames, image_size=32, crop_size=128)
    # Corrupt generator inputs with mild gaussian noise.
    noise_level = 0.03
    noisy_train_features = train_features + tf.random_normal(train_features.get_shape(), stddev=noise_level)
    (gene_minput, gene_moutput, gene_output, gene_var_list, disc_real_output, disc_fake_output, disc_var_list) = srez_model.create_model(sess, noisy_train_features, train_labels)
    gene_loss = srez_model.create_generator_loss(disc_fake_output, gene_output, train_features)
    disc_real_loss, disc_fake_loss = srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')
    global_step, learning_rate, gene_minimize, disc_minimize = srez_model.create_optimizers(gene_loss, gene_var_list, disc_loss, disc_var_list)
    # TrainData(locals()) snapshots every local defined above for the loop.
    train_data = TrainData(locals())
    srez_train.train_model(train_data)
def play_and_get_episode_stats(env: Minesweeper, actions: List[chex.Array], time_limit: int, force_start_state: Optional[State]=None) -> Tuple[(List[float], List[StepType], int)]:
    """Roll out a fixed action sequence in *env* and collect per-step stats.

    Args:
        env: the Minesweeper environment.
        actions: actions to apply, one per step, in order.
        time_limit: maximum number of steps before declaring an infinite loop.
        force_start_state: optional state overriding the reset state (note the
            initial timestep still comes from reset).

    Returns:
        (collected_rewards, collected_step_types, episode_length).

    Raises:
        Exception: if the episode runs longer than *time_limit* steps.
    """
    (state, timestep) = jax.jit(env.reset)(jax.random.PRNGKey(0))
    # FIX: compare against None explicitly — truth-testing an Optional[State]
    # that holds JAX arrays is unreliable, and a valid state must never be
    # skipped because it happens to evaluate falsy.
    if force_start_state is not None:
        state = force_start_state
    episode_length = 0
    step_fn = jax.jit(env.step)
    collected_rewards = []
    collected_step_types = []
    while not timestep.last():
        (state, timestep) = step_fn(state, actions[episode_length])
        episode_length += 1
        collected_rewards.append(timestep.reward)
        collected_step_types.append(timestep.step_type)
        if episode_length > time_limit:
            raise Exception('Entered infinite loop')
    return (collected_rewards, collected_step_types, episode_length)
def resnet_block12(x, cnum, ksize, stride, rate, name, IN=True, padding='REFLECT', activation=tf.nn.elu, training=True):
    """Residual block of two convolutions (dilation 1, then 2) with optional
    instance normalisation; returns input + branch output.

    NOTE(review): the ``rate`` argument is ignored — the two convolutions
    always use dilation rates 1 and 2. ``training`` is likewise unused.
    """
    assert padding in ['SYMMETRIC', 'SAME', 'REFLECT']
    shortcut = x
    explicit_pad = padding in ('SYMMETRIC', 'REFLECT')

    def _pad(tensor, dilation):
        # Manual SYMMETRIC/REFLECT padding so the conv can run with VALID.
        amount = int((dilation * (ksize - 1)) / 2)
        return tf.pad(tensor, [[0, 0], [amount, amount], [amount, amount], [0, 0]], mode=padding)

    # First conv: dilation 1, followed by optional IN and activation.
    dilation = 1
    if explicit_pad:
        x = _pad(x, dilation)
        conv_padding = 'VALID'
    else:
        conv_padding = padding
    x = tf.layers.conv2d(x, cnum, ksize, stride, dilation_rate=dilation, activation=None, padding=conv_padding, name=(name + '0'))
    if IN:
        x = tf.contrib.layers.instance_norm(x)
    if activation is not None:
        x = activation(x)
    # Second conv: dilation 2, optional IN, no activation before the skip add.
    dilation = 2
    if explicit_pad:
        x = _pad(x, dilation)
        conv_padding = 'VALID'
    else:
        conv_padding = padding
    x = tf.layers.conv2d(x, cnum, ksize, stride, dilation_rate=dilation, activation=None, padding=conv_padding, name=(name + '1'))
    if IN:
        x = tf.contrib.layers.instance_norm(x)
    return shortcut + x
_arg_scope
def batch_norm(inputs, decay=0.999, center=True, scale=False, epsilon=0.001, activation_fn=None, param_initializers=None, param_regularizers=None, updates_collections=ops.GraphKeys.UPDATE_OPS, is_training=True, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, batch_weights=None, fused=None, data_format=DATA_FORMAT_NHWC, zero_debias_moving_mean=False, scope=None, renorm=False, renorm_clipping=None, renorm_decay=0.99, adjustment=None):
    """Add a batch-normalization layer over ``inputs``.

    Dispatches between three implementations: the fused kernel, the core
    ``BatchNormalization`` layer, and a hand-rolled moments-based path (used
    when ``batch_weights``, custom ``updates_collections`` or
    ``zero_debias_moving_mean`` are requested).

    Returns the normalized (and optionally activated) tensor, registered in
    ``outputs_collections``.
    """
    if (fused is None):
        fused = True
    inputs = ops.convert_to_tensor(inputs)
    rank = inputs.get_shape().ndims
    # The fused kernel only supports rank-2/4 inputs without batch weights,
    # renorm or an adjustment function.
    possible_to_fuse = ((batch_weights is None) and (not renorm) and (rank in [2, 4]) and (adjustment is None))
    if (fused and possible_to_fuse and (zero_debias_moving_mean or (rank == 2) or (updates_collections is not ops.GraphKeys.UPDATE_OPS))):
        return _fused_batch_norm(inputs, decay=decay, center=center, scale=scale, epsilon=epsilon, activation_fn=activation_fn, param_initializers=param_initializers, param_regularizers=param_regularizers, updates_collections=updates_collections, is_training=is_training, reuse=reuse, variables_collections=variables_collections, outputs_collections=outputs_collections, trainable=trainable, data_format=data_format, zero_debias_moving_mean=zero_debias_moving_mean, scope=scope)
    if (data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC)):
        raise ValueError('data_format has to be either NCHW or NHWC.')
    layer_variable_getter = _build_variable_getter()
    with variable_scope.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
        inputs = ops.convert_to_tensor(inputs)
        # Fast path: delegate to the core BatchNormalization layer whenever
        # the caller did not ask for batch weights, custom update
        # collections or zero-debiasing.
        if ((batch_weights is None) and (updates_collections is ops.GraphKeys.UPDATE_OPS) and (not zero_debias_moving_mean)):
            axis = (1 if (data_format == DATA_FORMAT_NCHW) else (- 1))
            if (not param_initializers):
                param_initializers = {}
            beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
            gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
            moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
            moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())
            if (not param_regularizers):
                param_regularizers = {}
            beta_regularizer = param_regularizers.get('beta')
            gamma_regularizer = param_regularizers.get('gamma')
            layer = normalization_layers.BatchNormalization(axis=axis, momentum=decay, epsilon=epsilon, center=center, scale=scale, beta_initializer=beta_initializer, gamma_initializer=gamma_initializer, moving_mean_initializer=moving_mean_initializer, moving_variance_initializer=moving_variance_initializer, beta_regularizer=beta_regularizer, gamma_regularizer=gamma_regularizer, trainable=trainable, renorm=renorm, renorm_clipping=renorm_clipping, renorm_momentum=renorm_decay, adjustment=adjustment, name=sc.name, _scope=sc, _reuse=reuse, fused=fused)
            outputs = layer.apply(inputs, training=is_training)
            # Re-register the layer's variables in the caller's collections.
            _add_variable_to_collections(layer.moving_mean, variables_collections, 'moving_mean')
            _add_variable_to_collections(layer.moving_variance, variables_collections, 'moving_variance')
            if (layer.beta is not None):
                _add_variable_to_collections(layer.beta, variables_collections, 'beta')
            if (layer.gamma is not None):
                _add_variable_to_collections(layer.gamma, variables_collections, 'gamma')
            if (activation_fn is not None):
                outputs = activation_fn(outputs)
            return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
        # Slow path: hand-rolled normalization built from nn.moments.
        if renorm:
            raise ValueError('renorm is not supported with batch_weights, updates_collections or zero_debias_moving_mean')
        inputs_shape = inputs.get_shape()
        inputs_rank = inputs_shape.ndims
        if (inputs_rank is None):
            raise ValueError(('Inputs %s has undefined rank.' % inputs.name))
        dtype = inputs.dtype.base_dtype
        if (batch_weights is not None):
            batch_weights = ops.convert_to_tensor(batch_weights)
            inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
            # Reshape weights to (batch, 1, 1, ...) so they broadcast.
            nshape = ([(- 1)] + [1 for _ in range((inputs_rank - 1))])
            batch_weights = array_ops.reshape(batch_weights, nshape)
        if (data_format == DATA_FORMAT_NCHW):
            # Channels axis is 1; reduce over batch and spatial axes.
            moments_axes = ([0] + list(range(2, inputs_rank)))
            params_shape = inputs_shape[1:2]
            params_shape_broadcast = list(([1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)]))
        else:
            # Channels-last: reduce over every axis but the last.
            moments_axes = list(range((inputs_rank - 1)))
            params_shape = inputs_shape[(- 1):]
            params_shape_broadcast = None
        if (not params_shape.is_fully_defined()):
            raise ValueError(('Inputs %s has undefined channels dimension %s.' % (inputs.name, params_shape)))
        (beta, gamma) = (None, None)
        if (not param_initializers):
            param_initializers = {}
        if center:
            beta_collections = utils.get_variable_collections(variables_collections, 'beta')
            beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
            beta = variables.model_variable('beta', shape=params_shape, dtype=dtype, initializer=beta_initializer, collections=beta_collections, trainable=trainable)
        if scale:
            gamma_collections = utils.get_variable_collections(variables_collections, 'gamma')
            gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
            gamma = variables.model_variable('gamma', shape=params_shape, dtype=dtype, initializer=gamma_initializer, collections=gamma_collections, trainable=trainable)
        # Moving averages are created with no partitioner so they stay whole.
        with variable_scope.variable_scope(variable_scope.get_variable_scope()) as local_scope:
            local_scope.set_partitioner(None)
            moving_mean_collections = utils.get_variable_collections(variables_collections, 'moving_mean')
            moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
            moving_mean = variables.model_variable('moving_mean', shape=params_shape, dtype=dtype, initializer=moving_mean_initializer, trainable=False, collections=moving_mean_collections)
            moving_variance_collections = utils.get_variable_collections(variables_collections, 'moving_variance')
            moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())
            moving_variance = variables.model_variable('moving_variance', shape=params_shape, dtype=dtype, initializer=moving_variance_initializer, trainable=False, collections=moving_variance_collections)
        # Batch moments are only needed when training (or when is_training is
        # a tensor whose value is unknown at graph-build time).
        is_training_value = utils.constant_value(is_training)
        need_moments = ((is_training_value is None) or is_training_value)
        if need_moments:
            if (batch_weights is None):
                if (data_format == DATA_FORMAT_NCHW):
                    (mean, variance) = nn.moments(inputs, moments_axes, keep_dims=True)
                    mean = array_ops.reshape(mean, [(- 1)])
                    variance = array_ops.reshape(variance, [(- 1)])
                else:
                    (mean, variance) = nn.moments(inputs, moments_axes)
            elif (data_format == DATA_FORMAT_NCHW):
                (mean, variance) = nn.weighted_moments(inputs, moments_axes, batch_weights, keepdims=True)
                mean = array_ops.reshape(mean, [(- 1)])
                variance = array_ops.reshape(variance, [(- 1)])
            else:
                (mean, variance) = nn.weighted_moments(inputs, moments_axes, batch_weights)
            moving_vars_fn = (lambda : (moving_mean, moving_variance))
            if (updates_collections is None):
                # Update the moving averages inline, as a dependency of the
                # returned mean/variance.
                def _force_updates():
                    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
                    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay, zero_debias=False)
                    with ops.control_dependencies([update_moving_mean, update_moving_variance]):
                        return (array_ops.identity(mean), array_ops.identity(variance))
                (mean, variance) = utils.smart_cond(is_training, _force_updates, moving_vars_fn)
            else:
                # Defer the moving-average updates to the given collections.
                def _delay_updates():
                    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
                    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay, zero_debias=False)
                    return (update_moving_mean, update_moving_variance)
                (update_mean, update_variance) = utils.smart_cond(is_training, _delay_updates, moving_vars_fn)
                ops.add_to_collections(updates_collections, update_mean)
                ops.add_to_collections(updates_collections, update_variance)
                vars_fn = (lambda : (mean, variance))
                (mean, variance) = utils.smart_cond(is_training, vars_fn, moving_vars_fn)
        else:
            # Pure inference: normalize with the stored moving statistics.
            (mean, variance) = (moving_mean, moving_variance)
        if (data_format == DATA_FORMAT_NCHW):
            # Broadcast the per-channel statistics over NCHW inputs.
            mean = array_ops.reshape(mean, params_shape_broadcast)
            variance = array_ops.reshape(variance, params_shape_broadcast)
            if (beta is not None):
                beta = array_ops.reshape(beta, params_shape_broadcast)
            if (gamma is not None):
                gamma = array_ops.reshape(gamma, params_shape_broadcast)
        outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)
        outputs.set_shape(inputs_shape)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
_module()
class PISARetinaHead(RetinaHead):
    """RetinaHead variant with Prime Sample Attention: importance-based
    sample reweighting (``isr_p``) and classification-aware regression loss
    (``carl_loss``) layered on top of the standard retina losses."""

    def loss_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList=None) -> dict:
        """Compute classification and box-regression losses over all levels.

        Returns a dict with ``loss_cls`` and ``loss_bbox`` (plus ``loss_carl``
        when CARL is configured in ``train_cfg``), or ``None`` when target
        computation fails.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, batch_img_metas, device=device)
        label_channels = (self.cls_out_channels if self.use_sigmoid_cls else 1)
        # Sampling results are requested because ISR needs them later.
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, return_sampling_results=True)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list) = cls_reg_targets
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors)
        num_imgs = len(batch_img_metas)
        # Flatten per-level predictions/targets into (num_total_anchors, C)
        # and (num_total_anchors, 4) tensors.
        flatten_cls_scores = [cls_score.permute(0, 2, 3, 1).reshape(num_imgs, (- 1), label_channels) for cls_score in cls_scores]
        flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).reshape((- 1), flatten_cls_scores[0].size((- 1)))
        flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, (- 1), 4) for bbox_pred in bbox_preds]
        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1).view((- 1), flatten_bbox_preds[0].size((- 1)))
        flatten_labels = torch.cat(labels_list, dim=1).reshape((- 1))
        flatten_label_weights = torch.cat(label_weights_list, dim=1).reshape((- 1))
        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape((- 1), 4)
        flatten_bbox_targets = torch.cat(bbox_targets_list, dim=1).reshape((- 1), 4)
        flatten_bbox_weights = torch.cat(bbox_weights_list, dim=1).reshape((- 1), 4)
        # ISR-P: reweight the targets by sample importance (no grad needed).
        isr_cfg = self.train_cfg.get('isr', None)
        if (isr_cfg is not None):
            all_targets = (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights)
            with torch.no_grad():
                all_targets = isr_p(flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg['isr'])
            (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets
        losses_cls = self.loss_cls(flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=avg_factor)
        losses_bbox = self.loss_bbox(flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=avg_factor)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        # Optional classification-aware regression loss.
        carl_cfg = self.train_cfg.get('carl', None)
        if (carl_cfg is not None):
            loss_carl = carl_loss(flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg['carl'], avg_factor=avg_factor, sigmoid=True, num_class=self.num_classes)
            loss_dict.update(loss_carl)
        return loss_dict
(jit, static_argnames=('edges', 'node_idx'))
def posterior_update_mean_continuous_node(attributes: Dict, edges: Edges, node_idx: int, node_precision: float) -> float:
    """Update the posterior mean of a continuous state node.

    Accumulates precision-weighted value and volatility prediction errors
    from the node's children and adds the total to the expected mean.

    Parameters
    ----------
    attributes : per-node beliefs/parameters, keyed by node index.
    edges : adjacency structure listing each node's children.
    node_idx : index of the node being updated.
    node_precision : posterior precision used to weight the prediction errors.

    Returns
    -------
    float : the posterior mean of node ``node_idx``.
    """
    precision_weigthed_prediction_error = 0.0
    # Value coupling: gather prediction errors from value children.
    if (edges[node_idx].value_children is not None):
        for (value_child_idx, value_coupling) in zip(edges[node_idx].value_children, attributes[node_idx]['value_coupling_children']):
            value_prediction_error = attributes[value_child_idx]['temp']['value_prediction_error']
            # Mask out unobserved children so they do not drive the update.
            value_prediction_error *= attributes[value_child_idx]['observed']
            precision_weigthed_prediction_error += (((value_coupling * attributes[value_child_idx]['expected_precision']) / node_precision) * value_prediction_error)
    # Volatility coupling: gather prediction errors from volatility children.
    if (edges[node_idx].volatility_children is not None):
        for (volatility_child_idx, volatility_coupling) in zip(edges[node_idx].volatility_children, attributes[node_idx]['volatility_coupling_children']):
            volatility_prediction_error = attributes[volatility_child_idx]['temp']['volatility_prediction_error']
            effective_precision = attributes[volatility_child_idx]['temp']['effective_precision']
            precision_weigthed_prediction_error += ((volatility_coupling * effective_precision) * volatility_prediction_error)
            # NOTE(review): the two rescalings below act on the *running
            # total* each iteration (mirrors the upstream HGF update) —
            # confirm this is intended when a node has several children.
            precision_weigthed_prediction_error *= (1 / (2 * node_precision))
            # Mask using this child's observation flag.
            precision_weigthed_prediction_error *= attributes[volatility_child_idx]['observed']
    posterior_mean = (attributes[node_idx]['expected_mean'] + precision_weigthed_prediction_error)
    return posterior_mean
class galpy_profile(LiteratureReferencesMixIn):
    """AMUSE-compatible wrapper exposing a galpy potential as an external
    gravity field. Positions are taken in kpc and converted to galpy's
    internal units via the ``ro``/``vo`` scales."""

    def __init__(self, pot, t=0.0, tgalpy=0.0, ro=8, vo=220.0, reverse=False):
        # pot: galpy potential (or list of potentials) to evaluate.
        # t: current model time — AMUSE ScalarQuantity, or a float interpreted
        #    in galpy internal time units.
        # tgalpy: evaluation time for the potential, same convention as t.
        # ro / vo: galpy distance [kpc] and velocity [km/s] scales.
        # reverse: if True, step the galpy time backwards in evolve_model.
        LiteratureReferencesMixIn.__init__(self)
        self.pot = pot
        self.ro = ro
        self.vo = vo
        self.reverse = reverse
        if isinstance(t, ScalarQuantity):
            self.model_time = t
        else:
            # Bare float: convert from galpy internal units to Gyr.
            self.model_time = ((t * conversion.time_in_Gyr(ro=self.ro, vo=self.vo)) | units.Gyr)
        if isinstance(tgalpy, ScalarQuantity):
            # Convert an AMUSE time into galpy internal units.
            self.tgalpy = (tgalpy.value_in(units.Gyr) / conversion.time_in_Gyr(ro=self.ro, vo=self.vo))
        else:
            self.tgalpy = tgalpy

    def evolve_model(self, time):
        """Advance (or rewind, if ``reverse``) the galpy evaluation time."""
        dt = (time - self.model_time)
        self.model_time = time
        if self.reverse:
            self.tgalpy -= (dt.value_in(units.Gyr) / conversion.time_in_Gyr(ro=self.ro, vo=self.vo))
        else:
            self.tgalpy += (dt.value_in(units.Gyr) / conversion.time_in_Gyr(ro=self.ro, vo=self.vo))

    def get_potential_at_point(self, eps, x, y, z):
        """Return the potential at (x, y, z) as an AMUSE quantity [km^2/s^2]."""
        R = numpy.sqrt(((x.value_in(units.kpc) ** 2.0) + (y.value_in(units.kpc) ** 2.0)))
        zed = z.value_in(units.kpc)
        phi = numpy.arctan2(y.value_in(units.kpc), x.value_in(units.kpc))
        res = potential.evaluatePotentials(self.pot, (R / self.ro), (zed / self.ro), phi=phi, t=self.tgalpy, ro=self.ro, vo=self.vo, use_physical=False)
        return ((res * (self.vo ** 2)) | (units.kms ** 2))

    def get_gravity_at_point(self, eps, x, y, z):
        """Return the Cartesian acceleration (ax, ay, az) at (x, y, z)."""
        R = numpy.sqrt(((x.value_in(units.kpc) ** 2.0) + (y.value_in(units.kpc) ** 2.0)))
        zed = z.value_in(units.kpc)
        phi = numpy.arctan2(y.value_in(units.kpc), x.value_in(units.kpc))
        Rforce = potential.evaluateRforces(self.pot, (R / self.ro), (zed / self.ro), phi=phi, t=self.tgalpy, use_physical=False)
        # Azimuthal force: torque per unit cylindrical radius.
        phitorque = (potential.evaluatephitorques(self.pot, (R / self.ro), (zed / self.ro), phi=phi, t=self.tgalpy, use_physical=False) / (R / self.ro))
        zforce = potential.evaluatezforces(self.pot, (R / self.ro), (zed / self.ro), phi=phi, t=self.tgalpy, use_physical=False)
        # Project cylindrical forces onto Cartesian axes.
        (cp, sp) = (numpy.cos(phi), numpy.sin(phi))
        ax = ((((Rforce * cp) - (phitorque * sp)) * conversion.force_in_kmsMyr(ro=self.ro, vo=self.vo)) | (units.kms / units.Myr))
        ay = ((((Rforce * sp) + (phitorque * cp)) * conversion.force_in_kmsMyr(ro=self.ro, vo=self.vo)) | (units.kms / units.Myr))
        az = ((zforce * conversion.force_in_kmsMyr(ro=self.ro, vo=self.vo)) | (units.kms / units.Myr))
        return (ax, ay, az)

    def mass_density(self, x, y, z):
        """Return the local mass density at (x, y, z) [MSun/pc^3]."""
        R = numpy.sqrt(((x.value_in(units.kpc) ** 2.0) + (y.value_in(units.kpc) ** 2.0)))
        zed = z.value_in(units.kpc)
        phi = numpy.arctan2(y.value_in(units.kpc), x.value_in(units.kpc))
        res = (potential.evaluateDensities(self.pot, (R / self.ro), (zed / self.ro), phi=phi, t=self.tgalpy, ro=self.ro, vo=self.vo, use_physical=False) * conversion.dens_in_msolpc3(self.vo, self.ro))
        return (res | (units.MSun / (units.parsec ** 3)))

    def circular_velocity(self, r):
        """Return the circular velocity at cylindrical radius ``r`` [km/s]."""
        res = potential.vcirc(self.pot, (r.value_in(units.kpc) / self.ro), phi=0, t=self.tgalpy, ro=self.ro, vo=self.vo, use_physical=False)
        return ((res * self.vo) | units.kms)

    def enclosed_mass(self, r):
        """Return the mass enclosed within ``r`` via vc^2 * r / G [MSun]."""
        vc = (potential.vcirc(self.pot, (r.value_in(units.kpc) / self.ro), phi=0, t=self.tgalpy, ro=self.ro, vo=self.vo, use_physical=False) * self.vo)
        return ((((vc ** 2.0) * r.value_in(units.parsec)) / conversion._G) | units.MSun)

    def stop(self):
        # No external worker process to shut down; required by the AMUSE API.
        pass
class RNN(Model):
    """Plain ``nn.RNN`` over a sequence of signature features, followed by a
    linear read-out of the concatenated final hidden state of every layer."""

    _compatible_windows = (window_module.Global, window_module.Sliding, window_module.Expanding, window_module.Dyadic)

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers, nonlinearity='tanh', bias=True, dropout=0):
        super(RNN, self).__init__()
        # Keep the configuration around for introspection.
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.num_layers = num_layers
        self.nonlinearity = nonlinearity
        self.bias = bias
        self.dropout = dropout
        self.rnn = nn.RNN(input_size=in_channels, hidden_size=hidden_channels, num_layers=num_layers, nonlinearity=nonlinearity, bias=bias, dropout=dropout, batch_first=True)
        self.total_hidden_size = num_layers * hidden_channels
        self.linear = nn.Linear(self.total_hidden_size, out_channels)

    def forward(self, signatures):
        # Exactly one window's worth of signatures is expected.
        assert len(signatures) == 1
        steps = torch.stack(signatures[0], dim=1)
        # Keep only the final hidden state of each layer.
        _, final_hidden = self.rnn(steps)
        flat = final_hidden.transpose(0, 1).reshape(steps.size(0), self.total_hidden_size)
        return self.linear(flat)
def timer(log=None):
    """Crude start/stop timer kept on the function object itself.

    Call ``timer()`` to (re)start the clock; call ``timer(msg)`` to print
    ``msg`` together with the seconds elapsed since the last start.
    """
    if log is None:
        timer.time0 = time.time()
        return
    elapsed = time.time() - timer.time0
    print(f'{log}: {elapsed}')
(name='save_json_mock')
def _save_json_mock(monkeypatch: MonkeyPatch) -> MagicMock:
    """Pytest fixture: replace ``cache.file_utils.safe_jsonify`` with a mock.

    Lets tests assert on JSON-save calls without touching the filesystem.
    """
    save_mock = MagicMock()
    monkeypatch.setattr(cache.file_utils, 'safe_jsonify', save_mock)
    return save_mock
def _count_unmasked_weights(model):
    """Return a 1-D FloatTensor holding, per module of ``model``, the number
    of weights whose mask entry is set."""
    return torch.FloatTensor([module.weight_mask.sum() for module in get_modules(model)])
class UniSpeechSatModel(metaclass=DummyObject):
    """Import-time placeholder for ``UniSpeechSatModel``: instantiating it
    raises a helpful error when the ``torch`` backend is unavailable."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def dot_attention(queries, attns=None, memory=None, seq_len=None, causality=False, scope='Dot_Attention', reuse=None, mask=None, return_weights=False, bias=True, dropout=0.0):
    """Single-head dot-product attention of ``queries`` over ``memory``.

    Returns ``(res, weights, logits)``: the attended values, the softmax
    attention weights, and the raw (masked) logits.

    NOTE(review): ``attns``, ``seq_len``, ``causality``, ``bias`` and
    ``return_weights`` are accepted but never used — all three outputs are
    always returned.
    """
    with tf.variable_scope(scope, default_name='dot_attention', reuse=reuse):
        key = tf.expand_dims(memory, 1)
        queries = tf.expand_dims(queries, 1)
        logits = tf.matmul(queries, key, transpose_b=True)
        # Replace dynamic (None) dims with -1 so the mask can be reshaped.
        shapes = [(x if (x != None) else (- 1)) for x in logits.shape.as_list()]
        if (mask is not None):
            mask = tf.cast(tf.reshape(mask, [(- 1), 1, 1, shapes[(- 1)]]), tf.int32)
            logits = mask_logits(logits, mask)
        weights = tf.nn.softmax(logits, name='attention_weights')
        # TF1 dropout takes keep_prob, hence 1 - dropout.
        weights = tf.nn.dropout(weights, (1.0 - dropout))
        res = tf.matmul(weights, tf.expand_dims(memory, 1))
        res = combine_last_two_dimensions(tf.transpose(res, [0, 2, 1, 3]))
        return (res, weights, logits)
def test_ordering():
    """Check that the BatchResolver delivers requests/responses in a
    deterministic order along the A -> B -> C connection chain."""
    n = Network([_TestAgent('A'), _TestAgent('B'), _TestAgent('C')], BatchResolver())
    n.add_connection('A', 'B')
    n.add_connection('A', 'C')
    n.add_connection('B', 'C')
    n.send('A', 'B', Request(100.0))
    n.send('A', 'C', Request(100.0))
    n.send('B', 'C', Request(100.0))
    n.resolve({aid: n.context_for(aid, EnvView(0, 0.0)) for aid in n.agents})
    # Requests are received in agent order, responses likewise.
    assert (n['A'].req_time <= n['B'].req_time)
    assert (n['B'].req_time <= n['C'].req_time)
    assert (n['C'].res_time <= n['A'].res_time)
    assert (n['A'].res_time <= n['B'].res_time)
def eval_full(tags_ours, tags_gold):
    """Compute the V-measure between two sentence-segmented taggings.

    Args:
        tags_ours: iterable of per-sentence predicted tag sequences.
        tags_gold: iterable of per-sentence gold tag sequences.

    Returns:
        float: ``v_measure_score`` over the flattened tag sequences.

    Raises:
        ValueError: if the flattened taggings differ in length.
    """
    our_lst = [tag for sentence in tags_ours for tag in sentence]
    gold_lst = [tag for sentence in tags_gold for tag in sentence]
    # Raise instead of assert so the check survives `python -O`.
    if len(our_lst) != len(gold_lst):
        raise ValueError(f'tag sequences differ in length: {len(our_lst)} vs {len(gold_lst)}')
    return v_measure_score(our_lst, gold_lst)
class NumelDataset(BaseWrapperDataset):
    """Wraps a dataset and yields, per index, the number of elements in the
    wrapped item. ``collater`` either sums the counts (``reduce=True``) or
    stacks them into a tensor."""

    def __init__(self, dataset, reduce=False):
        super().__init__(dataset)
        self.reduce = reduce

    def __getitem__(self, index):
        item = self.dataset[index]
        # Tensors know their own element count; fall back to numpy for
        # everything else (lists, arrays, scalars).
        return torch.numel(item) if torch.is_tensor(item) else np.size(item)

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        if not self.reduce:
            return torch.tensor(samples)
        return sum(samples)
def test_contrast_attribute_target_only_enc_dec(saliency_mt_model: EncoderDecoderAttributionModel):
    """Contrastive attribution with target-side attribution: an explicitly
    registered probability-difference step function must match the built-in
    ``contrast_prob_diff``, while ``contrast_force_inputs`` must change the
    attributions without changing the step scores."""
    inseq.register_step_function(fn=attr_prob_diff_fn, identifier='attr_prob_diff', overwrite=True)
    src = 'The nurse was tired and went home.'
    tgt = "L'infermiere era stanco e ando a casa."
    contrast_tgt = "L'infermiera era stanca e ando a casa."
    out_explicit_logit_prob_diff = saliency_mt_model.attribute(src, tgt, contrast_targets=contrast_tgt, attributed_fn='attr_prob_diff', step_scores=['attr_prob_diff', 'contrast_prob_diff'], attribute_target=True)
    out_default_prob_diff = saliency_mt_model.attribute(src, tgt, contrast_targets=contrast_tgt, attributed_fn='contrast_prob_diff', step_scores=['contrast_prob_diff'], attribute_target=True)
    # The custom and built-in step functions must agree on scores and
    # attributions (target attributions may contain NaNs, hence equal_nan).
    assert torch.allclose(out_explicit_logit_prob_diff[0].step_scores['contrast_prob_diff'], out_default_prob_diff[0].step_scores['contrast_prob_diff'])
    assert torch.allclose(out_explicit_logit_prob_diff[0].source_attributions, out_default_prob_diff[0].source_attributions)
    assert torch.allclose(out_explicit_logit_prob_diff[0].target_attributions, out_default_prob_diff[0].target_attributions, equal_nan=True)
    # Forcing contrastive inputs changes attributions but not scores.
    out_contrast_force_inputs_prob_diff = saliency_mt_model.attribute(src, tgt, contrast_targets=contrast_tgt, attributed_fn='contrast_prob_diff', step_scores=['contrast_prob_diff'], attribute_target=True, contrast_force_inputs=True)
    assert (not torch.allclose(out_explicit_logit_prob_diff[0].source_attributions, out_contrast_force_inputs_prob_diff[0].source_attributions))
    assert (not torch.allclose(out_explicit_logit_prob_diff[0].target_attributions, out_contrast_force_inputs_prob_diff[0].target_attributions, equal_nan=True))
    assert torch.allclose(out_explicit_logit_prob_diff[0].step_scores['contrast_prob_diff'], out_default_prob_diff[0].step_scores['contrast_prob_diff'])
def translation(translation):
    """Return the 3x3 homogeneous 2-D translation matrix for ``translation``
    = (tx, ty)."""
    tx, ty = translation[0], translation[1]
    return np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
class SimpleCrossAttnDownBlock2D(nn.Module):
    """UNet down block interleaving ResNet blocks with added-KV cross
    attention, optionally followed by a resnet-based downsampler."""

    def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attention_head_dim: int=1, cross_attention_dim: int=1280, output_scale_factor: float=1.0, add_downsample: bool=True, skip_time_act: bool=False, only_cross_attention: bool=False, cross_attention_norm: Optional[str]=None):
        super().__init__()
        self.has_cross_attention = True
        resnets = []
        attentions = []
        self.attention_head_dim = attention_head_dim
        self.num_heads = (out_channels // self.attention_head_dim)
        for i in range(num_layers):
            # Only the first resnet changes the channel count.
            in_channels = (in_channels if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act))
            # Prefer the fused SDPA processor when torch provides it.
            processor = (AttnAddedKVProcessor2_0() if hasattr(F, 'scaled_dot_product_attention') else AttnAddedKVProcessor())
            attentions.append(Attention(query_dim=out_channels, cross_attention_dim=out_channels, heads=self.num_heads, dim_head=attention_head_dim, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm, processor=processor))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        if add_downsample:
            # Downsampling is done by a strided ResnetBlock2D (down=True).
            self.downsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, down=True)])
        else:
            self.downsamplers = None
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, cross_attention_kwargs: Optional[Dict[(str, Any)]]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None) -> Tuple[(torch.FloatTensor, Tuple[(torch.FloatTensor, ...)])]:
        """Run resnet+attention pairs (and the optional downsampler),
        returning the final hidden states plus every intermediate state."""
        output_states = ()
        cross_attention_kwargs = (cross_attention_kwargs if (cross_attention_kwargs is not None) else {})
        lora_scale = cross_attention_kwargs.get('scale', 1.0)
        if (attention_mask is None):
            # Fall back to the encoder mask when doing cross attention.
            mask = (None if (encoder_hidden_states is None) else encoder_attention_mask)
        else:
            mask = attention_mask
        for (resnet, attn) in zip(self.resnets, self.attentions):
            if (self.training and self.gradient_checkpointing):
                # Recompute the resnet in the backward pass to save memory.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if (return_dict is not None):
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs)
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=mask, **cross_attention_kwargs)
            output_states = (output_states + (hidden_states,))
        if (self.downsamplers is not None):
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, temb, scale=lora_scale)
            output_states = (output_states + (hidden_states,))
        return (hidden_states, output_states)
class InterpolationBlock(nn.Module):
    """Thin ``nn.Module`` wrapper around ``F.interpolate`` with a fixed
    scale factor, mode and ``align_corners`` setting."""

    def __init__(self, scale_factor, mode='nearest', align_corners=None):
        super(InterpolationBlock, self).__init__()
        # Stored verbatim and forwarded to F.interpolate on every call.
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        return F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners)
def generate_label(args):
    """Write the split JSON files and generate labels for the train/val and
    test splits under ``args.root/args.savedir``."""
    out_dir = os.path.join(args.root, args.savedir)
    os.makedirs(out_dir, exist_ok=True)
    # Emit both split description files first, then label each split.
    generate_json_file(out_dir, 'train_val.json', TRAIN_VAL_SET)
    generate_json_file(out_dir, 'test.json', TEST_SET)
    print('generating train_val set...')
    gen_label_for_json(args, 'train_val')
    print('generating test set...')
    gen_label_for_json(args, 'test')
class RBTree(_ABCTree):
    """Top-down red-black tree (Julienne Walker's iterative algorithm).

    Rebalancing happens on the way down during ``insert``/``remove``, so no
    parent pointers or recursion are needed. Nodes support integer indexing
    (0 = left child, 1 = right child).
    """

    def is_red(node):
        # NIL leaves (None) count as black.
        if ((node is not None) and node.red):
            return True
        else:
            return False

    def jsw_single(root, direction):
        # Single rotation of *root* toward *direction*; returns the new
        # subtree root, recolored red(root)/black(new root).
        other_side = (1 - direction)
        save = root[other_side]
        root[other_side] = save[direction]
        save[direction] = root
        root.red = True
        save.red = False
        return save

    def jsw_double(root, direction):
        # Double rotation: rotate the child first, then *root* itself.
        other_side = (1 - direction)
        root[other_side] = RBTree.jsw_single(root[other_side], other_side)
        return RBTree.jsw_single(root, direction)

    def _new_node(self, key, value):
        # Create a new node and bump the element count.
        self._count += 1
        return Node(key, value)

    def insert(self, key, value):
        """Insert *key*/*value*; replaces the value if *key* exists."""
        if (self._root is None):
            # Empty tree: the root is always black.
            self._root = self._new_node(key, value)
            self._root.red = False
            return
        head = Node()  # fake root simplifying rotations near the top
        grand_parent = None
        grand_grand_parent = head
        parent = None
        direction = 0
        last = 0
        grand_grand_parent.right = self._root
        node = grand_grand_parent.right
        while True:
            if (node is None):
                # Reached the insertion point.
                node = self._new_node(key, value)
                parent[direction] = node
            elif (RBTree.is_red(node.left) and RBTree.is_red(node.right)):
                # Color flip: push redness up toward the root.
                node.red = True
                node.left.red = False
                node.right.red = False
            if (RBTree.is_red(node) and RBTree.is_red(parent)):
                # Red violation: fix with a single or double rotation.
                direction2 = (1 if (grand_grand_parent.right is grand_parent) else 0)
                if (node is parent[last]):
                    grand_grand_parent[direction2] = RBTree.jsw_single(grand_parent, (1 - last))
                else:
                    grand_grand_parent[direction2] = RBTree.jsw_double(grand_parent, (1 - last))
            if (self._cmp(self._cmp_data, key, node.key) == 0):
                # Key already present: overwrite the value and stop.
                node.value = value
                break
            last = direction
            direction = (0 if (self._cmp(self._cmp_data, key, node.key) < 0) else 1)
            # Advance the helper pointers one level down.
            if (grand_parent is not None):
                grand_grand_parent = grand_parent
            grand_parent = parent
            parent = node
            node = node[direction]
        self._root = head.right
        self._root.red = False  # the root must stay black

    def remove(self, key):
        """Remove *key* from the tree; raises ``KeyError`` if missing."""
        if (self._root is None):
            raise KeyError(str(key))
        head = Node()  # fake root
        node = head
        node.right = self._root
        parent = None
        grand_parent = None
        found = None  # node holding *key*, remembered while descending
        direction = 1
        # Search while pushing a red node down to the leaf we will unlink.
        while (node[direction] is not None):
            last = direction
            grand_parent = parent
            parent = node
            node = node[direction]
            direction = (1 if (self._cmp(self._cmp_data, node.key, key) < 0) else 0)
            if (self._cmp(self._cmp_data, key, node.key) == 0):
                found = node
            if ((not RBTree.is_red(node)) and (not RBTree.is_red(node[direction]))):
                if RBTree.is_red(node[(1 - direction)]):
                    parent[last] = RBTree.jsw_single(node, direction)
                    parent = parent[last]
                elif (not RBTree.is_red(node[(1 - direction)])):
                    sibling = parent[(1 - last)]
                    if (sibling is not None):
                        if ((not RBTree.is_red(sibling[(1 - last)])) and (not RBTree.is_red(sibling[last]))):
                            # Color flip.
                            parent.red = False
                            sibling.red = True
                            node.red = True
                        else:
                            direction2 = (1 if (grand_parent.right is parent) else 0)
                            if RBTree.is_red(sibling[last]):
                                grand_parent[direction2] = RBTree.jsw_double(parent, last)
                            elif RBTree.is_red(sibling[(1 - last)]):
                                grand_parent[direction2] = RBTree.jsw_single(parent, last)
                            # Re-color after the rotation.
                            grand_parent[direction2].red = True
                            node.red = True
                            grand_parent[direction2].left.red = False
                            grand_parent[direction2].right.red = False
        if (found is not None):
            # Copy payload from the in-order neighbour and unlink *node*.
            found.key = node.key
            found.value = node.value
            parent[int((parent.right is node))] = node[int((node.left is None))]
            node.free()
            self._count -= 1
        self._root = head.right
        if (self._root is not None):
            self._root.red = False
        if (not found):
            raise KeyError(str(key))
class EMAModelTests(unittest.TestCase):
    """Tests for EMAModel shadow-parameter tracking against a tiny UNet.

    NOTE(review): relies on module-level `torch_device`, `EMAModel` and
    `UNet2DConditionModel`.
    """

    model_id = 'hf-internal-testing/tiny-stable-diffusion-pipe'
    batch_size = 1
    prompt_length = 77
    text_encoder_hidden_dim = 32
    num_in_channels = 4
    latent_height = latent_width = 64
    generator = torch.manual_seed(0)

    def get_models(self, decay=0.9999):
        """Return (unet, ema_unet): the model and an EMA tracker over its params."""
        unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder='unet')
        unet = unet.to(torch_device)
        ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config)
        return (unet, ema_unet)

    def get_dummy_inputs(self):
        """Random (noisy_latents, timesteps, encoder_hidden_states) for one forward pass."""
        noisy_latents = torch.randn(self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator).to(torch_device)
        timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device)
        encoder_hidden_states = torch.randn(self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator).to(torch_device)
        return (noisy_latents, timesteps, encoder_hidden_states)

    def simulate_backprop(self, unet):
        """Randomly perturb every parameter, mimicking an optimizer update."""
        updated_state_dict = {}
        for (k, param) in unet.state_dict().items():
            updated_param = (torch.randn_like(param) + (param * torch.randn_like(param)))
            updated_state_dict.update({k: updated_param})
        unet.load_state_dict(updated_state_dict)
        return unet

    def test_optimization_steps_updated(self):
        (unet, ema_unet) = self.get_models()
        ema_unet.step(unet.parameters())
        assert (ema_unet.optimization_step == 1)
        for _ in range(2):
            ema_unet.step(unet.parameters())
        assert (ema_unet.optimization_step == 3)

    def test_shadow_params_not_updated(self):
        # with no parameter change, shadow params must track exactly
        (unet, ema_unet) = self.get_models()
        ema_unet.step(unet.parameters())
        orig_params = list(unet.parameters())
        for (s_param, param) in zip(ema_unet.shadow_params, orig_params):
            assert torch.allclose(s_param, param)
        for _ in range(4):
            ema_unet.step(unet.parameters())
        for (s_param, param) in zip(ema_unet.shadow_params, orig_params):
            assert torch.allclose(s_param, param)

    def test_shadow_params_updated(self):
        (unet, ema_unet) = self.get_models()
        unet_pseudo_updated_step_one = self.simulate_backprop(unet)
        ema_unet.step(unet_pseudo_updated_step_one.parameters())
        orig_params = list(unet_pseudo_updated_step_one.parameters())
        # BUGFIX: `assert (~ torch.allclose(...))` bitwise-negated a Python
        # bool into a non-zero int, so the assertion could never fail; `not`
        # makes the inequality check real.
        for (s_param, param) in zip(ema_unet.shadow_params, orig_params):
            assert not torch.allclose(s_param, param)
        for _ in range(4):
            ema_unet.step(unet.parameters())
        for (s_param, param) in zip(ema_unet.shadow_params, orig_params):
            assert not torch.allclose(s_param, param)

    def test_consecutive_shadow_params_updated(self):
        (unet, ema_unet) = self.get_models()
        unet_step_one = self.simulate_backprop(unet)
        ema_unet.step(unet_step_one.parameters())
        # BUGFIX: shadow params are updated in place, so both snapshots used
        # to alias the same tensors; clone the first snapshot so the
        # comparison below is meaningful (the old `~` also masked this).
        step_one_shadow_params = [p.detach().clone() for p in ema_unet.shadow_params]
        unet_step_two = self.simulate_backprop(unet_step_one)
        ema_unet.step(unet_step_two.parameters())
        step_two_shadow_params = ema_unet.shadow_params
        for (step_one, step_two) in zip(step_one_shadow_params, step_two_shadow_params):
            assert not torch.allclose(step_one, step_two)

    def test_zero_decay(self):
        # with decay 0 the shadow params always equal the latest params,
        # so consecutive snapshots of the same tensors compare equal
        (unet, ema_unet) = self.get_models(decay=0.0)
        unet_step_one = self.simulate_backprop(unet)
        ema_unet.step(unet_step_one.parameters())
        step_one_shadow_params = ema_unet.shadow_params
        unet_step_two = self.simulate_backprop(unet_step_one)
        ema_unet.step(unet_step_two.parameters())
        step_two_shadow_params = ema_unet.shadow_params
        for (step_one, step_two) in zip(step_one_shadow_params, step_two_shadow_params):
            assert torch.allclose(step_one, step_two)

    # NOTE(review): a bare `_mps` statement stood here — evidently a
    # truncated decorator (presumably @skip_mps); restore from upstream.
    def test_serialization(self):
        (unet, ema_unet) = self.get_models()
        (noisy_latents, timesteps, encoder_hidden_states) = self.get_dummy_inputs()
        with tempfile.TemporaryDirectory() as tmpdir:
            ema_unet.save_pretrained(tmpdir)
            # NOTE(review): `model_cls` is unusual for from_pretrained —
            # confirm against the EMAModel loading API.
            loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel)
            loaded_unet = loaded_unet.to(unet.device)
        output = unet(noisy_latents, timesteps, encoder_hidden_states).sample
        output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample
        assert torch.allclose(output, output_loaded, atol=0.0001)
def apply_random_jpeg_compress(img, chance, mask=None, rnd_state=None):
    """With probability `chance` percent, round-trip a float image through JPEG.

    `img` is a float HWC image in [0, 1]. Quality is drawn uniformly from
    [10, 100]. When `mask` is given, the compressed result is blended in only
    where the mask is non-zero. Returns `img` unchanged when the dice roll
    fails.
    """
    if rnd_state is None:
        rnd_state = np.random
    result = img
    if rnd_state.randint(100) < np.clip(chance, 0, 100):
        h, w, c = result.shape  # implicitly requires an HWC array
        quality = rnd_state.randint(10, 101)
        as_uint8 = np.clip(img * 255, 0, 255).astype(np.uint8)
        encoded_ok, result = cv2.imencode('.jpg', as_uint8, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
        if encoded_ok == True:
            decoded = cv2.imdecode(result, flags=cv2.IMREAD_UNCHANGED)
            result = decoded.astype(np.float32) / 255.0
            if mask is not None:
                result = img * (1 - mask) + result * mask
    return result
class RobertaTokenizerFast():
    """Placeholder that raises a helpful error when `tokenizers` is missing."""

    def __init__(self, *args, **kwargs):
        """Any construction attempt reports the missing backend."""
        requires_tokenizers(self)

    def from_pretrained(self, *args, **kwargs):
        """Loading likewise reports the missing backend."""
        requires_tokenizers(self)
def normalize_2d(x, eps=1e-08):
    """L2-normalize each row of a 2-D tensor.

    Args:
        x: tensor of shape (n, d).
        eps: small constant added to each row norm to avoid division by zero.

    Returns:
        Tensor of shape (n, d) whose rows have (approximately) unit L2 norm.
    """
    assert (x.dim() == 2)
    # BUGFIX: the previous `(l2 + eps).expand_as(x)` right-aligned the (n,)
    # norm vector against (n, d), which errors for n != d and divides by the
    # wrong (column-wise) norms when n == d. keepdim + broadcasting divides
    # row i by its own norm.
    l2 = x.norm(2, 1, keepdim=True)
    return x / (l2 + eps)
def _postprocess_output(ioup, output, an_num, num_classes, iou_aware_factor):
    """Blend IoU-aware predictions into a YOLO head output (PaddlePaddle fluid).

    Per anchor, the channel slice of `output` is consumed as 4 box terms,
    1 objectness logit, then `num_classes` class scores. The objectness is
    re-weighted by the predicted IoU (`ioup`) as obj' = obj^(1-f) * iou^f and
    mapped back to logit space with `_de_sigmoid`.

    Args:
        ioup: IoU prediction tensor, one channel per anchor along axis 1.
        output: raw head output; axis 1 holds an_num * (5 + num_classes)
            channels — presumably NCHW; confirm with callers.
        an_num: number of anchors per location.
        num_classes: number of object classes.
        iou_aware_factor: exponent f controlling the IoU/objectness blend.

    Returns:
        Tensor shaped like `output` with re-weighted objectness logits.
    """
    tensors = []
    # channels per anchor: 4 box terms + 1 objectness + num_classes scores
    stride = (output.shape[1] // an_num)
    for m in range(an_num):
        # box terms (channels 0..3 of this anchor) pass through unchanged
        tensors.append(fluid.layers.slice(output, axes=[1], starts=[((stride * m) + 0)], ends=[((stride * m) + 4)]))
        obj = fluid.layers.slice(output, axes=[1], starts=[((stride * m) + 4)], ends=[((stride * m) + 5)])
        obj = fluid.layers.sigmoid(obj)
        # per-anchor IoU prediction channel
        ip = fluid.layers.slice(ioup, axes=[1], starts=[m], ends=[(m + 1)])
        # geometric blend of objectness and IoU, then back to logit space
        new_obj = (fluid.layers.pow(obj, (1 - iou_aware_factor)) * fluid.layers.pow(ip, iou_aware_factor))
        new_obj = _de_sigmoid(new_obj)
        tensors.append(new_obj)
        # class scores pass through unchanged
        tensors.append(fluid.layers.slice(output, axes=[1], starts=[((stride * m) + 5)], ends=[(((stride * m) + 5) + num_classes)]))
    output = fluid.layers.concat(tensors, axis=1)
    return output
def get_output_module(last_state, encoded_query, num_blocks, vocab_size, activation=tf.nn.relu, initializer=None, scope=None):
    """EntNet-style output module: attend over memory blocks with the query.

    Splits `last_state` into `num_blocks` memory slots, softmax-attends each
    slot against the encoded query, pools the slots into a single vector u,
    and projects activation(q + u H) through R to vocabulary logits.

    Args:
        last_state: final memory state, blocks concatenated along axis 1.
        encoded_query: encoded question — presumably [batch, 1, embedding];
            it is squeezed on axis 1 below. Confirm with callers.
        num_blocks: number of memory blocks in `last_state`.
        vocab_size: output vocabulary size.
        activation: nonlinearity applied before the final projection.
        initializer: variable initializer for the scope.
        scope: optional variable-scope name (default 'Output').

    Returns:
        Unnormalized vocabulary logits.

    FIX: removed two unreachable lines (`outputs = None; return outputs`)
    that followed the `return y` statement.
    """
    with tf.variable_scope(scope, 'Output', initializer=initializer):
        # [batch, num_blocks, embedding_size]
        last_state = tf.stack(tf.split(last_state, num_blocks, axis=1), axis=1)
        (_, _, embedding_size) = last_state.get_shape().as_list()
        # dot-product score of each block against the query
        attention = tf.reduce_sum((last_state * encoded_query), axis=2)
        # subtract the row max for a numerically stable softmax
        attention_max = tf.reduce_max(attention, axis=(- 1), keep_dims=True)
        attention = tf.nn.softmax((attention - attention_max))
        attention = tf.expand_dims(attention, axis=2)
        # attention-weighted sum of block states
        u = tf.reduce_sum((last_state * attention), axis=1)
        R = tf.get_variable('R', [embedding_size, vocab_size])
        H = tf.get_variable('H', [embedding_size, embedding_size])
        q = tf.squeeze(encoded_query, axis=1)
        y = tf.matmul(activation((q + tf.matmul(u, H))), R)
        return y
class Trainer(object):
    """MAML-style meta-learning trainer over multiple source-domain loaders.

    Each iteration shuffles the source order; for every source t it deep-copies
    the model, takes one inner Adam step on source t (meta-train), evaluates
    the adapted copy on the next source in the order (meta-test), and
    accumulates both the copy's gradients and the meta-test gradients into the
    base model before a single outer optimizer step.
    """

    def __init__(self, args, model, criterion):
        super(Trainer, self).__init__()
        self.model = model
        # NOTE(review): device is recorded but _parse_data unconditionally
        # calls .cuda() — training presumably assumes a GPU; confirm.
        self.device = ('cuda' if torch.cuda.is_available() else 'cpu')
        self.criterion = criterion
        self.args = args

    def train(self, epoch, data_loaders, optimizer, print_freq=10, train_iters=400):
        """Run `train_iters` meta-iterations of one epoch.

        Args:
            epoch: current epoch index (logging only).
            data_loaders: one iterator per source domain; each must expose
                .next() returning a batch (see _parse_data for the layout).
            optimizer: outer-loop optimizer over self.model's parameters.
            print_freq: log every `print_freq` iterations.
            train_iters: number of meta-iterations to run.
        """
        self.model.train()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        losses_meta_train = AverageMeter()
        losses_meta_test = AverageMeter()
        # inner-loop LR mirrors the current outer-loop LR
        metaLR = optimizer.param_groups[0]['lr']
        source_count = len(data_loaders)
        end = time.time()
        for i in range(train_iters):
            # fresh random ordering of source domains for this iteration
            data_loader_index = [i for i in range(source_count)]
            random.shuffle(data_loader_index)
            batch_data = [data_loaders[i].next() for i in range(source_count)]
            data_time.update((time.time() - end))
            optimizer.zero_grad()
            # materialize .grad buffers so meta-gradients can be accumulated in place
            for p in self.model.parameters():
                if (p.grad is None):
                    p.grad = torch.zeros_like(p)
            loss_meta_train = 0.0
            loss_meta_test = 0.0
            for t in range(source_count):
                # adapt a fresh copy of the base model on source t
                inner_model = copy.deepcopy(self.model)
                inner_opt = torch.optim.Adam(inner_model.parameters(), lr=metaLR, weight_decay=self.args.weight_decay)
                data_time.update((time.time() - end))
                traininputs = batch_data[data_loader_index[t]]
                trainid = data_loader_index[t]
                # meta-test domain = next domain in the shuffled order (wraps around)
                if (t == (len(data_loader_index) - 1)):
                    testinputs = batch_data[data_loader_index[0]]
                    testid = data_loader_index[0]
                else:
                    testinputs = batch_data[data_loader_index[(t + 1)]]
                    testid = data_loader_index[(t + 1)]
                (inputs, targets) = self._parse_data(traininputs)[2:]
                (pred_mtr, sim_loss, sim_loss2, orth_loss) = inner_model.train_forward(inputs, trainid)
                loss_mtr = (((self.criterion(pred_mtr, targets) + torch.sum(sim_loss)) + sim_loss2) + orth_loss)
                loss_meta_train += loss_mtr
                inner_opt.zero_grad()
                loss_mtr.backward()
                inner_opt.step()
                # fold the adapted copy's gradients back into the base model
                for (p_tgt, p_src) in zip(self.model.parameters(), inner_model.parameters()):
                    if (p_src.grad is not None):
                        p_tgt.grad.data.add_((p_src.grad.data / source_count))
                (testInputs, testMaps) = self._parse_data(testinputs)[:2]
                (pred_mte, sim_loss, sim_loss2, orth_loss) = inner_model.train_forward(testInputs, testid)
                loss_mte = (((self.criterion(pred_mte, testMaps) + torch.sum(sim_loss)) + sim_loss2) + orth_loss)
                loss_meta_test += loss_mte
                # meta-test gradients w.r.t. the adapted parameters, also
                # accumulated into the base model (first-order approximation)
                grad_inner_j = torch.autograd.grad(loss_mte, inner_model.parameters(), allow_unused=True)
                for (p, g_j) in zip(self.model.parameters(), grad_inner_j):
                    if (g_j is not None):
                        p.grad.data.add_(((1.0 * g_j.data) / source_count))
            loss_final = (loss_meta_train + loss_meta_test)
            losses_meta_train.update(loss_meta_train.item())
            losses_meta_test.update(loss_meta_test.item())
            optimizer.step()
            losses.update(loss_final.item())
            batch_time.update((time.time() - end))
            end = time.time()
            if (((i + 1) % print_freq) == 0):
                print('Epoch: [{}][{}/{}]\tTime {:.3f} ({:.3f})\tTotal loss {:.3f} ({:.3f})\tLoss {:.3f}({:.3f})\tLossMeta {:.3f}({:.3f})'.format(epoch, (i + 1), train_iters, batch_time.val, batch_time.avg, losses.val, losses.avg, losses_meta_train.val, losses_meta_train.avg, losses_meta_test.val, losses_meta_test.avg))

    def _parse_data(self, inputs):
        """Unpack a 4-tuple batch and move all tensors to the GPU."""
        (imgs, dens, imgs2, dens2) = inputs
        return (imgs.cuda(), dens.cuda(), imgs2.cuda(), dens2.cuda())
class StateManagerBase(object):
    """Interface for objects that record environment state and can roll it back."""

    def __init__(self) -> None:
        pass

    def update_state(self, state_update_instructions) -> bool:
        """Apply the given update instructions; base class is a no-op."""
        pass

    def get_current_state(self) -> object:
        """Return the most recent tracked state (None in the base class)."""
        return None

    def get_state(self, rollback_steps) -> object:
        """Return the state from `rollback_steps` steps ago (None here)."""
        return None

    def rollback(self, rollback_steps) -> object:
        """Rewind the tracked state by `rollback_steps` steps; no-op here."""
        pass
def load_vince_model(path):
    """Load a VINCE checkpoint keeping only feature-extractor weights.

    Keys containing 'feature_extractor' are retained with the
    'feature_extractor.module.model.' prefix stripped; everything else is
    dropped. Weights saved on cuda:0 are remapped to CPU.
    """
    prefix = 'feature_extractor.module.model.'
    state = torch.load(path, map_location={'cuda:0': 'cpu'})
    return {key.replace(prefix, ''): state[key] for key in state if 'feature_extractor' in key}
def mask_dir(temp_dir: pathlib.Path) -> pathlib.Path:
    """Create a 'mask' subdirectory under `temp_dir` and return its path."""
    masks = temp_dir / 'mask'
    masks.mkdir()
    return masks
def real_osculating_planes(mdim, pdim, qdeg):
    """Generate real osculating planes for a Schubert/Pieri problem via phcpy.

    Samples dim = mdim*pdim + qdeg*(mdim + pdim) real interpolation points
    uniformly in (-1, 1), sends them (space-separated, 17-digit fixed point)
    to the PHCpack C interface, and parses the returned flat string back into
    `dim` coefficient matrices of size (mdim + pdim) x mdim.

    NOTE(review): `eval` parses numeric tokens returned by PHCpack — input
    comes from the library, not the user, but float() would be safer.
    """
    from phcpy.phcpy2c3 import py2c_schubert_osculating_planes
    # problem dimension = number of osculating planes requested
    dim = ((mdim * pdim) + (qdeg * (mdim + pdim)))
    from random import uniform as u
    pts = ''
    for k in range(dim):
        # 17 decimals preserve full double precision in the string protocol
        cff = ('%.17lf' % u((- 1), (+ 1)))
        pts = ((pts + ' ') + cff)
    # the C wrapper takes the string length followed by the string itself
    osc = py2c_schubert_osculating_planes(mdim, pdim, qdeg, len(pts), pts)
    items = osc.split(' ')
    ind = 0
    planes = []
    for k in range(0, dim):
        plane = []
        for i in range(0, (mdim + pdim)):
            row = []
            for j in range(0, mdim):
                # tokens are consumed row-major: plane, then row, then column
                row.append(eval(items[ind]))
                ind = (ind + 1)
            plane.append(row)
        planes.append(plane)
    return planes
def load_mnist_m(dataset_dir, split='train'):
    """Load the MNIST-M image list for `split`; training is capped at 10k samples."""
    limit = 10000 if split == 'train' else None
    listing_path = osp.join(dataset_dir, MNIST_M[split])
    return read_image_list(listing_path, n_max=limit)
def process(args):
    """Prepare LibriSpeech for speech-to-text training.

    Pipeline: download each split and extract 80-dim log-mel filterbank
    features per utterance; zip the features; build one TSV manifest per
    split from the zip manifest; train a SentencePiece vocabulary on the
    training transcripts; write the dataset config yaml; finally delete the
    intermediate per-utterance feature directory.
    """
    out_root = Path(args.output_root).absolute()
    out_root.mkdir(exist_ok=True)
    feature_root = (out_root / 'fbank80')
    feature_root.mkdir(exist_ok=True)
    for split in SPLITS:
        print(f'Fetching split {split}...')
        dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=True)
        print('Extracting log mel filter bank features...')
        for (wav, sample_rate, _, spk_id, chapter_no, utt_no) in tqdm(dataset):
            # utterance ids are globally unique: speaker-chapter-utterance
            sample_id = f'{spk_id}-{chapter_no}-{utt_no}'
            extract_fbank_features(wav, sample_rate, (feature_root / f'{sample_id}.npy'))
    zip_path = (out_root / 'fbank80.zip')
    print('ZIPing features...')
    create_zip(feature_root, zip_path)
    print('Fetching ZIP manifest...')
    (audio_paths, audio_lengths) = get_zip_manifest(zip_path)
    print('Generating manifest...')
    train_text = []
    for split in SPLITS:
        manifest = {c: [] for c in MANIFEST_COLUMNS}
        # second pass over the (already downloaded) dataset for transcripts
        dataset = LIBRISPEECH(out_root.as_posix(), url=split)
        for (_, _, utt, spk_id, chapter_no, utt_no) in tqdm(dataset):
            sample_id = f'{spk_id}-{chapter_no}-{utt_no}'
            manifest['id'].append(sample_id)
            manifest['audio'].append(audio_paths[sample_id])
            manifest['n_frames'].append(audio_lengths[sample_id])
            manifest['tgt_text'].append(utt.lower())
            manifest['speaker'].append(spk_id)
        save_df_to_tsv(pd.DataFrame.from_dict(manifest), (out_root / f'{split}.tsv'))
        if split.startswith('train'):
            train_text.extend(manifest['tgt_text'])
    # character vocabularies carry no size suffix in the spm filename
    vocab_size = ('' if (args.vocab_type == 'char') else str(args.vocab_size))
    spm_filename_prefix = f'spm_{args.vocab_type}{vocab_size}'
    with NamedTemporaryFile(mode='w') as f:
        for t in train_text:
            f.write((t + '\n'))
        gen_vocab(Path(f.name), (out_root / spm_filename_prefix), args.vocab_type, args.vocab_size)
    gen_config_yaml(out_root, spm_filename=(spm_filename_prefix + '.model'), specaugment_policy='ld')
    # the per-utterance .npy files are redundant once zipped
    shutil.rmtree(feature_root)
class Decoder(metaclass=ABCMeta):
    """Abstract base for sequence decoders driven by a Decodable model.

    NOTE(review): `decode` is not marked @abstractmethod, so subclasses are
    not forced to override it and the base implementation returns None.
    """

    def __init__(self, model: Decodable):
        # model providing the scoring interface used during decoding
        self.model = model

    def decode(self, spectra: torch.FloatTensor, precursors: torch.FloatTensor, *args, **kwargs) -> list[list[str]]:
        """Decode token sequences for a batch of spectra; subclasses implement."""
        pass
class TimeoutLock(asyncio.Lock):
    """asyncio.Lock whose acquire() gives up after `timeout` seconds."""

    def __init__(self, timeout, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.timeout = timeout  # seconds before acquire() raises

    async def acquire(self) -> Literal[True]:
        """Acquire with a deadline; logs and re-raises on timeout."""
        try:
            return (await asyncio.wait_for(super().acquire(), self.timeout))
        # BUGFIX: asyncio.wait_for raises asyncio.TimeoutError, which is only
        # an alias of the builtin TimeoutError on Python >= 3.11; catching
        # asyncio.TimeoutError works on every supported version.
        except asyncio.TimeoutError:
            print(ColorMessage.yellow('LOCK TIMEOUT'))
            raise

    def handle(self, lock: asyncio.Lock):
        """Return an async context manager that swaps `lock` for this one.

        On entry it acquires this timeout lock (with deadline) and then
        releases the already-held `lock`; on exit it releases this lock if
        the acquisition succeeded.
        """
        class _Handler():
            def __init__(self, timeout_lock: TimeoutLock, handle_lock: asyncio.Lock):
                self.timeout_lock = timeout_lock
                self.handle_lock = handle_lock
                self.locked = False  # whether the timeout lock was acquired

            async def __aenter__(self):
                try:
                    (await self.timeout_lock.acquire())
                    self.locked = True
                finally:
                    # hand-off: release the caller's lock even if acquire timed out
                    self.handle_lock.release()

            async def __aexit__(self, exc_type, exc_val, exc_tb):
                if self.locked:
                    self.timeout_lock.release()

        return _Handler(self, lock)
def loess(xvals, yvals, alpha, poly_degree=1, robustify=False):
    """Locally weighted polynomial regression (LOESS).

    Fits a degree-`poly_degree` weighted least-squares polynomial at
    m = n + 1 evenly spaced evaluation points spanning the data, using the
    tricube kernel over the q = floor(n * alpha) nearest neighbours. With
    `robustify`, the fit is iteratively re-weighted by bisquare residual
    weights until all estimates move < 5% (or 20 cycles).

    Args:
        xvals, yvals: sample coordinates, equal length.
        alpha: smoothing parameter; fraction of points in each local fit
            (values > 1 use every point).
        poly_degree: degree of the local polynomial.
        robustify: apply bisquare robustifying iterations when True.

    Returns:
        (locsDF, evalDF): per-location weighting diagnostics and the
        evaluated smooth ('g' holds fitted values at locations 'v').
    """
    all_data = sorted(zip(xvals, yvals), key=(lambda x: x[0]))
    (xvals, yvals) = zip(*all_data)
    # FIX: column renamed 'scale_factor' -> 'scale_fact' to match the key
    # actually written per-iteration and selected at the end.
    locsDF = pd.DataFrame(columns=['loc', 'x', 'weights', 'v', 'y', 'raw_dists', 'scale_fact', 'scaled_dists'])
    evalDF = pd.DataFrame(columns=['loc', 'est', 'b', 'v', 'g'])
    n = len(xvals)
    m = (n + 1)
    q = int((np.floor((n * alpha)) if (alpha <= 1.0) else n))
    # extend the evaluation grid half an average spacing beyond the data
    avg_interval = ((max(xvals) - min(xvals)) / len(xvals))
    v_lb = max(0, (min(xvals) - (0.5 * avg_interval)))
    v_ub = (max(xvals) + (0.5 * avg_interval))
    v = enumerate(np.linspace(start=v_lb, stop=v_ub, num=m), start=1)
    # Vandermonde-style design matrix [1, x, x^2, ...]
    xcols = [np.ones_like(xvals)]
    for j in range(1, (poly_degree + 1)):
        xcols.append([(i ** j) for i in xvals])
    X = np.vstack(xcols).T
    for i in v:
        iterpos = i[0]
        iterval = i[1]
        # distances from every sample to the evaluation point, ascending
        iterdists = sorted([(j, np.abs((j - iterval))) for j in xvals], key=(lambda x: x[1]))
        (_, raw_dists) = zip(*iterdists)
        # bandwidth = distance to the q-th nearest neighbour
        scale_fact = raw_dists[(q - 1)]
        scaled_dists = [(j[0], (j[1] / scale_fact)) for j in iterdists]
        # tricube kernel, zero outside the bandwidth
        weights = [(j[0], (((1 - np.abs((j[1] ** 3))) ** 3) if (j[1] <= 1) else 0)) for j in scaled_dists]
        # restore x-order so columns align with xvals/yvals
        (_, weights) = zip(*sorted(weights, key=(lambda x: x[0])))
        (_, raw_dists) = zip(*sorted(iterdists, key=(lambda x: x[0])))
        (_, scaled_dists) = zip(*sorted(scaled_dists, key=(lambda x: x[0])))
        iterDF1 = pd.DataFrame({'loc': iterpos, 'x': xvals, 'v': iterval, 'weights': weights, 'y': yvals, 'raw_dists': raw_dists, 'scale_fact': scale_fact, 'scaled_dists': scaled_dists})
        locsDF = pd.concat([locsDF, iterDF1])
        W = np.diag(weights)
        y = yvals
        # BUGFIX: the weighted normal equations had lost their matrix-multiply
        # operators ("(X.T W) X" is not valid Python); restore
        # b = (X'WX)^+ (X'Wy) with '@'.
        b = np.linalg.pinv(X.T @ W @ X) @ (X.T @ W @ y)
        local_est = loc_eval(iterval, b)
        iterDF2 = pd.DataFrame({'loc': [iterpos], 'b': [b], 'v': [iterval], 'g': [local_est]})
        evalDF = pd.concat([evalDF, iterDF2])
    locsDF.reset_index(inplace=True)
    locsDF.drop('index', axis=1, inplace=True)
    locsDF['est'] = 0
    evalDF['est'] = 0
    locsDF = locsDF[['loc', 'est', 'v', 'x', 'y', 'raw_dists', 'scale_fact', 'scaled_dists', 'weights']]
    if robustify:
        cycle_nbr = 1
        robust_est = [evalDF]
        while True:
            revalDF = pd.DataFrame(columns=['loc', 'est', 'v', 'b', 'g'])
            for i in robust_est[(- 1)]['loc']:
                prevDF = robust_est[(- 1)]
                locDF = locsDF[(locsDF['loc'] == i)]
                b_i = prevDF.loc[((prevDF['loc'] == i), 'b')].item()
                w_i = locDF['weights']
                v_i = prevDF.loc[((prevDF['loc'] == i), 'v')].item()
                g_i = prevDF.loc[((prevDF['loc'] == i), 'g')].item()
                # bisquare weights from scaled residuals of the previous fit
                e1_i = [(k - loc_eval(j, b_i)) for (j, k) in zip(xvals, yvals)]
                e2_i = [(j / (6 * np.median(np.abs(e1_i)))) for j in e1_i]
                r_i = [(((1 - np.abs((j ** 2))) ** 2) if (np.abs(j) < 1) else 0) for j in e2_i]
                w_f = [(j * k) for (j, k) in zip(w_i, r_i)]
                W_r = np.diag(w_f)
                # BUGFIX: same matrix-multiply restoration as above
                b_r = np.linalg.pinv(X.T @ W_r @ X) @ (X.T @ W_r @ y)
                riter_est = loc_eval(v_i, b_r)
                riterDF = pd.DataFrame({'loc': [i], 'b': [b_r], 'v': [v_i], 'g': [riter_est], 'est': [cycle_nbr]})
                revalDF = pd.concat([revalDF, riterDF])
            robust_est.append(revalDF)
            # stop when all estimates moved < 5% or after 20 cycles
            idiffs = np.abs(((robust_est[(- 2)]['g'] - robust_est[(- 1)]['g']) / robust_est[(- 2)]['g']))
            if (np.all((idiffs < 0.05)) or (cycle_nbr > 20)):
                break
            cycle_nbr += 1
        evalDF = pd.concat(robust_est)
    evalDF.reset_index(inplace=True)
    evalDF.drop('index', axis=1, inplace=True)
    evalDF = evalDF[['loc', 'est', 'v', 'b', 'g']]
    return (locsDF, evalDF)
class ModuleTransfer():
    """Copy weights from `src` to `dest` by tracing both modules on one input.

    NOTE(review): the original declared only dataclass-style class-level
    annotations with no __init__ — the @dataclass decorator was evidently
    lost, leaving instances without src/dest attributes. An explicit
    __init__ restores the generated signature without new imports.
    """

    def __init__(self, src, dest, verbose=0, src_skip=None, dest_skip=None):
        self.src = src          # module to copy weights from
        self.dest = dest        # module to copy weights into
        self.verbose = verbose  # 1 => log each transferred op
        # list defaults created per-instance (mutable-default pitfall avoided)
        self.src_skip = [] if src_skip is None else src_skip
        self.dest_skip = [] if dest_skip is None else dest_skip

    def __call__(self, x):
        """Trace both modules with input `x` and copy state dicts op-by-op.

        Raises:
            Exception: when the (filtered) traced operation counts differ.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        # drop operation types the caller asked to ignore on either side
        src_traced = list(filter((lambda m: (type(m) not in self.src_skip)), src_traced))
        dest_traced = list(filter((lambda m: (type(m) not in self.dest_skip)), dest_traced))
        if (len(dest_traced) != len(src_traced)):
            raise Exception(f'Numbers of operations are different. Source module has {len(src_traced)} operations while destination module has {len(dest_traced)}.')
        for (dest_m, src_m) in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if (self.verbose == 1):
                print(f'Transfered from={src_m} to={dest_m}')
def scatter(inputs, target_gpus, dim=0):
    """Recursively scatter `inputs` across `target_gpus` (legacy Variable API).

    Variables are split along `dim` via the Scatter autograd function;
    non-empty tuples/lists/dicts are scattered element-wise and re-zipped so
    the result is one structure per GPU. Any other leaf (including empty
    containers) is replicated unchanged, once per target GPU.
    """
    def scatter_map(obj):
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, None, dim, obj)
        # raw tensors must be wrapped in Variable by the caller
        assert (not torch.is_tensor(obj)), 'Tensors not supported in scatter.'
        if (isinstance(obj, tuple) and (len(obj) > 0)):
            return list(zip(*map(scatter_map, obj)))
        if (isinstance(obj, list) and (len(obj) > 0)):
            return list(map(list, zip(*map(scatter_map, obj))))
        if (isinstance(obj, dict) and (len(obj) > 0)):
            # rebuild with the same mapping type from scattered (key, value) pairs
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        return [obj for targets in target_gpus]
    try:
        return scatter_map(inputs)
    finally:
        # break the closure's self-reference cycle so scattered tensors
        # don't outlive this call
        scatter_map = None
# NOTE(review): the bare `_parse` below appears to be a truncated decorator
# (presumably fastai/fastscript's @call_parse, which turns the Param
# annotations into a CLI) — confirm against upstream.
_parse
def main(gpus: Param('The GPUs to use for distributed training', str)='all', script: Param('Script to run', str, opt=False)='', args: Param('Args to pass to script', nargs='...', opt=False)=''):
    """Launch `script` once per GPU with distributed-training env vars set.

    Sets WORLD_SIZE, MASTER_ADDR and MASTER_PORT, gives each child a unique
    RANK and a `--gpu=<id>` flag, then waits for every child to exit.

    NOTE(review): for a comma-separated `gpus` string, `list(gpus)` splits
    into single characters (commas included) — presumably callers pass 'all'
    or a digits-only string; confirm the intended format.
    """
    current_env = os.environ.copy()
    gpus = (list(range(torch.cuda.device_count())) if (gpus == 'all') else list(gpus))
    current_env['WORLD_SIZE'] = str(len(gpus))
    current_env['MASTER_ADDR'] = '127.0.0.1'
    current_env['MASTER_PORT'] = '29500'
    processes = []
    for (i, gpu) in enumerate(gpus):
        # RANK identifies this worker within the process group
        current_env['RANK'] = str(i)
        cmd = ([sys.executable, '-u', script, f'--gpu={gpu}'] + args)
        process = subprocess.Popen(cmd, env=current_env)
        processes.append(process)
    for process in processes:
        process.wait()
def moving_average(feat, saved_ma, alpha):
    """Exponential moving average update.

    Returns `feat` unchanged when `saved_ma` is empty (first observation);
    otherwise blends `alpha` of the stored average with (1 - alpha) of the
    new feature.
    """
    if len(saved_ma) == 0:
        return feat
    return saved_ma * alpha + feat * (1 - alpha)
def get(params, optimizer, learning_rate=None, decay=None, weight_decay=0):
    """Build a torch optimizer (plus LR scheduler) from a specification.

    `optimizer` may be a ready-made torch.optim.Optimizer, one of the L-BFGS
    names (learning rate is ignored, weight decay must be zero), or one of
    'sgd'/'rmsprop'/'adam'/'adamw' (learning rate required).

    Returns:
        (optimizer, lr_scheduler) tuple.

    Raises:
        ValueError: on invalid weight_decay / missing learning rate.
        NotImplementedError: for an unknown optimizer name.
    """
    if isinstance(optimizer, torch.optim.Optimizer):
        # caller supplied a fully configured optimizer instance
        optim = optimizer
    elif optimizer in ('L-BFGS', 'L-BFGS-B'):
        if weight_decay > 0:
            raise ValueError("L-BFGS optimizer doesn't support weight_decay > 0")
        if learning_rate is not None or decay is not None:
            print('Warning: learning rate is ignored for {}'.format(optimizer))
        optim = torch.optim.LBFGS(
            params,
            lr=1,
            max_iter=LBFGS_options['iter_per_step'],
            max_eval=LBFGS_options['fun_per_step'],
            tolerance_grad=LBFGS_options['gtol'],
            tolerance_change=LBFGS_options['ftol'],
            history_size=LBFGS_options['maxcor'],
            line_search_fn=None,
        )
    else:
        if learning_rate is None:
            raise ValueError('No learning rate for {}.'.format(optimizer))
        if optimizer == 'sgd':
            optim = torch.optim.SGD(params, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'rmsprop':
            optim = torch.optim.RMSprop(params, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'adam':
            optim = torch.optim.Adam(params, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'adamw':
            if weight_decay == 0:
                raise ValueError('AdamW optimizer requires non-zero weight decay')
            optim = torch.optim.AdamW(params, lr=learning_rate, weight_decay=weight_decay)
        else:
            raise NotImplementedError(f'{optimizer} to be implemented for backend pytorch.')
    scheduler = _get_learningrate_scheduler(optim, decay)
    return (optim, scheduler)
class TestCountOpsPass(QiskitTestCase):
    """Tests for the CountOps analysis pass."""

    def test_empty_dag(self):
        """An empty circuit yields an empty op-count dict."""
        dag = circuit_to_dag(QuantumCircuit())
        pass_ = CountOps()
        _ = pass_.run(dag)
        self.assertDictEqual(pass_.property_set['count_ops'], {})

    def test_just_qubits(self):
        """Gate counts are aggregated by name over a 2-qubit circuit."""
        qr = QuantumRegister(2)
        circuit = QuantumCircuit(qr)
        circuit.h(qr[0])
        circuit.h(qr[1])
        for _ in range(4):
            circuit.cx(qr[0], qr[1])
        for _ in range(2):
            circuit.cx(qr[1], qr[0])
        dag = circuit_to_dag(circuit)
        pass_ = CountOps()
        _ = pass_.run(dag)
        self.assertDictEqual(pass_.property_set['count_ops'], {'cx': 6, 'h': 2})
def update_user_topic(topic_id, user_id, state):
    """Record a user/topic interaction and stamp the matching recommendation.

    Inserts a (user_id, topic_id, state, timestamp) row into user_topics and
    updates `clicked` on the interleaved topic_recommendations row, then
    commits both statements together.

    NOTE(review): `clicked` is set to the current UTC timestamp, not a
    boolean — presumably the column stores the click time; confirm schema.
    NOTE(review): `cur.rowcount` is read after the cursor's context manager
    has closed it, and reflects only the last statement (the UPDATE).

    Returns:
        True if the UPDATE affected exactly one row.
    """
    conn = getDb()
    with closing(conn.cursor(dictionary=True)) as cur:
        user_topics_sql = 'insert into user_topics values (%s,%s,%s,%s)'
        topic_recommendations_sql = 'update topic_recommendations set clicked = %s\n where user_id = %s and topic_id = %s and interleaving_order is not null'
        current_time = datetime.utcnow()
        cur.execute(user_topics_sql, (user_id, topic_id, state, current_time))
        cur.execute(topic_recommendations_sql, (current_time, user_id, topic_id))
        conn.commit()
    return (cur.rowcount == 1)
# NOTE(review): the bare call `_materialize('core')` looks like a truncated
# class decorator for Atan (e.g. @mark_materialize('core')) lost in
# extraction — confirm against upstream.
_materialize('core')
class Atan(TrigonometricOp):
    """Arctangent operator: floating dtypes in, the same floating dtypes out."""
    in_dtypes = [(i,) for i in DTYPE_GEN_FLOATS]
    out_dtypes = [(i,) for i in DTYPE_GEN_FLOATS]
def assert_allclose(tensor, value, tol=1e-05, message=''):
    """Assert every element of `tensor` lies within `tol` of `value`."""
    deviation = (tensor - value).abs()
    assert (deviation < tol).all(), message
def _get_patch_map():
    """Build (lazily, once) the table of fastchat `load_model` patch targets.

    Each entry is [owner, attribute_name, replacement, saved_original]; the
    trailing None slot is presumably filled with the original attribute when
    the patch is applied. fastchat is imported only on first use so it is not
    a hard dependency at module import time.
    """
    global _mapping_fastchat
    if (_mapping_fastchat is None):
        _mapping_fastchat = []
        from fastchat.model import model_adapter
        _mapping_fastchat += [[BaseModelAdapter, 'load_model', load_model_base, None], [ChatGLMAdapter, 'load_model', load_model_chatglm, None], [model_adapter, 'load_model', load_model, None]]
    return _mapping_fastchat
def lr_decay():
    """Multiply every param group's learning rate of the global `optimizer` by 0.1.

    Prints the last group's adjusted rate. Requires a module-level
    `optimizer` with at least one parameter group.
    """
    global optimizer
    for group in optimizer.param_groups:
        group['lr'] *= 0.1
        lr = group['lr']
    print('Learning rate adjusted to {}'.format(lr))
class CamembertTokenizerFast(metaclass=DummyObject):
    """Dummy placeholder that errors out unless the `tokenizers` backend is installed."""
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        """Any construction attempt reports the missing backend."""
        requires_backends(self, ['tokenizers'])
def add_flops_mask(module, mask):
    """Attach `mask` as `__mask__` on every Conv2d/Linear submodule of `module`."""
    def _attach(m):
        if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
            m.__mask__ = mask
    module.apply(_attach)
class Database():
    """Thin wrapper around an InfluxDB database.

    NOTE(review): construction destroys any existing database with the same
    name before recreating it — confirm this is intended outside tests.
    """

    def __init__(self, db_name, influxdb_host, influxdb_port):
        self.db_name = db_name
        self.host = influxdb_host
        self.port = influxdb_port
        self.conn = InfluxDBClient(host=self.host, port=self.port)
        # start from a clean slate: drop any previous database of this name
        self.conn.drop_database(self.db_name)
        self.db = self.create(self.conn, self.db_name)

    def create(self, conn, db_name):
        """Create `db_name`, make it the client's active database, return the client."""
        conn.create_database(db_name)
        conn.switch_database(db_name)
        return conn

    def insert(self, json_body):
        """Write a list of InfluxDB point dicts to the active database."""
        self.db.write_points(json_body)

    def delete(self, query):
        # NOTE(review): despite the parameter name, this drops a measurement —
        # `delete` and `delete_measurement` look like their bodies are
        # swapped; confirm against call sites before renaming.
        self.db.drop_measurement(query)

    def delete_measurement(self, query):
        # NOTE(review): executes an arbitrary query string (see note above).
        self.db.query(query)

    def select(self, query):
        """Run a query against the active database and return the result set."""
        result = self.db.query(query)
        return result
class MLP(nn.Module):
    """Fully connected network: Linear + activation per hidden layer, linear output.

    Note: the shared activation module from the module-level `activations`
    dict is appended once per hidden layer (same instance each time).
    """

    def __init__(self, in_dim, hidden_list, out_dim, activation='relu'):
        super().__init__()
        assert (activation in ['relu', 'tanh', 'gelu'])
        self.layers = nn.ModuleList()
        # consecutive (fan_in, fan_out) pairs: in_dim -> h0 -> h1 -> ...
        dims = [in_dim] + list(hidden_list)
        for fan_in, fan_out in zip(dims[:-1], dims[1:]):
            self.layers.append(nn.Linear(fan_in, fan_out))
            self.layers.append(activations[activation])
        # final projection carries no activation
        self.layers.append(nn.Linear(hidden_list[(- 1)], out_dim))

    def forward(self, x):
        out = x
        for layer in self.layers:
            out = layer(out)
        return out
class BWStyle():
    """Black-and-white drawing style for matplotlib circuit rendering.

    Holds colors, fonts and layout options; all of them can be overridden
    through set_style with a user-supplied dict.
    """

    def __init__(self):
        self.tc = '#000000'  # text color
        self.sc = '#000000'  # subtext color
        self.lc = '#000000'  # line color
        self.cc = '#778899'  # classical-register line color
        self.gc = '#ffffff'  # gate face color
        self.gt = '#000000'  # gate text color
        self.bc = '#bdbdbd'  # barrier face color
        self.bg = '#ffffff'  # background color
        self.fs = 13   # font size
        self.sfs = 8   # sub-font size
        # LaTeX display text per gate name
        self.disptex = {'id': 'Id', 'u0': 'U_0', 'u1': 'U_1', 'u2': 'U_2', 'u3': 'U_3', 'x': 'X', 'y': 'Y', 'z': 'Z', 'h': 'H', 's': 'S', 'sdg': 'S^\\dagger', 't': 'T', 'tdg': 'T^\\dagger', 'rx': 'R_x', 'ry': 'R_y', 'rz': 'R_z', 'reset': '\\left|0\\right\\rangle'}
        # per-gate face colors (all white in the B/W theme)
        self.dispcol = {'id': '#ffffff', 'u0': '#ffffff', 'u1': '#ffffff', 'u2': '#ffffff', 'u3': '#ffffff', 'x': '#ffffff', 'y': '#ffffff', 'z': '#ffffff', 'h': '#ffffff', 's': '#ffffff', 'sdg': '#ffffff', 't': '#ffffff', 'tdg': '#ffffff', 'rx': '#ffffff', 'ry': '#ffffff', 'rz': '#ffffff', 'reset': '#ffffff', 'target': '#ffffff', 'meas': '#ffffff'}
        self.latexmode = True
        self.pimode = False
        self.fold = 20        # columns before the drawn circuit wraps
        self.bundle = False
        self.barrier = True
        self.index = False
        self.figwidth = (- 1)  # -1 = automatic width
        self.dpi = 150
        self.margin = [2.0, 0.0, 0.0, 0.3]
        self.cline = 'doublet'

    def set_style(self, dic):
        """Override style attributes from `dic`; unknown keys are ignored."""
        self.tc = dic.get('textcolor', self.tc)
        self.sc = dic.get('subtextcolor', self.sc)
        self.lc = dic.get('linecolor', self.lc)
        self.cc = dic.get('creglinecolor', self.cc)
        # NOTE(review): default falls back to self.tc, not self.gt — gate
        # text presumably tracks the text color unless overridden; confirm.
        self.gt = dic.get('gatetextcolor', self.tc)
        self.gc = dic.get('gatefacecolor', self.gc)
        self.bc = dic.get('barrierfacecolor', self.bc)
        self.bg = dic.get('backgroundcolor', self.bg)
        self.fs = dic.get('fontsize', self.fs)
        self.sfs = dic.get('subfontsize', self.sfs)
        self.disptex = dic.get('displaytext', self.disptex)
        # reset every gate face to the (possibly updated) gate color before
        # applying any per-gate overrides from the dict
        for key in self.dispcol.keys():
            self.dispcol[key] = self.gc
        self.dispcol = dic.get('displaycolor', self.dispcol)
        self.latexmode = dic.get('latexdrawerstyle', self.latexmode)
        self.pimode = dic.get('usepiformat', self.pimode)
        self.fold = dic.get('fold', self.fold)
        if (self.fold < 2):
            self.fold = (- 1)  # folding disabled
        self.bundle = dic.get('cregbundle', self.bundle)
        self.barrier = dic.get('plotbarrier', self.barrier)
        self.index = dic.get('showindex', self.index)
        self.figwidth = dic.get('figwidth', self.figwidth)
        self.dpi = dic.get('dpi', self.dpi)
        self.margin = dic.get('margin', self.margin)
        self.cline = dic.get('creglinestyle', self.cline)
def get_args():
    """Build and parse command-line arguments for SQuAD preprocessing.

    Returns:
        argparse.Namespace with source/target/GloVe paths, tokenizer and
        tokenizer-server options.
    """
    parser = argparse.ArgumentParser()
    home = os.path.expanduser('~')
    source_dir = os.path.join(home, 'data', 'squad')
    target_dir = 'data/squad'
    glove_dir = os.path.join(home, 'data', 'glove')
    parser.add_argument('-s', '--source_dir', default=source_dir)
    parser.add_argument('-t', '--target_dir', default=target_dir)
    parser.add_argument('--train_name', default='train-v1.1.json')
    parser.add_argument('-d', '--debug', action='store_true')
    # BUGFIX: the ratio is fractional (default 0.9); `type=int` rejected any
    # value passed on the command line (int('0.9') raises).
    parser.add_argument('--train_ratio', default=0.9, type=float)
    parser.add_argument('--glove_corpus', default='6B')
    parser.add_argument('--glove_dir', default=glove_dir)
    parser.add_argument('--glove_vec_size', default=100, type=int)
    parser.add_argument('--mode', default='full', type=str)
    parser.add_argument('--single_path', default='', type=str)
    parser.add_argument('--tokenizer', default='PTB', type=str)
    parser.add_argument('--url', default='vision-server2.corp.ai2', type=str)
    parser.add_argument('--port', default=8000, type=int)
    parser.add_argument('--split', action='store_true')
    parser.add_argument('--suffix', default='')
    return parser.parse_args()
class TestScore(unittest.TestCase):
    """Smoke test for CiderMetric batch evaluation."""

    def test_score(self):
        """The CIDEr score of the fixture batch should be close to 2."""
        metric = CiderMetric(tokenize=False)
        score = metric.evaluate_batch(CANDS, REFS)
        ref = 2.
        # BUGFIX: the one-sided check `score - ref < EPS` passed for any
        # score below the reference; compare the absolute deviation.
        self.assertTrue(abs(score['cider'] - ref) < EPS)
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with an explicit `shuffle` switch.

    When shuffling, the permutation is seeded by the epoch so every rank
    draws the same order; indices are padded by wrapping so each rank
    receives exactly `num_samples` items as a contiguous block.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # epoch-seeded generator keeps the permutation identical across ranks
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()
        # pad by wrapping so the total divides evenly among replicas
        indices += indices[:(self.total_size - len(indices))]
        assert (len(indices) == self.total_size)
        # contiguous block assignment: rank r takes [r*n, (r+1)*n)
        offset = (self.num_samples * self.rank)
        indices = indices[offset:(offset + self.num_samples)]
        assert (len(indices) == self.num_samples)
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # called by the training loop so each epoch reshuffles differently
        self.epoch = epoch
def train(model_name):
    """Train a DQN controller with experience replay and a frozen target net.

    Per episode: anneal epsilon, periodically refresh the target network and
    run an evaluation/checkpoint pass, roll out one episode with
    epsilon-greedy control, push its transitions into the replay buffer
    (a SumTree when prioritized ER is enabled, otherwise a deque), then take
    several minibatch TD-learning steps. Metrics go to TensorBoard; the
    final model is saved under `model_name`.
    """
    writer = SummaryWriter(log_dir=Settings.FULL_LOG_DIR)
    # log the full settings snapshot for reproducibility
    for (key, value) in Settings.export_settings().items():
        writer.add_text(key, str(value))
    if Settings.INIT_MODEL_NAME:
        dqn = DQN.load(Settings.INIT_MODEL_NAME)
    else:
        dqn = DQN(dropout=Settings.USE_DROPOUT)
    reward_function = get_reward_function()
    criterion = nn.SmoothL1Loss()
    optimizer = optim.Adam(dqn.parameters(), lr=Settings.LEARNING_RATE)
    # frozen copy used to compute TD targets
    target_dqn = copy.deepcopy(dqn)
    target_dqn.eval()
    if Settings.USE_PRIORITIZED_ER:
        history = SumTree(capacity=Settings.REPLAY_BUFFER_SIZE)
    else:
        history = deque(maxlen=Settings.REPLAY_BUFFER_SIZE)
    for iteration in tqdm(range(Settings.NUM_TRAINING_EPISODES)):
        # stepped exponential epsilon decay toward EPS_END
        epsilon = (Settings.EPS_END + ((Settings.EPS_START - Settings.EPS_END) * np.exp(((- Settings.EPS_DECAY_COEFFICIENT) * np.floor((iteration / Settings.EPS_DECAY_RATE))))))
        if (((iteration % Settings.TARGET_NET_FREEZE_PERIOD) == 0) and (iteration != 0)):
            # refresh the frozen target network
            target_dqn = copy.deepcopy(dqn)
            target_dqn.eval()
        if (((iteration % Settings.EVALUATION_PERIOD) == 0) and (iteration != 0)):
            evaluate_q_model_and_log_metrics(dqn, iteration, writer, reward_function)
            writer.add_scalar('epsilon', epsilon, iteration)
            dqn.checkpoint('{}_checkpoint_{}'.format(model_name, iteration))
        # roll out one epsilon-greedy episode in the simulator
        control_function = partial(do_dqn_control, dqn=dqn, epsilon=epsilon)
        episode_metrics = control.run_episode(control_function=control_function, state_function=prediction.HighwayState.from_sumo, max_episode_length=Settings.TRAINING_EPISODE_LENGTH, limit_metrics=True)
        episode_history = rl.get_history(episode_metrics, reward_function)
        if Settings.USE_PRIORITIZED_ER:
            # new transitions enter at maximum priority so they get sampled soon
            for item in episode_history:
                history.add_node(item, (Settings.PER_MAX_PRIORITY ** Settings.PER_ALPHA))
        else:
            history.extend(episode_history)
        if ((iteration % 10) == 0):
            writer.add_scalar('Length', len(episode_history), iteration)
        total_loss = 0
        for train_index in range(Settings.TRAINING_STEPS_PER_EPISODE):
            # sample a minibatch (priority-weighted or uniform)
            if Settings.USE_PRIORITIZED_ER:
                train_sars = []
                train_indices = []
                for k in range(min(len(history), Settings.BATCH_SIZE)):
                    (position, sars) = history.sample()
                    train_sars.append(sars)
                    train_indices.append(position)
            else:
                train_sars = random.choices(history, k=min(len(history), Settings.BATCH_SIZE))
                train_indices = []
            targets = get_targets(train_sars, dqn, target_dqn, gamma=Settings.DISCOUNT_FACTOR)
            target_tensor = dqn.get_target_tensor_bulk(targets)
            state_tensor = dqn.get_q_tensor_bulk([item.state for item in train_sars])
            action_tensor = dqn.get_action_tensor_bulk([item.action for item in train_sars]).reshape(((- 1), 1))
            optimizer.zero_grad()
            outputs = dqn.forward(state_tensor)
            # Q-values of the actions actually taken
            q_values = torch.gather(outputs, 1, action_tensor).flatten()
            loss = criterion(q_values, target_tensor)
            loss.backward()
            optimizer.step()
            if Settings.USE_PRIORITIZED_ER:
                # refresh sampled transitions' priorities from their TD errors
                td_errors = torch.abs((q_values - target_tensor))
                for (error_index, error) in enumerate(td_errors):
                    priority = (min((error + Settings.PER_MIN_PRIORITY), Settings.PER_MAX_PRIORITY) ** Settings.PER_ALPHA)
                    history.update_weight(priority, train_indices[error_index])
            # NOTE(review): accumulating the loss tensor (not loss.item())
            # keeps each step's graph alive until logging — confirm intended.
            total_loss += loss
        if ((iteration % 10) == 0):
            writer.add_scalar('Loss', (total_loss / Settings.TRAINING_STEPS_PER_EPISODE), iteration)
    evaluate_q_model_and_log_metrics(dqn, Settings.NUM_TRAINING_EPISODES, writer, reward_function)
    dqn.save(model_name)
    writer.close()
_module()
class FPN(nn.Module):
    """Feature Pyramid Network neck.

    Projects each selected backbone level to ``out_channels`` via a 1x1
    lateral conv, merges levels top-down by upsample-and-add, smooths every
    merged map with a 3x3 conv, and (optionally) appends extra coarser
    outputs produced by stride-2 convs or max-pooling so that exactly
    ``num_outs`` feature maps are returned.
    """

    def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=(- 1), add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, act_cfg=None, upsample_cfg=dict(mode='nearest')):
        """Create the lateral, output, and (optional) extra-level convs.

        Args:
            in_channels (list[int]): channel count of each backbone level.
            out_channels (int): channel count of every pyramid output.
            num_outs (int): number of pyramid levels to emit.
            start_level (int): index of the first backbone level to use.
            end_level (int): index bounding the backbone levels to use;
                -1 means "use every level from ``start_level`` onward".
            add_extra_convs (bool or str): whether/where to attach extra
                conv levels ('on_input', 'on_lateral', 'on_output', or a
                bool resolved via ``extra_convs_on_inputs``).
            extra_convs_on_inputs (bool): legacy flag resolving a bare
                ``add_extra_convs=True`` to 'on_input' or 'on_output'.
            relu_before_extra_convs (bool): apply ReLU before each extra conv.
            no_norm_on_lateral (bool): drop normalization on lateral convs.
            conv_cfg, norm_cfg, act_cfg (dict or None): ConvModule configs.
            upsample_cfg (dict): kwargs forwarded to ``F.interpolate``.
        """
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.relu_before_extra_convs = relu_before_extra_convs
        self.no_norm_on_lateral = no_norm_on_lateral
        self.fp16_enabled = False
        # Copy so later mutation of the caller's dict cannot change behavior.
        self.upsample_cfg = upsample_cfg.copy()
        if (end_level == (- 1)):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            # With an explicit end_level, the number of outputs must match
            # the backbone slice exactly (no extra levels are allowed).
            self.backbone_end_level = end_level
            assert (end_level <= len(in_channels))
            assert (num_outs == (end_level - start_level))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        assert isinstance(add_extra_convs, (str, bool))
        if isinstance(add_extra_convs, str):
            assert (add_extra_convs in ('on_input', 'on_lateral', 'on_output'))
        elif add_extra_convs:
            # Resolve the legacy boolean form to its string equivalent.
            if extra_convs_on_inputs:
                self.add_extra_convs = 'on_input'
            else:
                self.add_extra_convs = 'on_output'
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            # 1x1 projection of backbone level i, plus a 3x3 smoothing conv
            # applied after the top-down merge in forward().
            l_conv = ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=(norm_cfg if (not self.no_norm_on_lateral) else None), act_cfg=act_cfg, inplace=False)
            fpn_conv = ConvModule(out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)
        # Extra pyramid levels beyond the backbone, each a stride-2 3x3 conv.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        if (self.add_extra_convs and (extra_levels >= 1)):
            for i in range(extra_levels):
                if ((i == 0) and (self.add_extra_convs == 'on_input')):
                    # First extra conv may read the raw backbone feature.
                    in_channels = self.in_channels[(self.backbone_end_level - 1)]
                else:
                    in_channels = out_channels
                extra_fpn_conv = ConvModule(in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, inplace=False)
                self.fpn_convs.append(extra_fpn_conv)

    def init_weights(self):
        """Xavier-initialize every Conv2d submodule (uniform distribution)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')
    # NOTE(review): bare call — presumably a mangled decorator (e.g.
    # '@auto_fp16()' from mmcv) that lost its '@'; confirm against upstream.
    _fp16()

    def forward(self, inputs):
        """Build the feature pyramid.

        Args:
            inputs (sequence of Tensor): backbone feature maps, one per
                entry in ``self.in_channels``, finest resolution first.

        Returns:
            tuple of Tensor: ``num_outs`` pyramid maps, finest first.
        """
        assert (len(inputs) == len(self.in_channels))
        # 1x1 lateral projections of the selected backbone levels.
        laterals = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        used_backbone_levels = len(laterals)
        # Top-down pathway: upsample level i and add it into level i-1.
        for i in range((used_backbone_levels - 1), 0, (- 1)):
            if ('scale_factor' in self.upsample_cfg):
                laterals[(i - 1)] += F.interpolate(laterals[i], **self.upsample_cfg)
            else:
                # No fixed scale factor: match the spatial size of the
                # level below instead.
                prev_shape = laterals[(i - 1)].shape[2:]
                laterals[(i - 1)] += F.interpolate(laterals[i], size=prev_shape, **self.upsample_cfg)
        # 3x3 smoothing convs on every merged lateral map.
        outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
        if (self.num_outs > len(outs)):
            if (not self.add_extra_convs):
                # No extra convs configured: pad with stride-2 max-pooling.
                for i in range((self.num_outs - used_backbone_levels)):
                    outs.append(F.max_pool2d(outs[(- 1)], 1, stride=2))
            else:
                # Choose the source feeding the first extra conv level.
                if (self.add_extra_convs == 'on_input'):
                    extra_source = inputs[(self.backbone_end_level - 1)]
                elif (self.add_extra_convs == 'on_lateral'):
                    extra_source = laterals[(- 1)]
                elif (self.add_extra_convs == 'on_output'):
                    extra_source = outs[(- 1)]
                else:
                    raise NotImplementedError
                outs.append(self.fpn_convs[used_backbone_levels](extra_source))
                # Remaining extra levels chain off the previous output.
                for i in range((used_backbone_levels + 1), self.num_outs):
                    if self.relu_before_extra_convs:
                        outs.append(self.fpn_convs[i](F.relu(outs[(- 1)])))
                    else:
                        outs.append(self.fpn_convs[i](outs[(- 1)]))
        return tuple(outs)
def test_obtain_exact_trajectories(ray_local_session_fixture):
    """RaySampler.obtain_exact_trajectories must hand back exactly
    ``n_traj_per_worker`` rollouts per worker, grouped by worker, with each
    rollout replaying that worker's fixed action."""
    del ray_local_session_fixture
    assert ray.is_initialized()

    horizon = 15
    n_workers = 8
    n_traj_per_worker = 3

    env = GarageEnv(PointEnv())
    # One distinct fixed action per worker so every rollout can be traced
    # back to the worker that produced it.
    per_worker_actions = [env.action_space.sample() for _ in range(n_workers)]
    policies = [
        FixedPolicy(env.spec, [action] * horizon)
        for action in per_worker_actions
    ]
    factory = WorkerFactory(seed=100, max_path_length=horizon, n_workers=n_workers)
    sampler = RaySampler.from_worker_factory(factory, policies, envs=env)

    trajectories = sampler.obtain_exact_trajectories(n_traj_per_worker, policies)

    expected_count = n_workers * n_traj_per_worker
    assert sum(trajectories.lengths) >= expected_count
    assert len(trajectories.lengths) == expected_count

    # Rollouts arrive grouped by worker, so integer division of the rollout
    # index recovers the owning worker.
    for idx, rollout in enumerate(trajectories.split()):
        owner = idx // n_traj_per_worker
        assert (rollout.actions == per_worker_actions[owner]).all()
class SetDataset:
    """Episodic (per-class) view of the CropDisease training split.

    Groups every training image path by its class label and wraps each
    class in its own shuffling DataLoader, so that indexing this dataset
    with a class id yields one batch of samples from that single class —
    the sampling pattern used by few-shot episode construction.
    """

    def __init__(self, batch_size, transform):
        """Build one per-class DataLoader over the CropDisease train set.

        Args:
            batch_size: number of samples drawn per class on each access.
            transform: torchvision-style transform applied by SubDataset.
        """
        # CropDisease exposes 38 classes; one bucket of image paths each.
        # NOTE(review): the class count is hard-coded to match this dataset.
        self.sub_meta = {}
        self.cl_list = range(38)
        for cl in self.cl_list:
            self.sub_meta[cl] = []
        # loader returns the path unchanged, so only paths (not decoded
        # images) are stored here; decoding happens inside SubDataset.
        d = ImageFolder((CropDisease_path + '/dataset/train/'), loader=(lambda path: path))
        for data, label in d:
            self.sub_meta[label].append(data)
        # num_workers=0 keeps every per-class loader in-process, which is
        # required since these loaders are re-entered from __getitem__.
        self.sub_dataloader = []
        sub_data_loader_params = dict(batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=False)
        for cl in self.cl_list:
            sub_dataset = SubDataset(self.sub_meta[cl], cl, transform=transform)
            self.sub_dataloader.append(torch.utils.data.DataLoader(sub_dataset, **sub_data_loader_params))

    def __getitem__(self, i):
        """Return one freshly-shuffled batch drawn from class ``i``."""
        return next(iter(self.sub_dataloader[i]))

    def __len__(self):
        """Number of classes (one loader per class)."""
        return len(self.sub_dataloader)
def print_args(args, print_list):
    """Render selected attributes of an argparse-style namespace as text.

    Args:
        args: any object exposing its fields via ``__dict__`` (typically an
            ``argparse.Namespace``).
        print_list: names of attributes to include; an empty list means
            "include every attribute".

    Returns:
        A string beginning with a newline, followed by one
        ``"<name>:<value>"`` line per selected attribute (insertion order).
    """
    include_all = len(print_list) == 0
    # str.join avoids the quadratic cost of repeated string concatenation.
    return '\n' + ''.join(
        '{}:{}\n'.format(arg, content)
        for arg, content in args.__dict__.items()
        if include_all or arg in print_list
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.