code stringlengths 101 5.91M |
|---|
def test_numerical_columns_gets_pii():
    """Sampling should replace the pii 'city' column with generated city names
    while the 'id' primary key and the numerical column stay plausible."""
    # Tiny training frame: 'city' is constant but declared as a pii sdtype.
    real_data = pd.DataFrame(data={'id': [0, 1, 2, 3, 4], 'city': [0, 0, 0, 0, 0], 'numerical': [21, 22, 23, 24, 25]})
    metadata = SingleTableMetadata.load_from_dict({'primary_key': 'id', 'columns': {'id': {'sdtype': 'id'}, 'city': {'sdtype': 'city'}, 'numerical': {'sdtype': 'numerical'}}})
    synthesizer = GaussianCopulaSynthesizer(metadata, default_distribution='truncnorm')
    synthesizer.fit(real_data)
    synthetic_data = synthesizer.sample(10)
    # Expected output is deterministic because the synthesizer seeds its faker/sampler.
    expected_sampled = pd.DataFrame({'id': {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}, 'city': {0: 'Danielfort', 1: 'Glendaside', 2: 'Port Jenniferchester', 3: 'Port Susan', 4: 'West Michellemouth', 5: 'West Jason', 6: 'Ryanfort', 7: 'West Stephenland', 8: 'Davidland', 9: 'Port Christopher'}, 'numerical': {0: 22, 1: 24, 2: 22, 3: 23, 4: 22, 5: 24, 6: 23, 7: 24, 8: 24, 9: 24}})
    pd.testing.assert_frame_equal(expected_sampled, synthetic_data)
class _PyAccess32_3(PyAccess):
    """Pixel access for 32-bit-per-pixel storage exposing only RGB; alpha is
    forced to fully opaque on writes."""

    def _post_init(self, *args, **kwargs):
        # Reinterpret the raw 32-bit image buffer as rows of RGBA structs.
        self.pixels = ffi.cast('struct Pixel_RGBA **', self.image32)

    def get_pixel(self, x, y):
        px = self.pixels[y][x]
        return (px.r, px.g, px.b)

    def set_pixel(self, x, y, color):
        px = self.pixels[y][x]
        # Clamp each channel to the 8-bit maximum; alpha is always 255.
        px.r = min(color[0], 255)
        px.g = min(color[1], 255)
        px.b = min(color[2], 255)
        px.a = 255
class TestDSModifierMultyModification():
    """Tests for stacking one DSModifier on top of another (composed names and paths)."""

    def test_modifier_multiple_initialization(self):
        # Wrapping a modifier in another composes the internal name with '#'.
        stacked = DSModifier(DSModifier())
        expected_composed = '{}#{}'.format(modifier_name, modifier_name)
        assert stacked.name == modifier_name
        assert stacked._get_name() == expected_composed

    def test_dataset_modification(self):
        expected_mod_path = os.path.join(base_ds, (ds_name + '#{}#{}'.format(modifier_name, modifier_name)))
        expected_images_path = os.path.join(expected_mod_path, 'images')
        stacked = DSModifier(DSModifier())
        (modified_input_path, mod_path, parent_folder) = stacked.modify(data_path=data_path)
        # The modified dataset must land in the doubly-suffixed output folder.
        assert mod_path == expected_mod_path
        assert os.path.exists(mod_path)
        assert modified_input_path == expected_images_path
        assert os.path.exists(modified_input_path)
        assert check_modification(input_path, modified_input_path), 'modified images should be copied in {} and named the same'.format(expected_images_path)
        # Clean up the generated dataset folder.
        shutil.rmtree(mod_path)

    def test_log_parameters(self):
        stacked = DSModifier(DSModifier())
        assert stacked.log_parameters() == {'modifier': 'base_modifier#base_modifier'}
class ConstantPadNd(Function):
    """Old-style autograd Function padding the last k dimensions of a tensor
    with a constant value; negative entries in `pad` crop instead of pad.

    `pad` follows the F.pad convention: (left_k, right_k, ..., left_1, right_1),
    i.e. (before, after) pairs ordered from the last dimension backwards.
    """

    def symbolic(g, input, pad, value=0):
        # ONNX export: translate the F.pad-style list into ONNX 'Pad' paddings.
        paddings = prepare_onnx_paddings(len(input.type().sizes()), pad)
        return g.op('Pad', input, pads_i=paddings, mode_s='constant', value_f=value)

    def forward(ctx, input, pad, value=0):
        ctx.pad = pad
        ctx.value = value
        ctx.input_size = input.size()
        ctx.l_inp = len(input.size())
        # Group the flat pad list into (before, after) pairs and reverse so
        # pad_tup[i] lines up with the i-th padded dimension, outermost first.
        ctx.pad_tup = tuple([(a, b) for (a, b) in zip(pad[:(- 1):2], pad[1::2])][::(- 1)])
        ctx.l_pad = len(ctx.pad_tup)
        ctx.l_diff = (ctx.l_inp - ctx.l_pad)
        assert (ctx.l_inp >= ctx.l_pad)
        # Output extent per padded dimension: original size plus both pad
        # amounts (negative pads shrink it).
        new_dim = tuple([sum(((d,) + ctx.pad_tup[i])) for (i, d) in enumerate(input.size()[(- ctx.l_pad):])])
        assert all([(d > 0) for d in new_dim]), 'input is too small'
        output = input.new((input.size()[:ctx.l_diff] + new_dim)).fill_(ctx.value)
        # Crop the input view where pads are negative...
        c_input = input
        for (i, p) in zip(range(ctx.l_inp)[(- ctx.l_pad):], ctx.pad_tup):
            if (p[0] < 0):
                c_input = c_input.narrow(i, (- p[0]), (c_input.size(i) + p[0]))
            if (p[1] < 0):
                c_input = c_input.narrow(i, 0, (c_input.size(i) + p[1]))
        # ...and narrow the output view where pads are positive, so both views
        # end up the same shape and can be copied directly.
        c_output = output
        for (i, p) in zip(range(ctx.l_inp)[(- ctx.l_pad):], ctx.pad_tup):
            if (p[0] > 0):
                c_output = c_output.narrow(i, p[0], (c_output.size(i) - p[0]))
            if (p[1] > 0):
                c_output = c_output.narrow(i, 0, (c_output.size(i) - p[1]))
        c_output.copy_(c_input)
        return output

    def backward(ctx, grad_output):
        grad_input = Variable(grad_output.data.new(ctx.input_size).zero_())
        # Per-dimension slices of the input that actually received values in
        # forward (cropped regions are excluded below).
        grad_input_slices = [slice(0, x) for x in ctx.input_size]

        def narrow_slice(dim, start, length):
            # Shrink the recorded slice for `dim` in place.
            grad_input_slices[dim] = slice((grad_input_slices[dim].start + start), ((grad_input_slices[dim].start + start) + length))

        def slice_length(dim):
            return (grad_input_slices[dim].stop - grad_input_slices[dim].start)

        # Where forward cropped (negative pads), gradient flows only into the
        # kept region of the input.
        for (i, p) in zip(range(ctx.l_inp)[(- ctx.l_pad):], ctx.pad_tup):
            if (p[0] < 0):
                narrow_slice(i, (- p[0]), (slice_length(i) + p[0]))
            if (p[1] < 0):
                narrow_slice(i, 0, (slice_length(i) + p[1]))
        # Where forward padded (positive pads), drop the gradient of the pad.
        cg_output = grad_output
        for (i_s, p) in zip(range(ctx.l_inp)[(- ctx.l_pad):], ctx.pad_tup):
            if (p[0] > 0):
                cg_output = cg_output.narrow(i_s, p[0], (cg_output.size(i_s) - p[0]))
            if (p[1] > 0):
                cg_output = cg_output.narrow(i_s, 0, (cg_output.size(i_s) - p[1]))
        gis = tuple(grad_input_slices)
        grad_input[gis] = cg_output
        # No gradient for the `pad` and `value` arguments.
        return (grad_input, None, None)
def eval_distinct2(hyps_resp):
    """Compute distinct-2 (unique-bigram ratio) over a list of tokenized responses.

    hyps_resp: list of token lists. Prints an error and returns None for empty
    or malformed input. The result is clamped to 0.0 if it falls outside [0, 1].
    """
    if len(hyps_resp) == 0:
        print('ERROR, eval_distinct get empty input')
        return
    if type(hyps_resp[0]) != list:
        print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(type(hyps_resp[0])))
        return
    # Normalize: stringify every token, then re-split on whitespace so tokens
    # that themselves contain spaces are broken apart.
    hyps_resp = [[str(x) for x in l] for l in hyps_resp]
    hyps_resp = [' '.join(i).split() for i in hyps_resp]
    num_tokens = sum([len(i) for i in hyps_resp])
    # Bug fix: an unused distinct-1 computation (count_ngram(hyps_resp, 1))
    # was removed — its value was never returned or logged.
    # NOTE(review): distinct-2 is normalized by the token count rather than
    # the bigram count; preserved as-is to match the original behavior.
    dist2 = count_ngram(hyps_resp, 2) / float(num_tokens)
    if not (0 <= dist2 <= 1):
        dist2 = 0.0
    return dist2
class DRODataset(Dataset):
    """Dataset wrapper for group-DRO style training: precomputes per-group and
    per-class counts and can build a group-reweighted DataLoader.

    Each item yielded by the wrapped dataset (after process_item_fn, if given)
    is expected to be an (x, y, g) triple with g a group index in [0, n_groups)
    and y a class index in [0, n_classes).
    """

    def __init__(self, dataset, process_item_fn, n_groups, n_classes, group_str_fn):
        self.dataset = dataset
        self.process_item = process_item_fn
        self.n_groups = n_groups
        self.n_classes = n_classes
        self.group_str = group_str_fn
        group_array = []
        y_array = []
        # Iterate through self (so process_item_fn is applied) to collect the
        # label and group of every example up front. O(len(dataset)) at init.
        for (x, y, g) in self:
            group_array.append(g)
            y_array.append(y)
        self._group_array = torch.LongTensor(group_array)
        self._y_array = torch.LongTensor(y_array)
        # Broadcast-compare against arange to histogram groups and classes.
        self._group_counts = (torch.arange(self.n_groups).unsqueeze(1) == self._group_array).sum(1).float()
        self._y_counts = (torch.arange(self.n_classes).unsqueeze(1) == self._y_array).sum(1).float()

    def __getitem__(self, idx):
        if (self.process_item is None):
            return self.dataset[idx]
        else:
            return self.process_item(self.dataset[idx])

    def __len__(self):
        return len(self.dataset)

    def group_counts(self):
        # Float tensor of length n_groups: number of examples per group.
        return self._group_counts

    def class_counts(self):
        # Float tensor of length n_classes: number of examples per class.
        return self._y_counts

    def input_size(self):
        # Size of the first example's input; implicitly returns None when empty.
        for (x, y, g) in self:
            return x.size()

    def get_loader(self, train, reweight_groups, **kwargs):
        """Build a DataLoader; when train and reweight_groups are set, examples
        are sampled with probability inversely proportional to their group size
        so every group is drawn uniformly in expectation."""
        if (not train):
            # Evaluation loaders must not reweight and are not shuffled.
            assert (reweight_groups is None)
            shuffle = False
            sampler = None
        elif (not reweight_groups):
            shuffle = True
            sampler = None
        else:
            # Weight each example by len(dataset) / (size of its group).
            group_weights = (len(self) / self._group_counts)
            weights = group_weights[self._group_array]
            sampler = WeightedRandomSampler(weights, len(self), replacement=True)
            shuffle = False
        loader = DataLoader(self, shuffle=shuffle, sampler=sampler, **kwargs)
        return loader
class lazydict(object):
    """Dict-like container whose values are zero-argument factories that are
    evaluated on first access and cached afterwards.

    Construct with keyword arguments mapping key -> callable; `d[key]` calls
    the factory once and caches its result.
    """

    def __init__(self, **kwargs):
        # kwargs maps key -> zero-argument callable producing the value.
        self._lazy_dict = kwargs
        # Cache of already-materialized values.
        self._dict = {}

    def __getitem__(self, key):
        if (key not in self._dict):
            # Materialize lazily on first access, then cache. Raises KeyError
            # for unknown keys (from the _lazy_dict lookup).
            self._dict[key] = self._lazy_dict[key]()
        return self._dict[key]

    def __setitem__(self, i, y):
        self.set(i, y)

    def get(self, key, default=None):
        """Return the (materialized) value for key, or default when absent."""
        if (key in self._lazy_dict):
            return self[key]
        return default

    def set(self, key, value):
        """Register a new factory for key.

        Bug fix: also drop any already-materialized value for this key, so the
        new factory is visible on the next access instead of the stale cached
        result of the previous one.
        """
        self._dict.pop(key, None)
        self._lazy_dict[key] = value
class ResNet(nn.Module):
    """DFN-style semantic segmentation network: a ResNet trunk with a deep stem
    (three 3x3 convs) and a top-down refinement decoder built from RRB
    (refinement residual) and CAB (channel attention) blocks, with CAM global
    context on top of the deepest features.

    forward() returns criterion(outs, labels) when both a criterion and labels
    are provided, otherwise [head_output, dsn_auxiliary_output].
    """

    def __init__(self, block, layers, num_classes, criterion):
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: three 3x3 convolutions replace the usual single 7x7.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        # Bug fix: an earlier duplicate `self.maxpool = nn.MaxPool2d(...)`
        # assignment without ceil_mode was dead (immediately overwritten) and
        # has been removed; only this ceil_mode=True pool ever took effect.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        self.relu = nn.ReLU(inplace=False)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Decoder: global context + per-stage refinement/attention blocks.
        self.CAM = CAM(2048, 256)
        self.RRB5a = RRB(256, 256)
        self.CAB5 = CAB(256)
        self.RRB5b = RRB(256, 256)
        self.RRB4a = RRB(2048, 256)
        self.CAB4 = CAB(256)
        self.RRB4b = RRB(256, 256)
        self.RRB3a = RRB(1024, 256)
        self.CAB3 = CAB(256)
        self.RRB3b = RRB(256, 256)
        self.RRB2a = RRB(512, 256)
        self.CAB2 = CAB(256)
        self.RRB2b = RRB(256, 256)
        self.RRB1a = RRB(256, 256)
        # Auxiliary (dsn, deep supervision) head and the main segmentation head.
        self.dsn = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), InPlaceABNSync(256), nn.Dropout2d(0.1), nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        self.head = nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), InPlaceABNSync(256), nn.Dropout2d(0.1), nn.Conv2d(256, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        self.criterion = criterion

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):
        """Stack `blocks` residual blocks; a 1x1-conv downsample path is added
        when the stride or channel width changes."""
        downsample = None
        if (stride != 1) or (self.inplanes != (planes * block.expansion)):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion), affine=affine_par))
        layers = []
        # multi_grid may be a tuple of per-block dilation multipliers.
        generate_multi_grid = (lambda index, grids: (grids[(index % len(grids))] if isinstance(grids, tuple) else 1))
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, multi_grid=generate_multi_grid(0, multi_grid)))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid)))
        return nn.Sequential(*layers)

    def forward(self, x, labels=None):
        # Deep stem.
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        # Bottom-up backbone stages interleaved with top-down refinement:
        # each stage's features are refined (RRBxa), fused with the previous
        # decoder output via channel attention (CABx), then refined again.
        x1 = self.layer1(x)
        d1 = self.RRB1a(x1)
        x2 = self.layer2(x1)
        d2 = self.RRB2a(x2)
        d2 = self.CAB2(d1, d2)
        d2 = self.RRB2b(d2)
        x3 = self.layer3(x2)
        d3 = self.RRB3a(x3)
        d3 = self.CAB3(d2, d3)
        d3 = self.RRB3b(d3)
        # Auxiliary supervision branches off the mid-level decoder features.
        dsn = self.dsn(d3)
        x4 = self.layer4(x3)
        d4 = self.RRB4a(x4)
        d4 = self.CAB4(d3, d4)
        d4 = self.RRB4b(d4)
        # Global context on the deepest backbone features.
        x5 = self.CAM(x4)
        d5 = self.RRB5a(x5)
        d5 = self.CAB5(d4, d5)
        d5 = self.RRB5b(d5)
        out = self.head(d5)
        outs = [out, dsn]
        # Bug fix: a trailing unreachable `return [out]` after this if/else
        # (both branches already return) was removed.
        if (self.criterion is not None) and (labels is not None):
            return self.criterion(outs, labels)
        else:
            return outs

    def init(self, restore_from):
        """Load backbone weights from a checkpoint file, skipping 'fc' entries."""
        saved_state_dict = torch.load(restore_from)
        new_params = self.state_dict().copy()
        for i in saved_state_dict:
            i_parts = i.split('.')
            if not (i_parts[0] == 'fc'):
                new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
        self.load_state_dict(new_params)
def nonl(x, cfg, inplace=False):
    """Quantized (or plain) ReLU nonlinearity selected by cfg.a_quantize.

    Dispatches to fixed-point or power-of-two activation quantizers (static or
    parametric variants); any unrecognized cfg.a_quantize (including None)
    falls through to a plain ReLU.
    """
    # Side effect only: registers a constant 'Asize' parameter holding the
    # per-sample activation count; the returned handle itself is unused.
    _s = get_parameter_or_create('Asize', (), ConstantInitializer(np.prod(x.shape[1:])), need_grad=False)
    delta = cfg.a_stepsize
    # Largest representable value of a fixed-point grid with this step size.
    xmax = (delta * ((2.0 ** cfg.a_bitwidth) - 1))
    if ((cfg.a_quantize is not None) and ('pow2' in cfg.a_quantize)):
        # Power-of-two quantization: snap xmax to a power of two and derive
        # the smallest positive level xmin from it, then clamp to cfg bounds.
        xmax = (2.0 ** np.round(np.log2(xmax)))
        xmin = (xmax / (2.0 ** ((2.0 ** (cfg.a_bitwidth - 1)) - 1)))
        xmin = np.clip(xmin, (cfg.a_xmin_min + 1e-05), (cfg.a_xmin_max - 1e-05))
    xmax = np.clip(xmax, (cfg.a_xmax_min + 1e-05), (cfg.a_xmax_max - 1e-05))
    print(f'We use default delta ({(delta, xmax)}) for quantized nonlinearity.')
    if (cfg.a_quantize == 'fp_relu'):
        # Static fixed-point quantization with the configured step size.
        return F.fixed_point_quantize(x, sign=False, n=cfg.a_bitwidth, delta=cfg.a_stepsize)
    elif (cfg.a_quantize == 'parametric_fp_b_xmax_relu'):
        # Learnable bitwidth and xmax.
        return PQ.parametric_fixed_point_quantize_b_xmax(x, sign=False, n_init=cfg.a_bitwidth, n_min=cfg.a_bitwidth_min, n_max=cfg.a_bitwidth_max, xmax_init=xmax, xmax_min=cfg.a_xmax_min, xmax_max=cfg.a_xmax_max, name='Aquant')
    elif (cfg.a_quantize == 'parametric_fp_d_xmax_relu'):
        # Learnable step size and xmax.
        return PQ.parametric_fixed_point_quantize_d_xmax(x, sign=False, d_init=delta, d_min=cfg.a_stepsize_min, d_max=cfg.a_stepsize_max, xmax_init=xmax, xmax_min=cfg.a_xmax_min, xmax_max=cfg.a_xmax_max, name='Aquant')
    elif (cfg.a_quantize == 'parametric_fp_d_b_relu'):
        # Learnable step size and bitwidth.
        return PQ.parametric_fixed_point_quantize_d_b(x, sign=False, n_init=cfg.a_bitwidth, n_min=cfg.a_bitwidth_min, n_max=cfg.a_bitwidth_max, d_init=delta, d_min=cfg.a_stepsize_min, d_max=cfg.a_stepsize_max, name='Aquant')
    elif (cfg.a_quantize == 'pow2_relu'):
        # Static power-of-two quantization.
        return F.pow2_quantize(x, sign=False, with_zero=True, n=cfg.a_bitwidth, m=np.round(np.log2(xmax)))
    elif (cfg.a_quantize == 'parametric_pow2_b_xmax_relu'):
        return PQ.parametric_pow2_quantize_b_xmax(x, sign=False, with_zero=True, n_init=cfg.a_bitwidth, n_min=cfg.a_bitwidth_min, n_max=cfg.a_bitwidth_max, xmax_init=xmax, xmax_min=cfg.a_xmax_min, xmax_max=cfg.a_xmax_max, name='Aquant')
    elif (cfg.a_quantize == 'parametric_pow2_b_xmin_relu'):
        return PQ.parametric_pow2_quantize_b_xmin(x, sign=False, with_zero=True, n_init=cfg.a_bitwidth, n_min=cfg.a_bitwidth_min, n_max=cfg.a_bitwidth_max, xmin_init=xmin, xmin_min=cfg.a_xmin_min, xmin_max=cfg.a_xmax_max, name='Aquant')
    elif (cfg.a_quantize == 'parametric_pow2_xmin_xmax_relu'):
        return PQ.parametric_pow2_quantize_xmin_xmax(x, sign=False, with_zero=True, xmin_init=xmin, xmin_min=cfg.a_xmin_min, xmin_max=cfg.a_xmax_max, xmax_init=xmax, xmax_min=cfg.a_xmax_min, xmax_max=cfg.a_xmax_max, name='Aquant')
    else:
        # No (recognized) quantizer configured: plain ReLU.
        return F.relu(x, inplace=inplace)
class GELU_VGG(nn.Module):
    """VGG-style CNN (conv/BN/GELU stacks from a layer spec) with a 100-way
    linear classifier head."""

    def __init__(self, vgg_name):
        super(GELU_VGG, self).__init__()
        # `cfg` is the module-level table of per-variant layer specs.
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 100)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Build the feature extractor from a spec list: 'M' inserts a 2x2 max
        pool, an int adds a conv/BN/GELU block with that many channels."""
        layers = []
        channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([nn.Conv2d(channels, spec, kernel_size=3, padding=1), nn.BatchNorm2d(spec), nn.GELU()])
                channels = spec
        # Trailing no-op pool kept for architecture-table compatibility.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
def thread_wrapper(program, parent_tid, *args, **kwargs):
    """Run *program(*args, **kwargs)* in a worker thread after binding the
    parent thread's dsp settings into this thread's context."""
    # Propagate thread-local settings from the parent before executing.
    dsp_settings.initialize_for_thread(parent_tid)
    return program(*args, **kwargs)
def init_array(A):
    """Fill the n-by-n matrix A with (i*(j+2) + 2) / n values (benchmark-style
    deterministic initialization; n comes from the symbolic size N)."""
    n = N.get()
    for row in range(n):
        for col in range(n):
            A[row, col] = datatype((row * (col + 2)) + 2) / n
def _clean_loop_body(body: str) -> str:
if body.endswith('continue;\n'):
body = body[:(- len('continue;\n'))]
return body |
def register_Ns3TcpOptionNOP_methods(root_module, cls):
    """Register constructors and methods of ns3::TcpOptionNOP on the pybindgen
    wrapper class. Auto-generated binding-registration code."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::TcpOptionNOP const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetKind', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    return
def skipIfUnsupportedMaxOpsetVersion(min_opset_version):
    """Decorator factory: skip the test when self.opset_version exceeds the
    given version (i.e. the test is only valid up to that opset)."""

    def skip_dec(func):
        def wrapper(self):
            # Run the test only at or below the supported opset version.
            if self.opset_version <= min_opset_version:
                return func(self)
            raise unittest.SkipTest('Skip verify test for unsupported opset_version')
        return wrapper
    return skip_dec
_model
def mpvit_xsmall(**kwargs):
    """Build the MPViT-XSmall variant (multi-path vision transformer) with its
    default architecture hyperparameters; extra kwargs are forwarded to MPViT."""
    net = MPViT(img_size=224, num_stages=4, num_path=[2, 3, 3, 3], num_layers=[1, 2, 4, 1], embed_dims=[64, 128, 192, 256], mlp_ratios=[4, 4, 4, 4], num_heads=[8, 8, 8, 8], **kwargs)
    # Attach the default pretraining/config metadata expected by timm-style code.
    net.default_cfg = _cfg_mpvit()
    return net
class HerReplayBuffer(ReplayBuffer):
    """Replay buffer with Hindsight Experience Replay (HER) goal relabeling.

    The transition sampler is a closure built from (replay_k, reward_fun); it
    is dropped on pickling and rebuilt on unpickling.
    """

    def __init__(self, replay_k, reward_fun, env_spec, size_in_transitions, time_horizon):
        self._sample_transitions = make_her_sample(replay_k, reward_fun)
        # Kept so the sampler can be reconstructed in __setstate__.
        self._replay_k = replay_k
        self._reward_fun = reward_fun
        super().__init__(env_spec, size_in_transitions, time_horizon)

    def sample(self, batch_size):
        """Sample a batch of transitions with HER relabeling applied."""
        # Restrict every buffer field to the filled portion before sampling.
        active = {key: values[:self._current_size] for (key, values) in self._buffer.items()}
        transitions = self._sample_transitions(active, batch_size)
        expected_keys = ['reward', 'next_observation', 'next_achieved_goal'] + list(self._buffer.keys())
        for key in expected_keys:
            assert key in transitions, ('key %s missing from transitions' % key)
        return transitions

    def __getstate__(self):
        # The sampler closure is not picklable; drop it here, rebuild later.
        state = self.__dict__.copy()
        del state['_sample_transitions']
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self._sample_transitions = make_her_sample(state['_replay_k'], state['_reward_fun'])
class Knots(Singleton, Parent):
    """Singleton parent of all knots, as an infinite monoid (the product being
    connected sum)."""

    def __init__(self):
        Parent.__init__(self, category=Monoids().Infinite())

    def _repr_(self):
        return 'Knots'

    def one(self):
        # The identity element: the unknot, built from empty data.
        return self.element_class([])

    def an_element(self):
        # A small sample knot given by PD-code-like crossing data
        # (presumably the trefoil — confirm against the element_class docs).
        return self.element_class([[1, 5, 2, 4], [5, 3, 6, 2], [3, 1, 4, 6]])

    def from_gauss_code(self, gauss):
        """Build a knot from a Gauss code, recovering crossing orientations."""
        # recover_orientations returns several pieces; index 3 holds the
        # orientation list used by the Knot constructor.
        orientations = recover_orientations(gauss)[3]
        return Knot([[gauss], orientations])

    def from_dowker_code(self, code):
        """Build a knot from a Dowker-Thistlethwaite code via its Gauss code."""
        gauss = dowker_to_gauss(code)
        orientations = recover_orientations(gauss)[3]
        return Knot([[gauss], orientations])

    def from_table(self, n, k):
        """Return the knot with n crossings and table index k from the small
        knots table (as a braid closure).

        Raises ValueError when n > 10 or the pair is not tabulated.
        """
        if (n > 10):
            raise ValueError('more than 10 crossings, not in the knot table')
        # Imported locally to avoid a module-level import cycle / startup cost.
        from sage.groups.braid import BraidGroup
        if ((n, k) in small_knots_table):
            # Table entries are (braid group rank, braid word).
            (m, word) = small_knots_table[(n, k)]
            G = BraidGroup(m)
            return Knot(G(word))
        else:
            raise ValueError('not found in the knot table')

    # Elements of this parent are Knot instances.
    Element = Knot
def test_channel_first_with_2_dim_obs() -> None:
    """ChannelFirst must prepend a channel axis to 2-D observations, both on
    reset and step, and remain usable by an algorithm end to end."""
    env = DummyAtari(squeeze=True)
    assert env.observation_space.shape
    (width, height) = env.observation_space.shape
    wrapped_env = ChannelFirst(env)
    (observation, _) = wrapped_env.reset()
    assert observation.shape == (1, width, height)
    (observation, _, _, _, _) = wrapped_env.step(wrapped_env.action_space.sample())
    assert observation.shape == (1, width, height)
    # Smoke-check that a DQN can be built on and predict from the wrapped env.
    algo = DQNConfig().create()
    algo.build_with_env(wrapped_env)
    algo.predict(np.expand_dims(observation, axis=0))
def create_test_input(batch_size, height, width, channels):
    """Create a test NHWC tensor: a placeholder when any dimension is unknown,
    otherwise a constant tensor whose value at (h, w) is h + w, tiled over the
    batch and channel axes."""
    if None in [batch_size, height, width, channels]:
        return tf.placeholder(tf.float32, (batch_size, height, width, channels))
    # Deterministic (height, width) grid with value h + w at each position.
    grid = np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width])
    # Tile to the full (batch, height, width, channels) shape.
    tiled = np.tile(np.reshape(grid, [1, height, width, 1]), [batch_size, 1, 1, channels])
    return tf.to_float(tiled)
def all_saved_variables(derivatives, key):
    """Collect the saved-argument dicts under `key` across all derivative
    entries, deduplicated by 'name' with first-occurrence order preserved."""
    unique = {}
    for derivative in derivatives:
        for saved_arg in derivative[key]:
            # setdefault keeps the first dict seen for each name.
            unique.setdefault(saved_arg['name'], saved_arg)
    return list(unique.values())
def main(data, split_num, year):
    """Build one (year, split_num) fold of the controversy dataset: select the
    train/test posts by index, deduplicate, relabel, split the test set in two,
    and write full-size plus small json files."""
    # Locate the raw index files for this fold.
    train_index_path = glob.glob('../data/controversy/raw_data/split/*trainSet_Twitter{}_{}*'.format(year, split_num))[0]
    test_index_path = glob.glob('../data/controversy/raw_data/split/*testSet_Twitter{}_{}*'.format(year, split_num))[0]
    # Output locations for the processed splits.
    train_data_output_path = '../data/controversy/processed_data/linear_structure/twitter{}/split_data/split_{}/train_unique.json'.format(year, split_num)
    test_data_1_output_path = '../data/controversy/processed_data/linear_structure/twitter{}/split_data/split_{}/test_1_unique.json'.format(year, split_num)
    test_data_2_output_path = '../data/controversy/processed_data/linear_structure/twitter{}/split_data/split_{}/test_2_unique.json'.format(year, split_num)
    train_data_output_small_path = '../data/controversy/processed_data/linear_structure/twitter{}/split_data/split_{}/train_small_unique.json'.format(year, split_num)
    test_data_1_output_small_path = '../data/controversy/processed_data/linear_structure/twitter{}/split_data/split_{}/test_1_small_unique.json'.format(year, split_num)
    test_data_2_output_small_path = '../data/controversy/processed_data/linear_structure/twitter{}/split_data/split_{}/test_2_small_unique.json'.format(year, split_num)
    train_index = read_index(train_index_path)
    test_index = read_index(test_index_path)
    train_data = get_data(data, train_index)
    test_data = get_data(data, test_index)
    (train_data_unique, train_length_lst) = get_unique_posts(train_data)
    (test_data_unique, test_length_lst) = get_unique_posts(test_data)
    # Bug fix: both statistics lines previously drew max() and min() from the
    # wrong lists (the first used the train list twice, the second the test
    # list twice, with min/max swapped against the labels). Each statistic now
    # matches its label.
    print('Max training is: {}, Max testing is: {}'.format(max(train_length_lst), max(test_length_lst)))
    print('Min training is: {}, Min testing is: {}'.format(min(train_length_lst), min(test_length_lst)))
    print()
    train_data_unique = change_labels(train_data_unique)
    test_data_unique = change_labels(test_data_unique)
    print('Training labels: {}'.format(collections.Counter([item['label'] for item in train_data_unique])))
    print('Testing labels: {}'.format(collections.Counter([item['label'] for item in test_data_unique])))
    print()
    (test_data_1, test_data_2) = split_test_data(test_data_unique)
    write_data(train_data_unique, train_data_output_path)
    write_data(test_data_1, test_data_1_output_path)
    write_data(test_data_2, test_data_2_output_path)
    # Small (subsampled) variants of each split.
    write_data(get_small_data(train_data_unique), train_data_output_small_path)
    write_data(get_small_data(test_data_1), test_data_1_output_small_path)
    write_data(get_small_data(test_data_2), test_data_2_output_small_path)
class AE(nn.Module):
    """Spiral-convolution mesh autoencoder.

    The encoder stacks SpiralEnblock layers (one per entry of out_channels)
    followed by a Linear projection to the latent vector; the decoder mirrors
    it with a Linear expansion, SpiralDeblock layers, and a final SpiralConv
    back to in_channels per vertex.
    """

    def __init__(self, in_channels, out_channels, latent_channels, spiral_indices, down_transform, up_transform):
        super(AE, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Bug fix: a duplicated `self.latent_channels = latent_channels`
        # assignment was removed (it was assigned twice back to back).
        self.latent_channels = latent_channels
        self.spiral_indices = spiral_indices
        self.down_transform = down_transform
        self.up_transform = up_transform
        # Vertex count at the coarsest mesh level (rows of the last transform).
        self.num_vert = self.down_transform[-1].size(0)
        # --- encoder ---
        self.en_layers = nn.ModuleList()
        for idx in range(len(out_channels)):
            if idx == 0:
                self.en_layers.append(SpiralEnblock(in_channels, out_channels[idx], self.spiral_indices[idx]))
            else:
                self.en_layers.append(SpiralEnblock(out_channels[idx - 1], out_channels[idx], self.spiral_indices[idx]))
        self.en_layers.append(nn.Linear(self.num_vert * out_channels[-1], latent_channels))
        # --- decoder (mirror of the encoder) ---
        self.de_layers = nn.ModuleList()
        self.de_layers.append(nn.Linear(latent_channels, self.num_vert * out_channels[-1]))
        for idx in range(len(out_channels)):
            if idx == 0:
                self.de_layers.append(SpiralDeblock(out_channels[(-idx) - 1], out_channels[(-idx) - 1], self.spiral_indices[(-idx) - 1]))
            else:
                self.de_layers.append(SpiralDeblock(out_channels[-idx], out_channels[(-idx) - 1], self.spiral_indices[(-idx) - 1]))
        self.de_layers.append(SpiralConv(out_channels[0], in_channels, self.spiral_indices[0]))
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize all weights and zero all biases."""
        for (name, param) in self.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0)
            else:
                nn.init.xavier_uniform_(param)

    def encoder(self, x):
        """Encode per-vertex features to the latent vector; the last layer is
        the Linear head, preceded by flattening of the vertex dimension."""
        for (i, layer) in enumerate(self.en_layers):
            if i != (len(self.en_layers) - 1):
                x = layer(x, self.down_transform[i])
            else:
                # Flatten (num_vert, C) to match the Linear layer's input width.
                x = x.view(-1, layer.weight.size(1))
                x = layer(x)
        return x

    def decoder(self, x):
        """Decode the latent vector back to per-vertex features."""
        num_layers = len(self.de_layers)
        # Number of spiral de-conv stages between the Linear and the last conv.
        num_features = num_layers - 2
        for (i, layer) in enumerate(self.de_layers):
            if i == 0:
                # Linear expansion back to (num_vert, C) vertex features.
                x = layer(x)
                x = x.view(-1, self.num_vert, self.out_channels[-1])
            elif i != (num_layers - 1):
                x = layer(x, self.up_transform[num_features - i])
            else:
                # Final SpiralConv maps back to in_channels with no transform.
                x = layer(x)
        return x

    def forward(self, x, *indices):
        """Autoencode x; *indices is accepted for interface compatibility and unused."""
        z = self.encoder(x)
        out = self.decoder(z)
        return out
def create_decoder():
    """Instantiate the decoder selected by args.decoder from the registry and
    register it as a predictor.

    Exits the process (after logging the stack trace) if construction fails.
    """
    try:
        decoder = decoding.DECODER_REGISTRY[args.decoder](args)
    except Exception as e:
        # Any construction failure is fatal for the whole program: log the
        # exception type, message, and full traceback, then exit.
        logging.fatal(('An %s has occurred while initializing the decoder: %s Stack trace: %s' % (sys.exc_info()[0], e, traceback.format_exc())))
        sys.exit('Could not initialize decoder.')
    add_predictor(decoder)
    return decoder
def is_type(type_, types):
    """Return True if *type_* exposes any of the attribute names in *types*
    with a truthy value (missing attributes count as False)."""
    return any(getattr(type_, attr, False) for attr in types)
class ModelLM(object):
    """Factory wrapper choosing the LM scorer backend from a model_type string.

    None -> no model; 'gpt2' and 'bert' use dedicated wrappers; the short
    aliases 'seq2seq'/'masked'/'incremental' map onto minicons scorer class
    names; anything else is passed to MiniconsLM verbatim.
    """

    # Short aliases mapped onto minicons scorer class names.
    _MINICONS_ALIASES = {'seq2seq': 'Seq2SeqScorer', 'masked': 'MaskedLMScorer', 'incremental': 'IncrementalLMScorer'}

    def __init__(self, model_name_or_path=None, model_type=None, device=None, gpu_batch_size=None, gpu_id=0):
        self.gpu_batch_size = gpu_batch_size
        if model_type is None:
            self.model = None
        elif model_type == 'gpt2':
            self.model = GPT2LM(model_name_or_path, device=device, gpu_batch_size=gpu_batch_size)
        elif model_type == 'bert':
            # BertLM takes a GPU id rather than a device handle.
            self.model = BertLM(model_name_or_path, gpu_batch_size=gpu_batch_size, gpu_id=gpu_id)
        else:
            # Resolve the alias if known, otherwise forward model_type as-is.
            scorer_name = self._MINICONS_ALIASES.get(model_type, model_type)
            self.model = MiniconsLM(model_name_or_path, device=device, gpu_batch_size=gpu_batch_size, model_type=scorer_name)
def eval_all_metrics(ref_texts, hypo_texts, label):
    """Run every generation-quality metric on (hypo_texts vs ref_texts) and
    pickle each result to eval_logs/<metric>/<label>.pickle.

    Bug fix: file objects were previously passed to pickle.dump via a bare
    open(...) and never closed; each dump now uses a context manager. The
    repeated makedirs/evaluate/dump sequence is collapsed into one loop.
    """
    metric_fns = [
        ('ms_jaccard', evaluate_ms_jaccard),
        ('tfidf_distance', evaluate_tfidf_distance),
        ('frechet_bert_distance', evaluate_frechet_bert_distance),
        ('forward_backward_bleu', evaluate_forward_backward_bleu),
    ]
    for metric_name, evaluate_fn in metric_fns:
        out_dir = f'eval_logs/{metric_name}'
        os.makedirs(out_dir, exist_ok=True)
        results = evaluate_fn(hypo_texts=hypo_texts, ref_texts=ref_texts)
        with open(f'{out_dir}/{label}.pickle', 'wb') as f:
            pickle.dump(results, f)
def test_initialize_background_knowledge_1():
    """A default-constructed Background has no modes and both search flags off."""
    _bk = Background()
    assert (_bk.modes is None)
    assert (not _bk.line_search)
    assert (not _bk.recursion)
class RobertaLongSelfAttention(LongformerSelfAttention):
    """Adapter so LongformerSelfAttention can be dropped into a RoBERTa layer
    that calls attention with the full BERT-style signature."""

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False):
        # Accepts the full signature but forwards only the arguments Longformer
        # attention takes; head_mask and the encoder_* (cross-attention) args
        # are ignored here.
        return super().forward(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
def tmpt5_base_tied_lmheads_512_4_4p_bw12_squad1_mpipe():
    """Config dict for a stateless t5-base with tied LM heads (pipeline run)."""
    # Settings forced directly on the underlying model config.
    explicit_overrides = {
        'return_dict': False,
        'use_cache': False,
        'output_only': True,
        'output_attentions': False,
        'precomputed_masks': False,
        'output_hidden_states': False,
    }
    return dict(
        model_type='new_t5_stateless',
        model_name_or_path='t5-base',
        do_lower_case=False,
        output_past=False,
        output_attentions=False,
        output_hidden_states=False,
        do_resize_token_embedding=True,
        explicitly_set_dict=explicit_overrides,
        stateless_tied=True,
    )
class Convolution2DArchitectureBase(ModelArchitecture):
    """Shared pre/post-processing for 2d-CNN architectures: appends a channel
    axis on the way in, strips it on the way out, and validates shapes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def preprocess_data(self, x_train, x_val, x_test, y_train, y_val, y_test):
        """Add a trailing channel axis to the inputs and move all arrays to the
        configured backend (cupy on GPU, numpy otherwise)."""
        arrays = (x_train[..., None], x_val[..., None], x_test[..., None], y_train, y_val, y_test)
        converter = helpers.arrays_to_cupy if self.use_gpu else helpers.arrays_to_numpy
        return converter(*arrays)

    def postprocess_relevance(self, *args):
        """Move relevance arrays back to numpy and strip the channel axis."""
        as_numpy = helpers.arrays_to_numpy(*args)
        return tuple(r[..., 0] for r in as_numpy)

    def assert_shapes(self, x_shape, y_shape):
        """Validate that inputs are NHWC-like 4-d and labels 2-d."""
        assert len(x_shape) == 4, 'Expected 4-dimensional shape tuple for 2d-CNN type models, but got x_shape={}'.format(x_shape)
        assert len(y_shape) == 2, 'Expected 2-dimensional shape tuple for 2d-CNN type models, but got y_shape={}'.format(y_shape)
def write_conlltag_with_dict(tag_list, root_path, tag_file, dictfn1, dictfn2, dictfn3, dictfn4=None):
    """Walk a wiki-extractor dump under root_path and write CoNLL-style
    token\\ttag lines to tag_file, tagging wiki-linked spans whose anchor text
    appears in one of the entity dictionaries (tag_list gives the tag per
    dictionary, in order) with an S/B/I/E position prefix.

    Bug fixes vs the original:
    - the dictionaries were loaded into lists named per_dict/org_dict/... but
      read back as dict1..dict4 (NameError / accidental global use); they are
      now one and the same, and dict4 stays an empty list when dictfn4 is
      None, so the membership test is always safe;
    - the output file handle was never closed (now a context manager);
    - the bare `except` around the trailing-punctuation padding is narrowed
      to IndexError (empty line after strip);
    - unused aliases (aline, a_line_list) were removed.
    """
    dict1 = []
    dict2 = []
    dict3 = []
    dict4 = []
    with open(dictfn1) as dict_f:
        for line in dict_f:
            dict1.append(line.strip())
    with open(dictfn2) as dict_f:
        for line in dict_f:
            dict2.append(line.strip())
    with open(dictfn3) as dict_f:
        for line in dict_f:
            dict3.append(line.strip())
    if dictfn4:
        with open(dictfn4) as dict_f:
            for line in dict_f:
                dict4.append(line.strip())
    # Matches one <a ...>anchor</a> link, capturing the anchor text.
    RE_link = re.compile('<a.*?>(.*?)</a>')
    count_file = 0
    with open(tag_file, 'w', encoding='utf-8') as zh_tag:
        for (root, dirs, files) in os.walk(root_path):
            for file in files:
                count_file += 1
                if (count_file % 100) == 0:
                    # Progress indicator.
                    print(count_file)
                filename = os.path.join(root, file)
                with open(filename) as f:
                    for line in f:
                        # Skip wiki-extractor document delimiters.
                        if ('<doc id=' in line) or ('</doc>' in line):
                            continue
                        line = line.strip()
                        try:
                            # Pad the final character with a space so trailing
                            # punctuation is split into its own token below.
                            line = (line[:-1] + ' ') + line[-1]
                        except IndexError:
                            # Empty line after strip.
                            continue
                        # Protect link markup from whitespace splitting.
                        line = line.replace('<a href=', '<ahref=')
                        line = line.replace('</a >', '</a>')
                        line_list = line.split()
                        # Skip overly long sentences.
                        if len(line_list) > 30:
                            continue
                        # Split leading/trailing punctuation off each token,
                        # keeping markup brackets intact.
                        for (i, j) in enumerate(line_list):
                            if (j[0] in string.punctuation) and (j[0] != '<'):
                                j = (j[0] + ' ') + j[1:]
                            if (j[-1] in string.punctuation) and (j[-1] != '>'):
                                j = (j[:-1] + ' ') + j[-1]
                            line_list[i] = j
                        line = ' '.join(line_list)
                        # Replace each link with a placeholder, keeping the
                        # anchor texts to restore in order.
                        interlinks_raw = re.findall(RE_link, line)
                        line = re.sub(RE_link, ' &&&&&******* ', line)
                        line_list = line.split()
                        interlinks_index = []
                        index_flag = 0
                        for (i, j) in enumerate(line_list):
                            count = j.count('&&&&&*******')
                            if count == 0:
                                continue
                            elif count == 1:
                                line_list[i] = line_list[i].replace('&&&&&*******', interlinks_raw[index_flag])
                                index_flag += 1
                                interlinks_index.append(i)
                            else:
                                # Multiple links fused into one token.
                                print('multiiiiiiiiiiiii')
                                for c in range(count):
                                    line_list[i] = line_list[i].replace('&&&&&*******', (' ' + interlinks_raw[index_flag]) + ' ', 1)
                                    index_flag += 1
                                    interlinks_index.append(i)
                        # Assign a tag to each linked token whose anchor text
                        # appears in one of the dictionaries.
                        tag = ['O'] * len(line_list)
                        index_flag = 0
                        for (i, j) in enumerate(interlinks_index):
                            if interlinks_raw[i] in dict1:
                                tag[j] = tag_list[0]
                            elif interlinks_raw[i] in dict2:
                                tag[j] = tag_list[1]
                            elif interlinks_raw[i] in dict3:
                                tag[j] = tag_list[2]
                            elif interlinks_raw[i] in dict4:
                                tag[j] = tag_list[3]
                            else:
                                continue
                        # Emit token\ttag lines; multi-part tokens get B/I/E
                        # prefixes, single tokens get S, untagged tokens O.
                        for (i, j) in enumerate(line_list):
                            if ('<ahref' in j) or ('</a' in j):
                                continue
                            j = j.split()
                            if tag[i] == 'O':
                                for k in range(len(j)):
                                    zh_tag.write((j[k] + '\t') + tag[i] + '\n')
                                continue
                            if len(j) == 1:
                                zh_tag.write(((j[0] + '\t') + 'S') + tag[i] + '\n')
                            else:
                                for k in range(len(j)):
                                    if k == 0:
                                        zh_tag.write(((j[k] + '\t') + 'B') + tag[i] + '\n')
                                    elif k == (len(j) - 1):
                                        zh_tag.write(((j[k] + '\t') + 'E') + tag[i] + '\n')
                                    else:
                                        zh_tag.write(((j[k] + '\t') + 'I') + tag[i] + '\n')
                        # Blank line between sentences (CoNLL convention).
                        # NOTE(review): the flattened original's indentation is
                        # ambiguous here; sentence-level placement matches the
                        # CoNLL format — confirm against downstream readers.
                        zh_tag.write('\n')
def compute_lm_ppl(hyp_uid_to_tra, score_fn):
    """Corpus-level language-model perplexity (base 10) over hypotheses.

    hyp_uid_to_tra: mapping of utterance id -> hypothesis transcript string.
    score_fn: returns the LM score of one sentence; assumed to be a log10
    score, since perplexity is computed as 10 ** (-score / words) —
    TODO confirm against the scorer used by callers.
    """
    lm_score = 0.0
    w_cnt = 0
    for hyp in hyp_uid_to_tra.values():
        cur_score = score_fn(hyp)
        # +1 word accounts for the end-of-sentence token.
        cur_cnt = (len(hyp.split()) + 1)
        lm_score += cur_score
        w_cnt += cur_cnt
        logger.debug(f'''
score sum/avg = {cur_score:.2f}/{(cur_score / cur_cnt):.2f}
hyp = {hyp}''')
    lm_ppl = math.pow(10, ((- lm_score) / w_cnt))
    logger.debug(f'lm ppl = {lm_ppl:.2f}; num. of words = {w_cnt}')
    return lm_ppl
def test_disambiguate_int(ambiguous_node_int):
    """_disambiguate should join each child label to its parent with the
    HiClass separator, producing unique path-qualified labels."""
    # Expected y_ after disambiguation of the integer-labeled fixture.
    ground_truth = np.array([['1', '1::HiClass::Separator::2'], ['2', '2::HiClass::Separator::3']])
    ambiguous_node_int._disambiguate()
    assert_array_equal(ground_truth, ambiguous_node_int.y_)
class HighwayEntrySample():
    """Randomly sampled road-curvature parameters for a highway-entry scenario,
    drawn from the world's road-network RNG."""

    def __init__(self):
        # Overall curvature bounds for the sampled road segments
        # (units presumably 1/m — TODO confirm against the road generator).
        curvature_range = [(- 0.03), 0.03]
        self.c1 = world.world.rng_road_network.uniform(low=(- 0.01), high=0.01)
        self.c2 = world.world.rng_road_network.uniform(low=(- 0.005), high=0.005)
        self.c3 = world.world.rng_road_network.uniform(low=curvature_range[0], high=curvature_range[1])
        # The merge-lane curvature is kept at least 0.01 below c1.
        self.c_merge = world.world.rng_road_network.uniform(low=curvature_range[0], high=(self.c1 - 0.01))
def probability_that_2_overtakes_1(i):
    """Return a function that, given an indexable collection of result objects,
    computes the empirical probability (averaged over dim 0, flattened) that
    drop_top2_probs exceeds drop_top1_probs on the i-th entry."""
    def overtake_probability(x):
        wins = (x[i].drop_top2_probs > x[i].drop_top1_probs).float()
        return wins.mean(dim=0).view(-1)
    return overtake_probability
def log_softmax_to_probabilities(log_softmax, epsilon=1e-05):
    """Convert log-softmax values back to a probability vector.

    Exponentiates and renormalizes, then sanity-checks that the result sums to
    1 within epsilon before returning it."""
    exp_values = np.exp(log_softmax)
    probabilities = exp_values / np.sum(exp_values)
    total = np.sum(probabilities)
    assert (1.0 - epsilon) <= total <= (1.0 + epsilon)
    return probabilities
def loss_plot(epochs_adam_sa, epochs_lbfgs_sa, adam_loss, lbfgs_loss, title=None, dpi=150, figsize=(10, 8)):
    """Plot SA-PINN training loss over both phases (ADAM then L-BFGS) on a log
    scale and show the figure.

    adam_loss is assumed to be sampled every 250 epochs and lbfgs_loss every 5
    epochs — TODO confirm against the training loop that records them.
    """
    # x locations: ADAM points every 250 epochs, then L-BFGS points every 5,
    # starting just after the last ADAM epoch.
    x_adam = range(0, (epochs_adam_sa + 250), 250)
    x_lbfgs = range((x_adam[(- 1)] + 5), ((epochs_adam_sa + epochs_lbfgs_sa) + 5), 5)
    plt.figure(dpi=dpi, figsize=figsize)
    # Vertical marker at the optimizer switch, spanning the loss jump.
    plt.vlines(x_adam[(- 1)], lbfgs_loss[0], adam_loss[(- 1)], linewidth=3, colors='r')
    plt.plot(x_adam, adam_loss, c='k', linewidth=3, label='ADAM')
    plt.plot(x_lbfgs, lbfgs_loss, linewidth=3, c='r', label='L-BFGS')
    plt.xlabel('Epoch', fontsize=22.5)
    plt.ylabel('SA-PINN Loss', fontsize=22.5)
    plt.grid(True)
    plt.xlim(0, (epochs_adam_sa + epochs_lbfgs_sa))
    plt.yscale('log')
    if (title is not None):
        plt.title(title)
    plt.show()
def register_Ns3InfrastructureWifiMac_methods(root_module, cls):
    # Auto-generated (pybindgen-style) API registration for
    # ns3::InfrastructureWifiMac. Keep the declarations in generated order.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Pure-virtual in C++: subclass bindings must provide Enqueue.
    cls.add_method('Enqueue', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Mac48Address', 'to')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetQosSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetPcfSupported', 'void', [param('bool', 'enable')])
    cls.add_method('GetPcfSupported', 'bool', [], is_const=True)
    return
def FloatDouble(ctx=None):
    """Return the IEEE-754 double-precision floating-point sort for `ctx`
    (the current/default context when `ctx` is None)."""
    resolved_ctx = _get_ctx(ctx)
    return FPSortRef(Z3_mk_fpa_sort_double(resolved_ctx.ref()), resolved_ctx)
def read_wav(filepath: str, target_sr: int = 44100, duration: Optional[float] = None) -> Tuple[np.ndarray, int]:
    """Read an audio file (local path or gs:// URI) and resample to `target_sr`.

    Args:
        filepath: local path or Google Cloud Storage URI (gs://bucket/key).
        target_sr: sample rate the returned samples are resampled to.
        duration: if given, read only the first `duration` seconds (computed
            at `target_sr`; frame count is floored).

    Returns:
        Tuple of (float samples, target_sr).

    Raises:
        AssertionError: if the decoded samples are not floating point.
    """
    print(f'reading audio from {filepath}')
    if filepath.startswith('gs://'):
        # Remote file: fetch the whole blob from GCS into memory.
        gcs = storage.Client(project=GOOGLE_CLOUD_PROJECT)
        (bucket, file_name) = filepath.replace('gs://', '').split('/', maxsplit=1)
        gcs_bucket_obj = gcs.get_bucket(bucket)
        blob = gcs_bucket_obj.blob(file_name)
        bytes_as_string = blob.download_as_string()
    else:
        with open(filepath, 'rb') as f:
            bytes_as_string = f.read()
    # frames=-1 means "read everything" in soundfile.
    (samples, audio_sr) = sf.read(io.BytesIO(bytes_as_string), frames=(math.floor(target_sr * duration) if duration is not None else -1))
    print(f'finished reading audio from {filepath} with sr {audio_sr} with duration {round(len(samples) / audio_sr, 2)}secs')
    if audio_sr != target_sr:
        print(f'resampling audio input {filepath} from {audio_sr} to {target_sr}')
        samples = librosa.resample(samples, orig_sr=audio_sr, target_sr=target_sr)
    # BUGFIX: error message typo ('exected' -> 'expected').
    assert np.issubdtype(samples.dtype, float), f'expected floating-point audio; got type {samples.dtype}'
    return (samples, target_sr)
def build_srm_rom_feat(cfg):
    """Construct the SRM/ROM feature head from the MODEL section of `cfg`."""
    feat_cfg = cfg['MODEL']['FEAT']
    return SRMROMFeat(
        # Feed from the last backbone stage's channel count.
        in_channel=cfg['MODEL']['BACKBONE']['CHANNELS'][-1],
        box_channels=feat_cfg['BOX_CHANNELS'],
        dis_channels=feat_cfg['DIS_CHANNELS'],
        cls_channels=feat_cfg['CLS_CHANNELS'],
        rom_channels=feat_cfg['ROM_CHANNELS'],
    )
def _write_ninja_file_and_compile_objects(sources: List[str], objects, cflags, post_cflags, cuda_cflags, cuda_post_cflags, build_directory: str, verbose: bool, with_cuda: Optional[bool]) -> None:
    """Emit a ninja build file for `sources` and drive ninja to compile them."""
    verify_ninja_availability()
    # Honour $CXX; default to MSVC's 'cl' on Windows, 'c++' elsewhere.
    compiler = os.environ.get('CXX', 'cl' if IS_WINDOWS else 'c++')
    check_compiler_abi_compatibility(compiler)
    if with_cuda is None:
        # Autodetect: any CUDA source file switches on CUDA compilation.
        with_cuda = any(map(_is_cuda_file, sources))
    build_file_path = os.path.join(build_directory, 'build.ninja')
    if verbose:
        print(f'Emitting ninja build file {build_file_path}...')
    _write_ninja_file(
        path=build_file_path,
        cflags=cflags,
        post_cflags=post_cflags,
        cuda_cflags=cuda_cflags,
        cuda_post_cflags=cuda_post_cflags,
        sources=sources,
        objects=objects,
        ldflags=None,
        library_target=None,
        with_cuda=with_cuda,
    )
    if verbose:
        print('Compiling objects...')
    _run_ninja_build(build_directory, verbose, error_prefix='Error compiling objects for extension')
class MHALayerNetTest(MHABaseTest):
    """Feature-network test case wiring the suite's MHA settings into MHANet."""

    def create_feature_network(self, input_shape):
        # All constructor arguments mirror attributes configured on the base
        # test; `input_shape` is accepted for the interface but unused here.
        mha_kwargs = dict(
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            kdim=self.kdim,
            vdim=self.vdim,
            bias=self.bias,
            add_bias_kv=self.add_bias_kv,
            add_zero_attn=self.add_zero_attn,
            batch_first=self.batch_first,
        )
        return MHANet(**mha_kwargs)
def reporthook(*args, **kwargs):
    """Build a urllib-style download report hook backed by a progress bar.

    NOTE(review): `__call__` is resolved from the enclosing scope -- this
    function looks carved out of a tqdm-like class; confirm the surrounding
    context before reuse.
    """
    # Defaults for byte-scaled progress; caller-supplied kwargs win.
    kwargs2 = dict(unit_scale=True, miniters=1)
    kwargs2.update(kwargs)
    # Presumably constructs the underlying progress bar -- TODO confirm.
    bar = __call__(None, *args, **kwargs2)
    class ReportHook(object):
        # Adapts the bar to urlretrieve's (blocks, block_size, total_size) hook.
        def __init__(self, t):
            self.t = t
        def __call__(self, b=1, bsize=1, tsize=None):
            if hasattr(self.t, 'total'):
                if (tsize is not None):
                    self.t.total = tsize
            if hasattr(self.t, 'update'):
                # Advance by the delta between downloaded bytes and current position.
                self.t.update(((b * bsize) - self.t.n))
        def __enter__(self):
            return self
        def __exit__(self, *exc):
            # Delegate cleanup to the wrapped bar if it supports it.
            if hasattr(self.t, '__exit__'):
                self.t.__exit__(*exc)
    return ReportHook(bar)
def copy_from_predicted(mode, train_attention_to_copy, eval_attention_to_copy):
    """Pick train/eval attention by mode; expand hard indices to one-hot scores."""
    if mode == tf.estimator.ModeKeys.TRAIN:
        attention_to_copy = train_attention_to_copy
    else:
        attention_to_copy = eval_attention_to_copy
    # Rank < 3 means hard indices rather than score maps: blow them up into
    # one-hot logits with a very large on-value / very small off-value.
    if len(attention_to_copy.get_shape()) < 3:
        depth = tf.shape(attention_to_copy)[-1]
        attention_to_copy = tf.one_hot(attention_to_copy, depth, on_value=constants.VERY_LARGE, off_value=constants.VERY_SMALL)
    return tf.cast(attention_to_copy, tf.float32)
def create_model(config_path):
    """Instantiate the model described by an OmegaConf YAML config, on CPU."""
    conf = OmegaConf.load(config_path)
    model = instantiate_from_config(conf.model)
    model = model.cpu()
    print(f'Loaded model config from [{config_path}]')
    return model
def move_file(file, dst_dir, overwrite=True):
    """Move `file` into `dst_dir`, suffixing the name (`_1`, `_2`, ...) on collision.

    NOTE(review): despite its name, `overwrite=True` enables the
    rename-on-collision behaviour (nothing is ever clobbered), while
    `overwrite=False` lets shutil.move replace an existing destination.
    Confirm intent with callers before renaming the flag.

    Returns the final destination path.
    """
    basename = os.path.basename(file)
    stem, ext = os.path.splitext(basename)
    destination = os.path.join(dst_dir, basename)
    if overwrite:
        suffix = 0
        # Probe a_1, a_2, ... until a free name is found.
        while os.path.exists(destination):
            suffix += 1
            destination = os.path.join(dst_dir, '%s_%d%s' % (stem, suffix, ext))
    shutil.move(file, destination)
    return destination
def test_add_panel():
    """_add_panel must append a resize-scaled panel and rewind the write cursor."""
    def trigger_add_panel(buffer):
        buffer._add_panel()

    buffer = GrowableBuffer(np.float32, initial=10, resize=2.0)
    buffer._pos = 5
    # Precondition: one panel of the initial size, cursor untouched.
    assert len(buffer._panels) == 1
    assert len(buffer._panels[0]) == 10
    assert buffer._pos == 5
    trigger_add_panel(buffer)
    # A second panel of initial*resize elements was appended and _pos reset.
    assert len(buffer._panels) == 2
    assert len(buffer._panels[0]) == 10
    assert len(buffer._panels[1]) == 20
    assert buffer._pos == 0
def validate_tl_line(line, LTRB=True, withTranscription=True, withConfidence=True, imWidth=0, imHeight=0):
    """Validate one ground-truth/detection text line by attempting to parse it.

    Delegates to get_tl_line_values purely for its side effect: malformed
    lines make the parser raise; the parsed values are discarded.
    """
    get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
def test_multilingual_entity_vocab(multilingual_entity_vocab):
    """Exercise the lookup APIs of the multilingual entity vocabulary fixture."""
    vocab = multilingual_entity_vocab
    # Six unique entities, nine (title, language) entries overall.
    assert len(vocab) == 6
    assert len(list(vocab)) == 9
    assert vocab.contains('', 'ja')
    # [MASK] shares a single id across languages.
    assert vocab.get_id('[MASK]', 'ja') == 2
    assert vocab.get_id('[MASK]', 'en') == 2
    assert vocab.get_id('', 'ja') == 3
    assert vocab.get_title_by_id(3, 'ja') == ''
    assert vocab.get_count_by_title('', 'ja') == 142
# NOTE(review): the original invoked the registration helper as a bare
# statement and discarded its result; it is almost certainly a decorator
# whose '@' was lost in transit (same corruption appears elsewhere in this
# file). Restored as a decorator so the task actually gets registered.
@_task('translation_from_pretrained_xlm', dataclass=TranslationFromPretrainedXLMConfig)
class TranslationFromPretrainedXLMTask(TranslationTask):
    """Translation task whose dictionaries come from a pretrained XLM model."""

    # BUGFIX: `cls` as first parameter shows a stripped @classmethod.
    @classmethod
    def load_dictionary(cls, filename):
        """Load a MaskedLM-compatible dictionary (XLM-style special tokens)."""
        return MaskedLMDictionary.load(filename)
def lift_uniformiser_odd(p, u, n):
    """Lift a generator to Gamma1(p**u) and scale its 1st/3rd entries by p."""
    lifted = lift_gen_to_gamma1(p ** u, n)
    return [p * lifted[0], lifted[1], p * lifted[2], lifted[3]]
def getResource(request):
username = request.session['username']
date = str(request.session['date'])
date = ((date[0:4] + date[4:6]) + date[6:8])
path = ((username + '/') + date)
file = request.FILES['file']
filename = ((path + '/') + file.name)
file.save(filename)
return HttpResponse('file saved') |
class Attention(nn.Module):
    """Multi-head self-attention (ViT-style) with dropout on weights and output.

    Args:
        dim: embedding dimension (must be divisible by num_heads).
        num_heads: number of attention heads.
        qkv_bias: add bias to the fused QKV projection.
        attn_drop: dropout rate on the attention weights.
        proj_drop: dropout rate on the output projection.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # 1/sqrt(d_head) scaling applied to raw dot products.
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, heads, N, C/heads)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        # BUGFIX: the '@' matrix-multiply operators were missing (stripped in
        # transit); 'q k.transpose(...)' and 'attn v' were syntax errors.
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
def prepare_training_data(src1, src2, tgt, output_folder, training_frac):
    """Shuffle aligned source/target file triples and write train/dev/test splits.

    `training_frac` of the shuffled triples go to train; the remainder is
    split evenly between dev and test. When src2 is falsy, src1 doubles as
    the second source and the 'check' flag passed downstream is False.
    """
    assert training_frac < 1.0
    os.makedirs(output_folder, exist_ok=True)
    paths_a = sorted(glob.glob(src1 + '/*'))
    has_second_source = bool(src2)
    paths_b = sorted(glob.glob(src2 + '/*')) if has_second_source else paths_a
    target_paths = sorted(glob.glob(tgt + '/*'))
    assert len(paths_a) == len(paths_b) == len(target_paths)
    triples = list(zip(paths_a, paths_b, target_paths))
    random.shuffle(triples)
    train_end = round(training_frac * len(triples))
    dev_end = train_end + round((1.0 - training_frac) * len(triples) / 2)
    # Guard against splits that leave dev or test empty.
    if dev_end <= train_end or dev_end == len(triples):
        logging.error('ERROR: Fractions for data split are not usable with the dataset size. Adjust the parameter and try again. ')
        return
    write_training_data(triples[:train_end], '{}/train_'.format(output_folder), has_second_source)
    write_training_data(triples[train_end:dev_end], '{}/dev_'.format(output_folder), has_second_source)
    write_training_data(triples[dev_end:], '{}/test_'.format(output_folder), has_second_source)
class RandomScaleCrop(object):
    """Randomly rescale an (image, mask) pair, pad if needed, then random-crop.

    The short side is scaled to a random value in [0.8, 1.2] * base_size,
    the pair is right/bottom padded up to crop_size when too small, and one
    shared random crop window is applied to both image and mask.
    """

    def __init__(self, base_size, crop_size, fill=0):
        self.base_size = base_size
        self.crop_size = crop_size
        self.fill = fill  # pad value for the mask (image always pads with 0)

    def __call__(self, img, mask):
        # Random short-side length; RNG call order matches the original.
        short_size = random.randint(int(self.base_size * 0.8), int(self.base_size * 1.2))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        if short_size < self.crop_size:
            # Pad only the deficient dimension(s) on the right/bottom.
            padh = self.crop_size - oh if oh < self.crop_size else 0
            padw = self.crop_size - ow if ow < self.crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
        # One shared crop window keeps image and mask aligned.
        w, h = img.size
        x1 = random.randint(0, w - self.crop_size)
        y1 = random.randint(0, h - self.crop_size)
        crop_box = (x1, y1, x1 + self.crop_size, y1 + self.crop_size)
        return (img.crop(crop_box), mask.crop(crop_box))
def is_in_notebook():
    """Best-effort detection of a Jupyter-notebook frontend.

    Returns True only when an IPython kernel is running and the environment
    is not a plain console, VS Code, or an old (<11) Databricks runtime.
    Any failure along the way (no IPython, no kernel) yields False.
    """
    try:
        get_ipython = sys.modules['IPython'].get_ipython
        if 'IPKernelApp' not in get_ipython().config:
            raise ImportError('console')
        if 'VSCODE_PID' in os.environ:
            raise ImportError('vscode')
        # BUGFIX: the original compared version strings lexicographically
        # ('9.1' < '11.0' is False as strings), so pre-11 Databricks
        # runtimes slipped through. Compare the numeric major version.
        dbr_version = os.environ.get('DATABRICKS_RUNTIME_VERSION')
        if dbr_version is not None:
            major = dbr_version.split('.', 1)[0]
            if major.isdigit() and int(major) < 11:
                raise ImportError('databricks')
        return importlib.util.find_spec('IPython') is not None
    except (AttributeError, ImportError, KeyError):
        return False
class R1_mAP_reranking(Metric):
    """Rank-1/mAP re-identification metric with k-reciprocal re-ranking.

    Features are accumulated batch-by-batch via update(); compute() splits
    them into query (first num_query rows) and gallery, re-ranks the
    distance matrix, and evaluates CMC / mAP.
    """

    def __init__(self, num_query, max_rank=50, feat_norm='yes'):
        super(R1_mAP_reranking, self).__init__()
        self.num_query = num_query  # first num_query samples form the probe set
        self.max_rank = max_rank
        self.feat_norm = feat_norm  # 'yes' -> L2-normalize features first

    def reset(self):
        # Accumulators filled through update().
        self.feats = []
        self.pids = []
        self.camids = []

    def update(self, output):
        feat, pid, camid = output
        self.feats.append(feat)
        self.pids.extend(np.asarray(pid))
        self.camids.extend(np.asarray(camid))

    def compute(self):
        feats = torch.cat(self.feats, dim=0)
        if self.feat_norm == 'yes':
            print('The test feature is normalized')
            feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        # Query block first, gallery is the remainder.
        query_feats = feats[:self.num_query]
        query_pids = np.asarray(self.pids[:self.num_query])
        query_camids = np.asarray(self.camids[:self.num_query])
        gallery_feats = feats[self.num_query:]
        gallery_pids = np.asarray(self.pids[self.num_query:])
        gallery_camids = np.asarray(self.camids[self.num_query:])
        print('Enter reranking')
        distmat = re_ranking(query_feats, gallery_feats, k1=20, k2=6, lambda_value=0.3)
        cmc, mAP = eval_func(distmat, query_pids, gallery_pids, query_camids, gallery_camids)
        return (cmc, mAP)
def subset_refuter(df: pd.DataFrame, treatment: str, fraction: float = 0.8):
    """Refutation helper: keep a stratified random `fraction` of every treatment group.

    Returns (subset, 1) where the trailing 1 flags the subset as valid.
    """
    def _take_fraction(group):
        return group.sample(frac=fraction)

    subset = df.groupby(treatment, group_keys=False).apply(_take_fraction)
    return (subset, 1)
def get_detection_scores(detection_results_file, rgb_fns, obj_id, score_thr):
    """For each RGB frame, return the best detection score for `obj_id`.

    Args:
        detection_results_file: JSON file mapping 'scene_id/img_id' keys to
            lists of detections ({'obj_id', 'bbox_est', 'score'}).
        rgb_fns: frame paths shaped '.../<scene_id>/<subdir>/<img_id>.<ext>'.
        obj_id: object id whose detections are considered.
        score_thr: detections scoring below this threshold are ignored.

    Returns:
        List (same length/order as rgb_fns) of best scores; -1 where a frame
        has no qualifying detection with a positive score.
    """
    # `with` already closes the file; the original additionally called
    # close() inside the block, which was redundant and has been dropped.
    with open(detection_results_file) as json_file:
        detections = json.load(json_file)
    scores = [-1] * len(rgb_fns)
    for counter, rgb_fn in enumerate(rgb_fns):
        parts = rgb_fn.split('/')
        scene_id = int(parts[-3])
        img_id = int(parts[-1][:-4])  # strip the 4-char extension (e.g. '.png')
        detection_result_key = '{}/{}'.format(scene_id, img_id)
        best_det_score = 0
        for d in detections[detection_result_key]:
            # Skip low-confidence detections and other objects.
            if d['score'] < score_thr or d['obj_id'] != obj_id:
                continue
            best_det_score = max(best_det_score, d['score'])
        # Preserve original semantics: -1 stays when no positive score found.
        if best_det_score > 0:
            scores[counter] = best_det_score
    return scores
class _ReferenceConvBnNd(torch.nn.Conv2d, torch.nn.modules.conv._ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias, padding_mode, eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, False, padding_mode)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.eps = eps
self.momentum = momentum
self.freeze_bn = (freeze_bn if self.training else True)
self.num_features = out_channels
self.gamma = nn.Parameter(torch.Tensor(out_channels))
self.beta = nn.Parameter(torch.Tensor(out_channels))
self.affine = True
self.track_running_stats = True
self.register_buffer('running_mean', torch.zeros(out_channels))
self.register_buffer('running_var', torch.ones(out_channels))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
self.activation_post_process = self.qconfig.activation()
self.weight_fake_quant = self.qconfig.weight()
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_bn_parameters()
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_bn_parameters(self):
self.reset_running_stats()
init.uniform_(self.gamma)
init.zeros_(self.beta)
if (self.bias is not None):
(fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
bound = (1 / math.sqrt(fan_in))
init.uniform_(self.bias, (- bound), bound)
def reset_parameters(self):
super(_ReferenceConvBnNd, self).reset_parameters()
if hasattr(self, 'gamma'):
self.reset_bn_parameters()
def update_bn_stats(self):
self.freeze_bn = False
return self
def freeze_bn_stats(self):
self.freeze_bn = True
return self
def _forward(self, input):
if (self.momentum is None):
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if (self.training and (not self.freeze_bn) and self.track_running_stats):
if (self.num_batches_tracked is not None):
self.num_batches_tracked += 1
if (self.momentum is None):
exponential_average_factor = (1.0 / float(self.num_batches_tracked))
else:
exponential_average_factor = self.momentum
running_std = torch.sqrt((self.running_var + self.eps))
scale_factor = (self.gamma / running_std)
scaled_weight = (self.weight * scale_factor.reshape([(- 1), 1, 1, 1]))
conv = self._conv_forward(input, self.weight_fake_quant(scaled_weight))
if (self.training and (not self.freeze_bn)):
if (self.bias is not None):
conv_orig = ((conv / scale_factor.reshape([1, (- 1), 1, 1])) + self.bias.reshape([1, (- 1), 1, 1]))
else:
conv_orig = (conv / scale_factor.reshape([1, (- 1), 1, 1]))
batch_mean = torch.mean(conv_orig, dim=[0, 2, 3])
batch_var = torch.var(conv_orig, dim=[0, 2, 3], unbiased=False)
n = float((conv_orig.numel() / conv_orig.size()[1]))
unbiased_batch_var = (batch_var * (n / (n - 1)))
batch_rstd = (torch.ones_like(batch_var, memory_format=torch.contiguous_format) / torch.sqrt((batch_var + self.eps)))
conv = (((self.gamma * batch_rstd).reshape([1, (- 1), 1, 1]) * conv_orig) + (self.beta - ((self.gamma * batch_rstd) * batch_mean)).reshape([1, (- 1), 1, 1]))
self.running_mean = ((exponential_average_factor * batch_mean.detach()) + ((1 - exponential_average_factor) * self.running_mean))
self.running_var = ((exponential_average_factor * unbiased_batch_var.detach()) + ((1 - exponential_average_factor) * self.running_var))
elif (self.bias is None):
conv = (conv + (self.beta - ((self.gamma * self.running_mean) / running_std)).reshape([1, (- 1), 1, 1]))
else:
conv = (conv + (((self.gamma * (self.bias - self.running_mean)) / running_std) + self.beta).reshape([1, (- 1), 1, 1]))
return conv
def extra_repr(self):
return super(_ReferenceConvBnNd, self).extra_repr()
def forward(self, input):
return self.activation_post_process(self._forward(input))
def from_float(cls, mod, qconfig=None):
assert (type(mod) == cls._FLOAT_MODULE), ((('qat.' + cls.__name__) + '.from_float only works for ') + cls._FLOAT_MODULE.__name__)
if (not qconfig):
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
qconfig = mod.qconfig
(conv, bn) = (mod[0], mod[1])
qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size, conv.stride, conv.padding, conv.dilation, conv.groups, (conv.bias is not None), conv.padding_mode, bn.eps, bn.momentum, False, qconfig)
qat_convbn.weight = conv.weight
qat_convbn.bias = conv.bias
qat_convbn.gamma = bn.weight
qat_convbn.beta = bn.bias
qat_convbn.running_mean = bn.running_mean
qat_convbn.running_var = bn.running_var
qat_convbn.num_batches_tracked = bn.num_batches_tracked
return qat_convbn |
def test_arraytype_3():
    """A parameterized array's datashape string must round-trip through the parser."""
    array_with_params = ak.with_parameter(
        ak.Array([[1, 2, 3], [], [4, 5]]), 'wonky', {'other': 'JSON'}
    )
    type_string = str(array_with_params.type)
    reparsed = ak.types.from_datashape(type_string, highlevel=False)
    assert str(reparsed) == type_string
def Evaluate(num_epochs):
    """Few-shot evaluation: embed support/query sets and score an SVM per episode.

    NOTE(review): depends on module-level globals (`embedding`,
    `image_datasets`, SVC, tqdm, Variable) and CUDA; `num_epochs` is accepted
    but never used -- confirm whether it was meant to drive the inner loops.
    """
    since = time.time()
    for haha in range(1):  # single iteration; placeholder loop kept as-is
        model = SVC(C=10)
        for epoch in range(1):  # single "epoch"; also a placeholder loop
            for phase in ['test', 'train', 'val']:
                running_loss = 0.0  # NOTE(review): never updated afterwards
                running_corrects = 0.0
                total = 0
                embedding.train(False)  # evaluation mode: freeze the embedding
                # 600 few-shot episodes per phase.
                for i in tqdm(range(600)):
                    (support_feature, support_belong, test_feature, test_belong) = image_datasets[phase].__getitem__(i)
                    support_feature = torch.squeeze(support_feature, 0)
                    test_feature = torch.squeeze(test_feature, 0)
                    # Embed on GPU, then pull features back to CPU for sklearn.
                    support_feature = embedding(Variable(support_feature.cuda())).data.cpu()
                    test_feature = embedding(Variable(test_feature.cuda())).data.cpu()
                    support_feature = torch.squeeze(support_feature, 0).numpy()
                    support_belong = torch.squeeze(support_belong, 0).numpy()
                    test_feature = torch.squeeze(test_feature, 0).numpy()
                    test_belong = torch.squeeze(test_belong, 0).numpy()
                    support_belong = support_belong.ravel()
                    test_belong = test_belong.ravel()
                    # Fit on the support set, score on the query set.
                    model.fit(support_feature, support_belong)
                    Ans = model.predict(test_feature)
                    Ans = numpy.array(Ans)
                    running_corrects += (Ans == test_belong).sum()
                    total += test_feature.shape[0]
                Accuracy = (running_corrects / (total * 1.0))
                info = {'Accuracy': Accuracy}  # NOTE(review): built but never consumed
                print('{}: Accuracy: {:.4f} '.format(phase, Accuracy))
                print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
# NOTE(review): the bare `_module()` call preceding the class discarded its
# result; it is almost certainly a registration decorator whose '@' was
# stripped (the same corruption appears elsewhere in this file). Restored as
# a decorator so the backbone actually gets registered.
@_module()
class pvt_v2_b2(PyramidVisionTransformerV2Original):
    """PVTv2-B2 preset for the Pyramid Vision Transformer V2 backbone."""

    def __init__(self, **kwargs):
        super(pvt_v2_b2, self).__init__(
            patch_sizes=(7, 3, 3, 3),
            strides=(4, 2, 2, 2),
            embed_dims=(64, 128, 320, 512),
            num_heads=(1, 2, 5, 8),
            mlp_ratios=(8, 8, 4, 4),
            qkv_bias=True,
            norm_layer=partial(nn.LayerNorm, eps=1e-06),
            depths=(3, 4, 6, 3),
            sr_ratios=(8, 4, 2, 1),
            drop_rate=0.0,
            drop_path_rate=0.1,
            **kwargs,
        )
def deprecated(func):
    """Decorator that emits a DeprecationWarning each time `func` is called."""
    import functools  # local import keeps the fix self-contained

    # BUGFIX: the original had a bare `(func)` expression here -- almost
    # certainly `@functools.wraps(func)` with its decorator syntax stripped.
    # Restoring it preserves the wrapped function's name and docstring.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn('This function is deprecated.', DeprecationWarning)
        return func(*args, **kwargs)
    return wrapper
def gen_env(render='drgb'):
    """Assemble flow_params for a single-lane highway-merge scenario.

    10% of the highway inflow is RL-controlled; the merge carries a small
    human-only inflow. Returns the flow_params dict consumed by the runner.
    """
    horizon = 750
    flow_rate = 2000
    rl_penetration = 0.1
    num_rl = 5
    # Single-lane highway and merge with a 500 m pre-merge section.
    net_params = deepcopy(ADDITIONAL_NET_PARAMS)
    net_params['merge_lanes'] = 1
    net_params['highway_lanes'] = 1
    net_params['pre_merge_length'] = 500
    vehicles = VehicleParams()
    vehicles.add(veh_id='human', acceleration_controller=(SimCarFollowingController, {}), car_following_params=SumoCarFollowingParams(speed_mode=9), num_vehicles=5)
    vehicles.add(veh_id='rl', acceleration_controller=(RLController, {}), car_following_params=SumoCarFollowingParams(speed_mode=9), num_vehicles=0)
    inflow = InFlows()
    # Split the highway inflow between human and RL vehicles by penetration.
    inflow.add(veh_type='human', edge='inflow_highway', vehs_per_hour=(1 - rl_penetration) * flow_rate, depart_lane='free', depart_speed=10)
    inflow.add(veh_type='rl', edge='inflow_highway', vehs_per_hour=rl_penetration * flow_rate, depart_lane='free', depart_speed=10)
    inflow.add(veh_type='human', edge='inflow_merge', vehs_per_hour=100, depart_lane='free', depart_speed=7.5)
    return dict(
        exp_tag='merge_0',
        env_name=MergePOEnv,
        network=MergeNetwork,
        simulator='traci',
        sim=SumoParams(restart_instance=True, sim_step=0.5, render=render, save_render=True),
        env=EnvParams(horizon=horizon, sims_per_step=2, warmup_steps=0, additional_params={'max_accel': 1.5, 'max_decel': 1.5, 'target_velocity': 20, 'num_rl': num_rl}),
        net=NetParams(inflows=inflow, additional_params=net_params),
        veh=vehicles,
        initial=InitialConfig(),
    )
class TBTimeFunctionTests(unittest.TestCase):
def setUp(self):
    """Create datetime fixtures with hourly spacing.

    dt_a: 100 hours from 2000-11-12; dt_b overlaps dt_a's first 20 hours;
    dt_b2 lies entirely before dt_a; *_shuf are shuffled permutations of
    the same ranges (fixed orders so expected indices are deterministic).
    """
    super(TBTimeFunctionTests, self).setUp()
    dt1 = datetime.datetime(2000, 11, 12)
    self.dt_a = [(dt1 + datetime.timedelta(hours=val)) for val in range(100)]
    self.dt_b = [(dt1 + datetime.timedelta(hours=val)) for val in range((- 20), 20)]
    self.dt_b2 = [(dt1 + datetime.timedelta(hours=val)) for val in range((- 20), (- 2))]
    self.dt_a_shuf = [(dt1 + datetime.timedelta(hours=val)) for val in [86, 75, 53, 74, 35, 57, 63, 84, 82, 89, 45, 10, 41, 78, 14, 62, 98, 80, 42, 24, 31, 2, 34, 85, 28, 47, 21, 81, 54, 7, 12, 18, 83, 5, 9, 3, 15, 40, 69, 38, 97, 36, 70, 25, 66, 23, 59, 94, 99, 60, 1, 61, 11, 90, 52, 30, 13, 64, 49, 77, 27, 6, 16, 4, 76, 58, 19, 22, 39, 55, 87, 37, 95, 29, 33, 72, 32, 48, 50, 8, 96, 93, 44, 73, 26, 71, 88, 51, 79, 17, 20, 92, 68, 65, 91, 46, 0, 67, 56, 43]]
    self.dt_b_shuf = [(dt1 + datetime.timedelta(hours=val)) for val in [2, 4, (- 4), (- 11), 15, (- 20), 9, (- 15), 1, 3, (- 19), (- 3), (- 12), (- 16), (- 13), 18, (- 5), (- 9), (- 1), 16, 19, (- 2), 6, 0, (- 8), 11, (- 14), (- 10), 13, 14, 17, (- 17), 12, (- 6), (- 7), 5, 7, 8, (- 18), 10]]
def tearDown(self):
    """No extra teardown required; defer to the base class."""
    super(TBTimeFunctionTests, self).tearDown()
def test_tOverlap(self):
    """tOverlap returns the index lists of mutually overlapping times, and
    (None, None) when the two series do not overlap at all."""
    real_ans = ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
    ans = tb.tOverlap(self.dt_a, self.dt_b)
    self.assertEqual(real_ans, ans)
    # dt_b2 ends before dt_a starts -> no overlap at all.
    self.assertEqual((None, None), tb.tOverlap(self.dt_a, self.dt_b2))
def test_tOverlap_random(self):
    """tOverlap must also work on unsorted (shuffled) inputs."""
    real_ans = [[11, 14, 21, 29, 30, 31, 33, 34, 35, 36, 50, 52, 56, 61, 62, 63, 66, 79, 89, 96], [0, 1, 4, 6, 8, 9, 15, 19, 20, 22, 23, 25, 28, 29, 30, 32, 35, 36, 37, 39]]
    ans = tb.tOverlap(self.dt_a_shuf, self.dt_b_shuf)
    numpy.testing.assert_array_equal(real_ans, ans)
def test_tOverlapHalf(self):
    """tOverlapHalf returns only the second series' overlapping indices."""
    real_ans = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39]
    ans = tb.tOverlapHalf(self.dt_a, self.dt_b)
    self.assertEqual(real_ans, ans)
def test_tOverlapHalf_random(self):
    """tOverlapHalf must also work on unsorted (shuffled) inputs."""
    real_ans = [0, 1, 4, 6, 8, 9, 15, 19, 20, 22, 23, 25, 28, 29, 30, 32, 35, 36, 37, 39]
    ans = tb.tOverlapHalf(self.dt_a_shuf, self.dt_b_shuf)
    self.assertEqual(real_ans, ans)
def test_tOverlapSorted(self):
    """presort=True on already-sorted input must match the unsorted result."""
    real_ans = ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
    ans = tb.tOverlap(self.dt_a, self.dt_b, presort=True)
    numpy.testing.assert_array_equal(real_ans, ans)
def test_tOverlapHalfSorted(self):
    """presort=True on tOverlapHalf must match the unsorted result."""
    real_ans = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39]
    ans = tb.tOverlapHalf(self.dt_a, self.dt_b, presort=True)
    numpy.testing.assert_array_equal(real_ans, ans)
def test_tCommon(self):
    """tCommon returns boolean membership masks by default; with
    mask_only=False it returns the actual common datetime values, and it
    accepts array inputs on either side."""
    real_ans = (array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool), array([False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool))
    ans = tb.tCommon(self.dt_a, self.dt_b)
    self.assertEqual(real_ans[0].tolist(), ans[0].tolist())
    self.assertEqual(real_ans[1].tolist(), ans[1].tolist())
    # mask_only=False: the shared 20 hourly timestamps themselves.
    real_ans2 = ([datetime.datetime(2000, 11, 12, 0, 0), datetime.datetime(2000, 11, 12, 1, 0), datetime.datetime(2000, 11, 12, 2, 0), datetime.datetime(2000, 11, 12, 3, 0), datetime.datetime(2000, 11, 12, 4, 0), datetime.datetime(2000, 11, 12, 5, 0), datetime.datetime(2000, 11, 12, 6, 0), datetime.datetime(2000, 11, 12, 7, 0), datetime.datetime(2000, 11, 12, 8, 0), datetime.datetime(2000, 11, 12, 9, 0), datetime.datetime(2000, 11, 12, 10, 0), datetime.datetime(2000, 11, 12, 11, 0), datetime.datetime(2000, 11, 12, 12, 0), datetime.datetime(2000, 11, 12, 13, 0), datetime.datetime(2000, 11, 12, 14, 0), datetime.datetime(2000, 11, 12, 15, 0), datetime.datetime(2000, 11, 12, 16, 0), datetime.datetime(2000, 11, 12, 17, 0), datetime.datetime(2000, 11, 12, 18, 0), datetime.datetime(2000, 11, 12, 19, 0)], [datetime.datetime(2000, 11, 12, 0, 0), datetime.datetime(2000, 11, 12, 1, 0), datetime.datetime(2000, 11, 12, 2, 0), datetime.datetime(2000, 11, 12, 3, 0), datetime.datetime(2000, 11, 12, 4, 0), datetime.datetime(2000, 11, 12, 5, 0), datetime.datetime(2000, 11, 12, 6, 0), datetime.datetime(2000, 11, 12, 7, 0), datetime.datetime(2000, 11, 12, 8, 0), datetime.datetime(2000, 11, 12, 9, 0), datetime.datetime(2000, 11, 12, 10, 0), datetime.datetime(2000, 11, 12, 11, 0), datetime.datetime(2000, 11, 12, 12, 0), datetime.datetime(2000, 11, 12, 13, 0), datetime.datetime(2000, 11, 12, 14, 0), datetime.datetime(2000, 11, 12, 15, 0), datetime.datetime(2000, 11, 12, 16, 0), datetime.datetime(2000, 11, 12, 17, 0), datetime.datetime(2000, 11, 12, 18, 0), datetime.datetime(2000, 11, 12, 19, 0)])
    ans = tb.tCommon(self.dt_a, self.dt_b, mask_only=False)
    self.assertEqual(real_ans2[0], ans[0])
    self.assertEqual(real_ans2[1], ans[1])
    # Array input on the first argument.
    ans = tb.tCommon(array(self.dt_a), self.dt_b, mask_only=False)
    numpy.testing.assert_equal(real_ans2[0], ans[0])
    numpy.testing.assert_equal(real_ans2[1], ans[1])
    # Array input on the second argument.
    ans = tb.tCommon(self.dt_a, array(self.dt_b), mask_only=False)
    numpy.testing.assert_equal(real_ans2[0], ans[0])
    numpy.testing.assert_equal(real_ans2[1], ans[1])
def test_eventTimer(self):
    """eventTimer prints a ('<elapsed-seconds>', '<label>') tuple and returns
    the new timestamp; elapsed time should cover the 0.25 s sleep."""
    # Capture stdout so the printed tuple can be checked.
    realstdout = sys.stdout
    output = io.StringIO()
    sys.stdout = output
    t1 = time.time()
    time.sleep(0.25)
    t2 = tb.eventTimer('', t1)
    sys.stdout = realstdout
    result = output.getvalue()
    output.close()
    # Elapsed window is generous to tolerate scheduler jitter.
    self.assertTrue((0.25 <= (t2 - t1) < 0.28))
    # Printed form: ('<seconds to 2 decimals>', '').
    m = re.match("^\\('(\\d\\.\\d\\d)', ''\\)\\n$", result)
    self.assertTrue(m)
    self.assertTrue((0.25 <= float(m.group(1)) < 0.28))
def test_windowMean_outputTimes(self):
    """With st_time set and non-overlapping 1 h windows, output timestamps
    fall at each window's center (half-hour marks)."""
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        wsize = datetime.timedelta(hours=1)
        olap = datetime.timedelta(0)
        # 48 quarter-hour samples covering 12 hours, constant value.
        time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(minutes=(n * 15))) for n in range(48)]
        data = ([10] * 48)
        (outdata, outtime) = tb.windowMean(data, time, winsize=wsize, overlap=olap, st_time=datetime.datetime(2001, 1, 1))
        od_ans = ([10] * 12)
        ot_ans = [datetime.datetime(2001, 1, 1, 0, 30), datetime.datetime(2001, 1, 1, 1, 30), datetime.datetime(2001, 1, 1, 2, 30), datetime.datetime(2001, 1, 1, 3, 30), datetime.datetime(2001, 1, 1, 4, 30), datetime.datetime(2001, 1, 1, 5, 30), datetime.datetime(2001, 1, 1, 6, 30), datetime.datetime(2001, 1, 1, 7, 30), datetime.datetime(2001, 1, 1, 8, 30), datetime.datetime(2001, 1, 1, 9, 30), datetime.datetime(2001, 1, 1, 10, 30), datetime.datetime(2001, 1, 1, 11, 30)]
        numpy.testing.assert_almost_equal(od_ans, outdata)
        self.assertEqual(ot_ans, outtime)
def test_windowMean1(self):
    """1-day windows overlapping by 12 h over alternating 10/20 data with an
    explicit st_time: every window averages to 15, centers on 12 h marks."""
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        wsize = datetime.timedelta(days=1)
        olap = datetime.timedelta(hours=12)
        data = ([10, 20] * 50)
        time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
        (outdata, outtime) = tb.windowMean(data, time, winsize=wsize, overlap=olap, st_time=datetime.datetime(2001, 1, 1))
        od_ans = [15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0]
        ot_ans = [datetime.datetime(2001, 1, 1, 12, 0), datetime.datetime(2001, 1, 2, 0, 0), datetime.datetime(2001, 1, 2, 12, 0), datetime.datetime(2001, 1, 3, 0, 0), datetime.datetime(2001, 1, 3, 12, 0), datetime.datetime(2001, 1, 4, 0, 0), datetime.datetime(2001, 1, 4, 12, 0), datetime.datetime(2001, 1, 5, 0, 0), datetime.datetime(2001, 1, 5, 12, 0)]
        numpy.testing.assert_almost_equal(od_ans, outdata)
        self.assertEqual(ot_ans, outtime)
def test_windowMean2(self):
    """Same as test_windowMean1 but without st_time: windows anchor on the
    first sample, so centers shift to the half-hour marks."""
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        wsize = datetime.timedelta(days=1)
        olap = datetime.timedelta(hours=12)
        data = ([10, 20] * 50)
        time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
        (outdata, outtime) = tb.windowMean(data, time, winsize=wsize, overlap=olap)
        od_ans = [15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0]
        ot_ans = [datetime.datetime(2001, 1, 1, 12, 30), datetime.datetime(2001, 1, 2, 0, 30), datetime.datetime(2001, 1, 2, 12, 30), datetime.datetime(2001, 1, 3, 0, 30), datetime.datetime(2001, 1, 3, 12, 30), datetime.datetime(2001, 1, 4, 0, 30), datetime.datetime(2001, 1, 4, 12, 30), datetime.datetime(2001, 1, 5, 0, 30), datetime.datetime(2001, 1, 5, 12, 30)]
        numpy.testing.assert_almost_equal(od_ans, outdata)
        self.assertEqual(ot_ans, outtime)
def test_windowMean3(self):
    """A 2-day gap in the time series must yield NaN for the empty windows
    while the surrounding windows still average to 15."""
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        wsize = datetime.timedelta(days=1)
        olap = datetime.timedelta(hours=12)
        data = ([10, 20] * 50)
        time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
        # Shift the second half of the series 2 days later to open a gap.
        time[50:] = [(val + datetime.timedelta(days=2)) for val in time[50:]]
        (outdata, outtime) = tb.windowMean(data, time, winsize=wsize, overlap=olap, st_time=datetime.datetime(2001, 1, 1))
        od_ans = [15.0, 15.0, 15.0, 15.0, 15.0, numpy.nan, numpy.nan, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0]
        ot_ans = [datetime.datetime(2001, 1, 1, 12, 0), datetime.datetime(2001, 1, 2, 0, 0), datetime.datetime(2001, 1, 2, 12, 0), datetime.datetime(2001, 1, 3, 0, 0), datetime.datetime(2001, 1, 3, 12, 0), datetime.datetime(2001, 1, 4, 0, 0), datetime.datetime(2001, 1, 4, 12, 0), datetime.datetime(2001, 1, 5, 0, 0), datetime.datetime(2001, 1, 5, 12, 0), datetime.datetime(2001, 1, 6, 0, 0), datetime.datetime(2001, 1, 6, 12, 0), datetime.datetime(2001, 1, 7, 0, 0), datetime.datetime(2001, 1, 7, 12, 0)]
        numpy.testing.assert_almost_equal(od_ans, outdata)
        self.assertEqual(ot_ans, outtime)
def test_windowMean4(self):
    """Numeric winsize/overlap without explicit times: output 'times' are
    sample indices at window centers.

    NOTE(review): wsize, olap, and the constructed `time` list are unused in
    the actual call (windowMean is invoked with numeric winsize=24,
    overlap=12) -- apparently copied from the sibling tests.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('always')
        wsize = datetime.timedelta(days=1)
        olap = datetime.timedelta(hours=12)
        data = ([10, 20] * 50)
        time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
        time[50:] = [(val + datetime.timedelta(days=2)) for val in time[50:]]
        (outdata, outtime) = tb.windowMean(data, winsize=24, overlap=12)
        od_ans = [15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0]
        ot_ans = [12.0, 24.0, 36.0, 48.0, 60.0, 72.0, 84.0]
        numpy.testing.assert_almost_equal(ot_ans, outtime)
        numpy.testing.assert_almost_equal(od_ans, outdata)
def test_windowMean5(self):
    """Degenerate numeric winsize/overlap inputs: fractional window sizes are
    coerced and overlap >= winsize falls back to no overlap.

    NOTE(review): the first expected pair is produced by repeating the exact
    same call, so that comparison is tautological; only the later pairs
    (winsize=0.4 vs 1.0, overlap=2 vs 0) assert real coercion behaviour.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', 'windowmean\\:', UserWarning, 'spacepy.toolbox$')
        wsize = datetime.timedelta(days=1)
        olap = datetime.timedelta(hours=12)
        data = ([10, 20] * 50)
        time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
        time[50:] = [(val + datetime.timedelta(days=2)) for val in time[50:]]
        (outdata, outtime) = tb.windowMean(data, winsize=24.6, overlap=12)
        (od_ans, ot_ans) = tb.windowMean(data, winsize=24.6, overlap=12)
        numpy.testing.assert_almost_equal(ot_ans, outtime)
        numpy.testing.assert_almost_equal(od_ans, outdata)
        # Sub-1 window sizes are clamped up to a single sample window.
        (outdata, outtime) = tb.windowMean(data, winsize=0.4)
        (od_ans, ot_ans) = tb.windowMean(data, winsize=1.0)
        numpy.testing.assert_almost_equal(ot_ans, outtime)
        numpy.testing.assert_almost_equal(od_ans, outdata)
        # Overlap larger than the window collapses to zero overlap.
        (outdata, outtime) = tb.windowMean(data, winsize=1.0, overlap=2)
        (od_ans, ot_ans) = tb.windowMean(data, winsize=1.0, overlap=0)
        numpy.testing.assert_almost_equal(ot_ans, outtime)
        numpy.testing.assert_almost_equal(od_ans, outdata)
    def test_windowMean_op(self):
        """windowMean with op=len counts points per overlapping 1-day window."""
        with warnings.catch_warnings():
            warnings.simplefilter('always')
            wsize = datetime.timedelta(days=1)
            olap = datetime.timedelta(hours=12)
            # Hourly samples (offset by 30 min) spanning just over 4 days.
            data = ([10, 20] * 50)
            time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
            (outdata, outtime) = tb.windowMean(data, time, winsize=wsize, overlap=olap, st_time=datetime.datetime(2001, 1, 1), op=len)
            # Full 24h windows contain 24 points; the trailing windows are partial.
            od_ans = [24, 24, 24, 24, 24, 24, 24, 16, 4]
            # Window centers every 12h starting at noon of the first day.
            ot_ans = [datetime.datetime(2001, 1, 1, 12, 0), datetime.datetime(2001, 1, 2, 0, 0), datetime.datetime(2001, 1, 2, 12, 0), datetime.datetime(2001, 1, 3, 0, 0), datetime.datetime(2001, 1, 3, 12, 0), datetime.datetime(2001, 1, 4, 0, 0), datetime.datetime(2001, 1, 4, 12, 0), datetime.datetime(2001, 1, 5, 0, 0), datetime.datetime(2001, 1, 5, 12, 0)]
            numpy.testing.assert_almost_equal(od_ans, outdata)
            self.assertEqual(ot_ans, outtime)
    def test_windowMean_op2(self):
        """windowMean with op=len and zero overlap: non-overlapping 1-day windows."""
        with warnings.catch_warnings():
            warnings.simplefilter('always')
            wsize = datetime.timedelta(days=1)
            olap = datetime.timedelta(0)
            # Hourly samples (offset by 30 min) spanning just over 4 days.
            data = ([10, 20] * 50)
            time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
            (outdata, outtime) = tb.windowMean(data, time, winsize=wsize, overlap=olap, st_time=datetime.datetime(2001, 1, 1), op=len)
            # Four full 24-point days plus a 4-point remainder.
            od_ans = [24, 24, 24, 24, 4]
            # One center per day at noon.
            ot_ans = [datetime.datetime(2001, 1, 1, 12, 0), datetime.datetime(2001, 1, 2, 12, 0), datetime.datetime(2001, 1, 3, 12, 0), datetime.datetime(2001, 1, 4, 12, 0), datetime.datetime(2001, 1, 5, 12, 0)]
            numpy.testing.assert_almost_equal(od_ans, outdata)
            self.assertEqual(ot_ans, outtime)
    def test_windowMeanInputs(self):
        """Invalid windowMean argument combinations raise TypeError/ValueError."""
        wsize = datetime.timedelta(days=1)
        olap = datetime.timedelta(hours=12)
        data = ([10, 20] * 50)
        time = [(datetime.datetime(2001, 1, 1) + datetime.timedelta(hours=n, minutes=30)) for n in range(100)]
        # Mismatched data/time lengths.
        self.assertRaises(ValueError, tb.windowMean, data[1:], time, winsize=wsize, overlap=olap, st_time=datetime.datetime(2001, 1, 1))
        # Non-timedelta winsize/overlap with datetime times.
        self.assertRaises(TypeError, tb.windowMean, data, time, winsize='bad', overlap=olap, st_time=datetime.datetime(2001, 1, 1))
        self.assertRaises(TypeError, tb.windowMean, data, time, winsize=wsize, overlap='bad', st_time=datetime.datetime(2001, 1, 1))
        # Overlap supplied without a window size.
        self.assertRaises(TypeError, tb.windowMean, data, time, overlap=olap, st_time=datetime.datetime(2001, 1, 1))
        # Overlap larger than the window.
        olap = datetime.timedelta(days=2)
        self.assertRaises(ValueError, tb.windowMean, data, time, winsize=wsize, overlap=olap, st_time=datetime.datetime(2001, 1, 1))
        # Numeric times with a timedelta overlap and no winsize.
        time = list(range(len(time)))
        self.assertRaises(TypeError, tb.windowMean, data, time, overlap=olap, st_time=datetime.datetime(2001, 1, 1))
def test_windowMean_2d(self):
wsize = 2
olap = 0
data = numpy.arange(20).reshape(10, 2)
out = tb.windowMean(data, winsize=wsize, overlap=olap)
ansd = [1.5, 5.5, 9.5, 13.5]
numpy.testing.assert_almost_equal(ansd, out[0])
anst = [1.0, 3.0, 5.0, 7.0]
numpy.testing.assert_almost_equal(anst, out[1]) |
class dts_ConvAI2(object):
    """Loader for ConvAI2 persona-chat data in the ParlAI numbered-line text format.

    Each sample is a block of lines numbered from 1, holding persona statements
    ("your persona: ..." / "partner's persona: ...") followed by tab-separated
    dialog turns, optionally with '|'-separated response candidates.
    """

    def __init__(self, path=data_path):
        # Directory containing {mode}_none_{revised|original}[_no_cands].txt files.
        self.path = path

    def _txt_to_json(self, txt_path, mode, cands):
        """Parse one ConvAI2 .txt file into a list of sample dicts."""

        def pop_one_sample(lines):
            # `lines` holds the file reversed, so pop() yields lines in file order.
            self_persona = []
            other_persona = []
            dialog = []
            candidates = []
            started = False
            while (len(lines) > 0):
                line = lines.pop()
                (line_id, context) = line.split(' ', 1)
                line_id = int(line_id)
                context = context.strip()
                if (started == False):
                    # Every sample block must begin with line number 1.
                    assert (line_id == 1)
                    started = True
                elif (line_id == 1):
                    # Start of the next sample: push the line back and stop.
                    lines.append(line)
                    break
                if context.startswith("partner's persona: "):
                    assert (mode in ['both', 'other'])
                    other_persona.append(context[len("partner's persona: "):])
                elif context.startswith('your persona: '):
                    assert (mode in ['both', 'self'])
                    # BUG FIX: was context[13:], which kept a leading space —
                    # 'your persona: ' is 14 characters long.
                    self_persona.append(context[len('your persona: '):])
                elif (cands == False):
                    try:
                        (uttr, response) = context.split('\t', 2)[:2]
                        dialog.append(uttr)
                        dialog.append(response)
                    except ValueError:
                        # No tab on the line: a lone utterance with no response.
                        dialog.append(context)
                else:
                    (uttr, response, _, negs) = context.split('\t', 4)[:4]
                    dialog.append(uttr)
                    dialog.append(response)
                    # Candidates belong to the utterance turn; the response turn
                    # keeps a None placeholder so indices line up with `dialog`.
                    candidates.append(negs.split('|'))
                    candidates.append(None)
            return {'self_persona': self_persona, 'other_persona': other_persona, 'dialog': dialog, 'candidates': candidates}

        # Read and reverse once so pop_one_sample can consume via pop().
        with open(txt_path, 'r') as f:
            lines = f.readlines()[::(- 1)]
        samples = []
        while (len(lines) > 0):
            samples.append(pop_one_sample(lines))
        return samples

    def get_data(self, mode='train', revised=False, cands=False):
        """Load samples for one split; `mode` is 'train', 'valid', 'test' or 'all'."""
        txt_path = os.path.join(self.path, '{}_{}_{}{}.txt'.format(mode, 'none', ('revised' if (revised is True) else 'original'), ('' if (cands is True) else '_no_cands')))
        assert (mode in ['train', 'valid', 'test', 'all'])
        print('Get dialog from ', txt_path)
        assert os.path.exists(txt_path)
        return self._txt_to_json(txt_path, mode, cands)

    def get_dialogs(self, mode='all'):
        """Return just the dialog turn lists for every sample in `mode`."""
        return [sample['dialog'] for sample in self.get_data(mode, False, False)]
def register_Ns3SimpleRefCount__Ns3ChannelCoordinationListener_Ns3Empty_Ns3DefaultDeleter__lt__ns3ChannelCoordinationListener__gt___methods(root_module, cls):
    """Register constructors for the SimpleRefCount<ChannelCoordinationListener,...> binding.

    Appears to be auto-generated ns-3 binding-registration code; prefer
    regenerating over hand-editing.
    """
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor taking a const reference to the same instantiation.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::ChannelCoordinationListener, ns3::empty, ns3::DefaultDeleter< ns3::ChannelCoordinationListener > > const &', 'o')])
    return
def main():
    """CLI entry point: build an occupancy grid from the masks in a data folder."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_folder', type=Path, required=True, help='Folder containing the mask data.')
    parser.add_argument('--grid_resolution', type=int, required=True, help='Resolution of the generated occupancy grid.')
    parser.add_argument('--camera_coverage_threshold', type=int, required=True, help='Camera coverage threshold for a cell to count as covered.')
    args = parser.parse_args()
    # argparse already converted --data_folder via type=Path, so no re-wrapping
    # is needed (the original redundantly called Path() on it again).
    generate_occupancy_grid_from_masks(data_folder=args.data_folder, grid_resolution=args.grid_resolution, camera_coverage_threshold=args.camera_coverage_threshold)
def register_Ns3RlcListElement_methods(root_module, cls):
    """Register constructors and attributes for the ns3::RlcListElement binding.

    Appears to be auto-generated ns-3 binding-registration code; prefer
    regenerating over hand-editing.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::RlcListElement const &', 'arg0')])
    # Public mutable member holding the element's RLC PDU descriptors.
    cls.add_instance_attribute('m_rlcPduElements', 'std::vector< ns3::RlcPduInfo >', is_const=False)
    return
def load(data_dir, subset='train'):
    """Return (images, labels) arrays for the CIFAR-10 'train' or 'test' subset."""
    maybe_download_and_extract(data_dir)
    batch_dir = os.path.join(data_dir, 'cifar-10-batches-py')
    if (subset == 'train'):
        # Training data is spread over five pickled batch files.
        batches = [unpickle(os.path.join(batch_dir, 'data_batch_%d' % i)) for i in range(1, 6)]
        trainx = np.concatenate([b['x'] for b in batches], axis=0)
        trainy = np.concatenate([b['y'] for b in batches], axis=0)
        return (trainx, trainy)
    if (subset == 'test'):
        # The test split lives in a single batch file.
        batch = unpickle(os.path.join(batch_dir, 'test_batch'))
        return (batch['x'], batch['y'])
    raise NotImplementedError('subset should be either train or test')
def Curve(F, A=None):
    """Return the curve defined by ``F`` in the ambient space ``A``.

    ``F`` may be a polynomial, a list/tuple of polynomials, or a scheme;
    ``A``, when given, must be an affine or projective ambient space whose
    coordinate ring contains the polynomials. The concrete curve class
    returned depends on the ambient-space type, its dimension, the base
    field, and (over finite fields or QQ) whether the defining ideal is
    prime / the polynomial is irreducible and reduced.
    """
    if (A is None):
        # No ambient space given: infer one from F itself.
        if (is_AmbientSpace(F) and (F.dimension() == 1)):
            # F is itself a 1-dimensional ambient space: the zero polynomial
            # cuts out all of it.
            return Curve(F.coordinate_ring().zero(), F)
        if is_AlgebraicScheme(F):
            # Reuse the scheme's own defining data.
            return Curve(F.defining_polynomials(), F.ambient_space())
        if isinstance(F, (list, tuple)):
            # Several polynomials: decide affine vs projective by homogeneity.
            P = Sequence(F).universe()
            if (not is_MPolynomialRing(P)):
                raise TypeError('universe of F must be a multivariate polynomial ring')
            for f in F:
                if (not f.is_homogeneous()):
                    A = AffineSpace(P.ngens(), P.base_ring(), names=P.variable_names())
                    A._coordinate_ring = P
                    break
            else:
                # Loop completed: all generators homogeneous -> projective space.
                A = ProjectiveSpace((P.ngens() - 1), P.base_ring(), names=P.variable_names())
                A._coordinate_ring = P
        elif isinstance(F, MPolynomial):
            # A single polynomial defines a plane curve (or all of a line).
            P = F.parent()
            k = F.base_ring()
            if (not k.is_field()):
                if k.is_integral_domain():
                    # Move to the fraction field so the curve constructors apply.
                    P = P.change_ring(k.fraction_field())
                    F = P(F)
                    k = F.base_ring()
                else:
                    raise TypeError('not a multivariate polynomial over a field or an integral domain')
            if (F.parent().ngens() == 2):
                # Two variables: affine plane curve.
                if (F == 0):
                    raise ValueError('defining polynomial of curve must be nonzero')
                A = AffineSpace(2, P.base_ring(), names=P.variable_names())
                A._coordinate_ring = P
            elif (F.parent().ngens() == 3):
                # Three variables: projective plane curve; degree 2 over a field
                # is constructed as a Conic instead.
                if (F == 0):
                    raise ValueError('defining polynomial of curve must be nonzero')
                if ((F.total_degree() == 2) and k.is_field()):
                    return Conic(k, F)
                A = ProjectiveSpace(2, P.base_ring(), names=P.variable_names())
                A._coordinate_ring = P
            elif (F.parent().ngens() == 1):
                # One variable: only the zero polynomial defines a curve there.
                if (not F.is_zero()):
                    raise ValueError('defining polynomial of curve must be zero if the ambient space is of dimension 1')
                A = AffineSpace(1, P.base_ring(), names=P.variable_names())
                A._coordinate_ring = P
            else:
                raise TypeError('number of variables of F (={}) must be 2 or 3'.format(F))
            F = [F]
        else:
            raise TypeError('F (={}) must be a multivariate polynomial'.format(F))
    else:
        # Ambient space supplied: validate F against its coordinate ring.
        if (not is_AmbientSpace(A)):
            raise TypeError('ambient space must be either an affine or projective space')
        if (not isinstance(F, (list, tuple))):
            F = [F]
        if (not all(((f.parent() == A.coordinate_ring()) for f in F))):
            raise TypeError('need a list of polynomials of the coordinate ring of {}'.format(A))
    n = A.dimension_relative()
    if (n < 1):
        raise TypeError('ambient space should be an affine or projective space of positive dimension')
    k = A.base_ring()
    # Dispatch on ambient-space type, dimension and base field.
    if is_AffineSpace(A):
        if (n != 2):
            # Affine curve in higher-dimensional space.
            if isinstance(k, FiniteField):
                if A.coordinate_ring().ideal(F).is_prime():
                    return IntegralAffineCurve_finite_field(A, F)
            if (k in Fields()):
                if ((k == QQ) and A.coordinate_ring().ideal(F).is_prime()):
                    return IntegralAffineCurve(A, F)
                return AffineCurve_field(A, F)
            return AffineCurve(A, F)
        # n == 2: affine plane curve, needs exactly one nonconstant polynomial.
        if (not ((len(F) == 1) and (F[0] != 0) and (F[0].degree() > 0))):
            raise TypeError('need a single nonconstant polynomial to define a plane curve')
        F = F[0]
        if isinstance(k, FiniteField):
            if _is_irreducible_and_reduced(F):
                return IntegralAffinePlaneCurve_finite_field(A, F)
            return AffinePlaneCurve_finite_field(A, F)
        if (k in Fields()):
            if ((k == QQ) and _is_irreducible_and_reduced(F)):
                return IntegralAffinePlaneCurve(A, F)
            return AffinePlaneCurve_field(A, F)
        return AffinePlaneCurve(A, F)
    elif is_ProjectiveSpace(A):
        if (n != 2):
            # Projective curve in higher-dimensional space: generators must be
            # homogeneous.
            if (not all((f.is_homogeneous() for f in F))):
                raise TypeError('polynomials defining a curve in a projective space must be homogeneous')
            if isinstance(k, FiniteField):
                if A.coordinate_ring().ideal(F).is_prime():
                    return IntegralProjectiveCurve_finite_field(A, F)
            if (k in Fields()):
                if ((k == QQ) and A.coordinate_ring().ideal(F).is_prime()):
                    return IntegralProjectiveCurve(A, F)
                return ProjectiveCurve_field(A, F)
            return ProjectiveCurve(A, F)
        # n == 2: projective plane curve, one nonconstant homogeneous polynomial.
        if (not ((len(F) == 1) and (F[0] != 0) and (F[0].degree() > 0))):
            raise TypeError('need a single nonconstant polynomial to define a plane curve')
        F = F[0]
        if (not F.is_homogeneous()):
            raise TypeError('{} is not a homogeneous polynomial'.format(F))
        if isinstance(k, FiniteField):
            if _is_irreducible_and_reduced(F):
                return IntegralProjectivePlaneCurve_finite_field(A, F)
            return ProjectivePlaneCurve_finite_field(A, F)
        if (k in Fields()):
            if ((k == QQ) and _is_irreducible_and_reduced(F)):
                return IntegralProjectivePlaneCurve(A, F)
            return ProjectivePlaneCurve_field(A, F)
        return ProjectivePlaneCurve(A, F)
    else:
        raise TypeError('ambient space neither affine nor projective')
class Logger(nn.Module):
    """No-op module that carries a dict of run statistics alongside a network."""

    def __init__(self):
        super().__init__()
        # Arbitrary statistics, keyed by name; populated elsewhere.
        self.stats = {}

    def forward(self, x):
        # Intentionally does nothing: the module exists only to hold `stats`.
        pass
def load_config_from_json(filepath):
    """Build an OmegaConf config from a JSON file of {key: {'value': ...}} entries.

    Each top-level key's payload is read from its 'value' sub-key and fed to
    OmegaConf as a "key=value" dotlist entry.
    """
    # Binary mode lets json.load detect the encoding (incl. BOM) itself.
    with open(filepath, 'rb') as f:
        data = json.load(f)
    # Iterate items() instead of keys()+indexing (single lookup per entry).
    dot_list = [f"{key}={entry['value']}" for key, entry in data.items()]
    return OmegaConf.from_dotlist(dot_list)
def test_generator(multiple_databases) -> TestGenerator:
    """Return a TestGenerator wired to the given databases.

    Presumably a pytest fixture (any decorator is outside this view) —
    confirm against the surrounding file.
    """
    return TestGenerator(databases=multiple_databases)
def save_scripts(path, scripts_to_save=None):
    """Snapshot the given script files into ``path``/scripts/.

    ``scripts_to_save`` is an iterable of (relative) script paths; each is
    copied to path/scripts/<script>, creating intermediate directories as
    needed. When it is None, only the scripts directory is created.
    """
    scripts_dir = os.path.join(path, 'scripts')
    # exist_ok replaces the original's racy "check then makedirs".
    os.makedirs(scripts_dir, exist_ok=True)
    if (scripts_to_save is not None):
        for script in scripts_to_save:
            dst_path = os.path.join(scripts_dir, script)
            # A script may live in a subdirectory; ensure the target directory
            # exists up front (the original retried the copy after an IOError).
            os.makedirs(os.path.dirname(dst_path), exist_ok=True)
            shutil.copy(script, dst_path)
class BinanceWithdraw(VirtualFunctionTool):
    """Declarative specification of the BinanceWithdraw virtual tool.

    This class only describes the tool's interface (name, summary,
    parameter/return/exception schemas); there is no runtime logic here.
    """
    name = 'BinanceWithdraw'
    # Human-readable description of what the tool does.
    summary = "Withdraw a specified amount of cryptocurrency or fiat money to a specified destination address or bank account from user's account. The bank account id must be retrieved using the RetrieveAccounts tool."
    # Input schema: currency and amount are required; exactly which destination
    # field applies depends on whether the asset is crypto or fiat.
    parameters: List[ArgParameter] = [{'name': 'currency', 'type': 'string', 'description': "The currency to withdraw, one of ['USD', 'BTC', 'ETH', etc.].", 'required': True}, {'name': 'amount', 'type': 'number', 'description': 'The amount of cryptocurrency to withdraw.', 'required': True}, {'name': 'destination_address', 'type': 'string', 'description': 'The suitable blockchain address for withdrawing the cryptocurrency, which must be a complete and valid legacy Bitcoin address or SegWit address.', 'required': False}, {'name': 'to_account_id', 'type': 'string', 'description': "The user's saved bank account id to withdraw fiat money to.", 'required': False}]
    # Output schema.
    returns: List[ArgReturn] = [{'name': 'transaction_id', 'type': 'string', 'description': 'The unique identifier of the withdrawal transaction.'}, {'name': 'status', 'type': 'string', 'description': 'The status of the withdrawal transaction.'}]
    # Declared failure modes.
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': 'The currency is malformed or empty, or the amount is not positive, the destination_address is incomplete, or the destination_address is not a valid Bitcoin address or SegWit address.'}, {'name': 'NotFoundException', 'description': 'The to_account_id does not exist.'}]
class VGG16(nn.Module):
    """VGG-16 classifier adapted to ``n_inputs`` input channels.

    The stock torchvision VGG-16 first conv (3-channel) is replaced by one
    accepting ``n_inputs`` channels; the rest of the feature extractor is
    reused. The classifier head expects 8x8x512 feature maps — presumably a
    256x256 input image; confirm against the data pipeline.
    """
    def __init__(self, n_inputs=12, numCls=17):
        super().__init__()
        # Untrained backbone: only the architecture is reused.
        vgg = models.vgg16(pretrained=False)
        # Swap in a first conv for n_inputs channels, keep the remaining layers.
        self.encoder = nn.Sequential(nn.Conv2d(n_inputs, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), *vgg.features[1:])
        self.classifier = nn.Sequential(nn.Linear(((8 * 8) * 512), 4096, bias=True), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, 4096, bias=True), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, numCls, bias=True))
        # Project-specific initialisers (defined elsewhere in this file).
        self.apply(weights_init_kaiming)
        self.apply(fc_init_weights)
    def forward(self, x):
        x = self.encoder(x)
        # Flatten the feature maps before the fully-connected head.
        x = x.view(x.size(0), (- 1))
        logits = self.classifier(x)
        return logits
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet BasicBlock, expansion 1).

    When ``last`` is True the final ReLU is skipped so the raw pre-activation
    features of the block can be read out.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, last=False):
        super(BasicBlock, self).__init__()
        # Main path: conv-bn-relu-conv-bn. Attribute registration order matches
        # the original so state_dict keys are unchanged.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional projection applied to the skip path when shapes differ.
        self.downsample = downsample
        self.stride = stride
        self.last = last

    def forward(self, x):
        # Skip path: identity, or a learned projection of the input.
        shortcut = x if self.downsample is None else self.downsample(x)
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        h = h + shortcut
        if not self.last:
            h = self.relu(h)
        return h
def new(mode, size, color=0):
    """Create a new image with the given mode and size, filled with ``color``."""
    _check_size(size)

    if color is None:
        # No fill requested: return an uninitialised image buffer.
        return Image()._new(core.new(mode, size))

    if isinstance(color, str):
        # Resolve a color name/spec string for this mode.
        from . import ImageColor
        color = ImageColor.getcolor(color, mode)

    im = Image()
    if mode == 'P' and isinstance(color, (list, tuple)) and len(color) in [3, 4]:
        # RGB(A) fill on a palette image: allocate a palette slot for the color
        # and fill with its index instead.
        from . import ImagePalette
        im.palette = ImagePalette.ImagePalette()
        color = im.palette.getcolor(color)
    return im._new(core.fill(mode, size, color))
def embedded_cnn(x):
    """Apply parallel n-gram Conv1D + global-max-pool branches to embedded input.

    One branch is built per (kernel size, filter count) pair; with several
    branches their pooled outputs are concatenated.
    """
    embedded = get_emdedding_layer()(x)
    branches = []
    for kernel_size, n_filters in zip(KERNEL_SIZE, NUMBER_OF_FILTERS):
        convolved = Conv1D(filters=n_filters, kernel_size=kernel_size, padding='valid', activation='relu')(embedded)
        branches.append(GlobalMaxPooling1D()(convolved))
    if len(branches) == 1:
        return branches[0]
    return Concatenate()(branches)
def root_mean_square_error(y_true, y_pred):
    """Return the root-mean-square error between paired truths and predictions."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    # sqrt of the mean squared residual.
    return np.sqrt(np.mean(np.square(y_pred - y_true)))
class Function_tanh(GinacFunction):
    """Symbolic hyperbolic tangent, delegated to GiNaC's 'tanh'."""
    def __init__(self):
        # Register under the name 'tanh' with LaTeX rendering \tanh.
        GinacFunction.__init__(self, 'tanh', latex_name='\\tanh')
class Rubiks(JoinFeature):
    """Feature describing the 'rubiks' SPKG: present when all its solvers are."""
    def __init__(self):
        # Joins the individual solver-executable features from the rubiks package.
        JoinFeature.__init__(self, 'rubiks', [cu2(), size222(), optimal(), mcube(), dikcube(), cubex()], spkg='rubiks')
class GradientRegistry(object):
    """Registry mapping operator types to gradient-construction functions.

    Gradient creation first asks the C++ backend; on failure it falls back to
    Python functions registered via ``RegisterGradient``.

    NOTE(review): these methods take ``cls`` but no ``@classmethod`` decorators
    are visible in this view — confirm they were not lost upstream.
    """
    # Shared op-type -> Python gradient-maker table.
    gradient_registry_ = {}
    def RegisterGradient(cls, op_type):
        """Decorator factory: register the wrapped func as gradient maker for op_type."""
        def Wrapper(func):
            cls.gradient_registry_[op_type] = func
            return func
        return Wrapper
    def _GetGradientForOpCC(cls, op_def, g_output):
        """Ask the C++ backend for gradient defs, converting wrappers both ways."""
        def from_untyped(grad):
            # Wrap a Python-side gradient — None, (indices, values), or a dense
            # blob reference — into a C GradientWrapper.
            if (grad is None):
                w = C.GradientWrapper()
                assert w.is_empty()
                return w
            try:
                (indices, values) = grad
                w = C.GradientWrapper()
                w.indices = indices
                w.values = values
                assert w.is_sparse()
                return w
            except ValueError:
                # Not a 2-tuple: treat as a dense gradient.
                w = C.GradientWrapper()
                w.dense = grad
                assert w.is_dense()
                return w
        g_output = [from_untyped(grad) for grad in g_output]
        (grad_defs_str, g_input) = C.get_gradient_defs(op_def.SerializeToString(), g_output)
        def to_untyped(grad_wrapper):
            # Inverse of from_untyped: unwrap a C GradientWrapper.
            if grad_wrapper.is_empty():
                return None
            if grad_wrapper.is_sparse():
                return GradientSlice(grad_wrapper.indices, grad_wrapper.values)
            assert grad_wrapper.is_dense()
            return grad_wrapper.dense
        g_input = [to_untyped(grad_wrapper) for grad_wrapper in g_input]
        grad_defs = []
        for grad_def_str in grad_defs_str:
            # The backend returns serialized OperatorDef protos; parse them.
            grad_def = caffe2_pb2.OperatorDef()
            grad_def.ParseFromString(grad_def_str)
            grad_defs.append(grad_def)
        return (grad_defs, g_input)
    def GetGradientForOp(cls, op, g_output):
        """Return (gradient ops, gradient inputs) for op: C++ first, registry fallback."""
        try:
            (gradient_ops, g_input) = cls._GetGradientForOpCC(op, g_output)
        except Exception as e:
            # Fall back to a Python-registered gradient maker, if any.
            if (op.type in cls.gradient_registry_):
                (gradient_ops, g_input) = cls.gradient_registry_[op.type](op, g_output)
            else:
                raise Exception('Exception when creating gradient for [{}]:{}.\nOp: \n{}'.format(op.type, e, str(op)))
        if (gradient_ops is None):
            return ([], g_input)
        # Normalize a single op to a one-element list.
        if (type(gradient_ops) is not list):
            gradient_ops = [gradient_ops]
        return (gradient_ops, g_input)
    def GetBackwardPass(cls, operators, ys, ys_generate_gradient=False):
        """Build the backward pass for ``operators`` producing outputs ``ys``."""
        ir = IR(operators)
        return ir.GetBackwardPass(ys)
class ColaProcessor(DataProcessor):
    """Data processor for the CoLA GLUE task (deprecated; warns on construction)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)
    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensor dict with idx/sentence/label fields."""
        return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence'].numpy().decode('utf-8'), None, str(tensor_dict['label'].numpy()))
    def get_train_examples(self, data_dir):
        """Examples from data_dir/train.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')
    def get_dev_examples(self, data_dir):
        """Examples from data_dir/dev.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')
    def get_test_examples(self, data_dir):
        """Examples from data_dir/test.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')
    def get_labels(self):
        """The two CoLA label strings."""
        return ['0', '1']
    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples.

        The test split has a header row, stores the sentence in column 1 and
        carries no labels; train/dev store the sentence in column 3 and the
        label in column 1.
        """
        test_mode = (set_type == 'test')
        if test_mode:
            # Skip the header line present only in test.tsv.
            lines = lines[1:]
        text_index = (1 if test_mode else 3)
        examples = []
        for (i, line) in enumerate(lines):
            guid = f'{set_type}-{i}'
            text_a = line[text_index]
            label = (None if test_mode else line[1])
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
class SimpleAEWithLinear(BaseAE):
    """Convolutional autoencoder with a linear bottleneck of size ``latent_dim``.

    Encoder: two stride-2 convs then a 7x7 valid conv; a linear layer maps the
    flattened features to the latent vector, and the decoder mirrors the path
    with transposed convolutions.
    """
    def __init__(self, input_shape: Tuple[int], latent_dim: int, visualisation_channels):
        super().__init__(visualisation_channels)
        self.latent_dim = latent_dim
        # input_shape is (channels, height, width)-like; only [0] and [1] are used.
        channels = input_shape[0]
        self.encoder = nn.Sequential(nn.Conv2d(channels, 64, 3, stride=2, padding=1), nn.LeakyReLU(), nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.LeakyReLU(), nn.Conv2d(128, 256, 7), nn.LeakyReLU())
        # Spatial size after the encoder: two stride-2 convs halve twice, then
        # the 7-wide valid conv subtracts 6 per side.
        self.width = (((input_shape[1] // 2) // 2) - 6)
        self.lin_enc = nn.Linear(((256 * self.width) * self.width), latent_dim)
        self.lin_dec = nn.Linear(latent_dim, ((256 * self.width) * self.width))
        self.decoder = nn.Sequential(nn.ConvTranspose2d(256, 128, 7), nn.LeakyReLU(), nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1), nn.LeakyReLU(), nn.ConvTranspose2d(64, channels, 3, stride=2, padding=1, output_padding=1))
    def encode(self, input: Tensor) -> Tensor:
        """Map an input batch to latent vectors of shape (batch, latent_dim)."""
        x = self.encoder(input)
        # Flatten feature maps before the linear bottleneck.
        return self.lin_enc(x.view(x.shape[0], (- 1)))
    def decode(self, input: Tensor) -> Tensor:
        """Map latent vectors back to reconstructed images."""
        x = self.lin_dec(input)
        # Restore the (256, width, width) feature-map layout for the decoder.
        x = x.view(input.shape[0], 256, self.width, self.width)
        return self.decoder(x)
def _timestamp_to_seconds(timestamp: str):
parts = timestamp.split(':')
seconds = float(parts[(- 1)])
seconds += (float(parts[(- 2)]) * 60)
seconds += ((float(parts[(- 3)]) * 60) * 60)
return seconds |
def get_optimizer(args, net):
    """Build the SGD optimizer and LR scheduler configured by ``args``.

    Returns (optimizer, scheduler). Only SGD is supported; the schedule is
    either 'poly' or 'scl-poly' (poly decay that rescales and switches
    exponent after cfg.REDUCE_BORDER_ITER iterations).
    """
    # All parameters are optimized with the same hyperparameters.
    base_params = []
    for (name, param) in net.named_parameters():
        base_params.append(param)
    if args.sgd:
        optimizer = optim.SGD(base_params, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=False)
    else:
        raise ValueError('Not a valid optimizer')
    if (args.lr_schedule == 'scl-poly'):
        if (cfg.REDUCE_BORDER_ITER == (- 1)):
            raise ValueError('ERROR Cannot Do Scale Poly')
        # Poly decay until rescale_thresh, then a rescaled poly (factor
        # args.rescale, exponent args.repoly) over the remaining iterations.
        rescale_thresh = cfg.REDUCE_BORDER_ITER
        scale_value = args.rescale
        lambda1 = (lambda iteration: (math.pow((1 - (iteration / args.max_iter)), args.poly_exp) if (iteration < rescale_thresh) else (scale_value * math.pow((1 - ((iteration - rescale_thresh) / (args.max_iter - rescale_thresh))), args.repoly))))
        scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
    elif (args.lr_schedule == 'poly'):
        # Standard polynomial decay: (1 - iter/max_iter) ** poly_exp.
        lambda1 = (lambda iteration: math.pow((1 - (iteration / args.max_iter)), args.poly_exp))
        scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda1)
    else:
        raise ValueError('unknown lr schedule {}'.format(args.lr_schedule))
    return (optimizer, scheduler)
def prior(rng=None):
    """Draw one prior sample: scalar beta ~ N(0, 2) followed by f ~ N(0, Cov).

    Returns a length-10 array: [beta, f_1, ..., f_9]. A fresh default_rng is
    used when no generator is supplied.
    """
    rng = np.random.default_rng() if rng is None else rng
    beta = rng.normal(0, 2)
    f = rng.multivariate_normal(np.zeros(9), Cov)
    return np.concatenate(([beta], f))
def evaluate(model, dataloader, logger, device):
    """Return the model's answer-type classification accuracy (%) over `dataloader`."""
    n_correct = 0
    n_total = 0
    model.eval()
    with torch.no_grad():
        for batch in dataloader:
            # Each batch carries 8 fields; only the question and its
            # answer-type target are used here.
            _, question, _, _, _, _, _, answer_target = batch
            question = question.to(device)
            answer_target = answer_target.to(device)
            logits = model(question)
            predictions = logits.data.max(1)[1]
            n_correct += predictions.eq(answer_target.data).cpu().sum().item()
            n_total += len(answer_target)
    accuracy = (n_correct / n_total) * 100.0
    logger.info('[Validate] Val_Acc:{:.6f}%'.format(accuracy))
    return accuracy
def _warn_keyword_parameter(func_name, kwargs):
if (not kwargs):
return False
elif ((len(kwargs) > 1) or ('warn' not in kwargs)):
kwargs.pop('warn', None)
arg = next(iter(kwargs.keys()))
raise TypeError('{}() got an unexpected keyword argument {!r}'.format(func_name, arg))
return kwargs['warn'] |
class ComparableMixin():
    """Derive all rich comparisons from the subclass's __lt__.

    Subclasses implement only __lt__; equality is "neither operand is less
    than the other". ``x == None`` is False; other comparisons delegate to
    __lt__ and behave as it does for unsupported operands.
    """

    def __eq__(self, other):
        # The original also tested `self is None`, which is never true for a
        # bound method call; only the other operand needs the None guard.
        if other is None:
            return False
        return (not (self < other)) and (not (other < self))

    def __ne__(self, other):
        return (self < other) or (other < self)

    def __gt__(self, other):
        return other < self

    def __ge__(self, other):
        return not (self < other)

    def __le__(self, other):
        return not (other < self)
def option():
    """Parse command-line training options and return the argparse namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=200, type=int, metavar='N', help='number of total epochs to run')
    parser.add_argument('-b', '--batch-size', default=16, type=int, metavar='N', help='mini-batch size (default: 16)')
    parser.add_argument('--lr', '--learning-rate', default=0.0005, type=float, metavar='LR', help='initial learning rate', dest='lr')
    parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int, help='learning rate schedule (when to drop lr by 10x)')
    parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ')
    parser.add_argument('--gpu', default=0, type=int, help='GPU id to use.')
    parser.add_argument('--dim', default=128, type=int, help='feature dimension (default: 128)')
    parser.add_argument('-k', default=2048, type=int, help='queue size; number of negative keys (default: 2048)')
    parser.add_argument('-m', default=0.9, type=float, help='momentum of updating key encoder (default: 0.9)')
    parser.add_argument('-t', default=0.07, type=float, help='softmax temperature (default: 0.07)')
    parser.add_argument('--pretrained', default='', type=str, help='path to pretrained checkpoint')
    # BUG FIX: the help text claimed "default: 10" while the actual default is 100.
    parser.add_argument('-p', '--print-freq', default=100, type=int, metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--cos', action='store_true', help='use cosine lr schedule')
    args = parser.parse_args()
    return args
def test_aliasing():
    """_clean_inputs must not mutate any of the arguments it is given."""
    c = 1
    A_ub = [[1]]
    b_ub = [1]
    A_eq = [[1]]
    b_eq = [1]
    bounds = ((- np.inf), np.inf)
    # Deep-copy everything up front so any in-place mutation is detectable.
    c_copy = deepcopy(c)
    A_ub_copy = deepcopy(A_ub)
    b_ub_copy = deepcopy(b_ub)
    A_eq_copy = deepcopy(A_eq)
    b_eq_copy = deepcopy(b_eq)
    bounds_copy = deepcopy(bounds)
    _clean_inputs(c, A_ub, b_ub, A_eq, b_eq, bounds)
    assert_((c == c_copy), 'c modified by _clean_inputs')
    assert_((A_ub == A_ub_copy), 'A_ub modified by _clean_inputs')
    assert_((b_ub == b_ub_copy), 'b_ub modified by _clean_inputs')
    assert_((A_eq == A_eq_copy), 'A_eq modified by _clean_inputs')
    assert_((b_eq == b_eq_copy), 'b_eq modified by _clean_inputs')
    assert_((bounds == bounds_copy), 'bounds modified by _clean_inputs')
class Reshape(LoopEntryTransform):
    """Per-entry reshape transform: maps each entry name to a target shape."""

    def __init__(self, shapes: dict) -> None:
        # Register every entry named in `shapes`; no loop axis is involved.
        super().__init__(loop_axis=None, entries=tuple(shapes))
        self.shapes = shapes

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        """Reshape `np_entry` to the shape configured for `entry`."""
        return np.reshape(np_entry, self.shapes[entry])
def init_gans(target):
    """DCGAN-style init: conv/linear weights ~ N(0, 0.02), biases zeroed in place."""
    for module in target.modules():
        # A module cannot be both a conv and a Linear, so one combined check
        # replaces the original's two sequential isinstance tests.
        if isinstance(module, (nn.modules.conv._ConvNd, nn.Linear)):
            module.weight.data.normal_(0.0, 0.02)
            if getattr(module, 'bias', None) is not None:
                module.bias.data.zero_()
def prepare_ctrl_input(args, _, tokenizer, prompt_text):
    """Sanity-check a CTRL prompt, logging advice; returns the prompt unchanged."""
    if args.temperature > 0.7:
        logger.info('CTRL typically works better with lower temperatures (and lower top_k).')
    encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False)
    # CTRL generations should start from one of the model's control codes.
    first_token = encoded_prompt[0]
    if all(first_token != code for code in tokenizer.control_codes.values()):
        logger.info("WARNING! You are not starting your generation from a control code so you won't get good results")
    return prompt_text
# NOTE: The following lines are non-code residue (dataset-viewer UI text
# captured alongside this file) and are not part of this module. They are
# kept as comments so the file remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.