code stringlengths 101 5.91M |
|---|
class XmodForQuestionAnswering(metaclass=DummyObject):
    """Auto-generated placeholder used when torch is not installed.

    Instantiating it calls requires_backends, which presumably raises an
    informative import error naming the missing backend — confirm against
    the transformers dummy-object utilities.
    """
    # Backends this dummy object stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def read(*names, **kwargs):
    """Read a text file located relative to this module and return its contents.

    `names` are path components joined under this file's directory; the
    encoding defaults to utf8 and may be overridden via `encoding=`.
    """
    path = join(dirname(__file__), *names)
    encoding = kwargs.get('encoding', 'utf8')
    with io.open(path, encoding=encoding) as handle:
        return handle.read()
def tensor2im(input_image, imtype=np.uint8):
    """Convert a (B, C, H, W) torch tensor in [-1, 1] into a 256x256 HWC uint8 image.

    Only torch tensors are converted; any other input is returned unchanged.
    The first batch element is used; single-channel images are tiled to 3
    channels; values are mapped from [-1, 1] to [0, 255].

    :param input_image: torch.Tensor of shape (B, C, H, W), or passthrough value
    :param imtype: output numpy dtype (default np.uint8)
    :return: numpy array of shape (256, 256, 3), or `input_image` unchanged
    """
    if isinstance(input_image, torch.Tensor):
        # Bug fix: F.upsample is deprecated; F.interpolate is its direct
        # replacement with identical semantics for these arguments.
        input_image = F.interpolate(input_image, size=(256, 256), mode='bilinear')
        image_tensor = input_image.data
    else:
        return input_image
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        # Grayscale -> replicate to 3 channels.
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # CHW -> HWC and [-1, 1] -> [0, 255].
    image_numpy = ((np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0) * 255.0
    return image_numpy.astype(imtype)
# NOTE(review): these two decorator lines arrived garbled — they are missing
# their "@mltest." / "@pytest.mark." prefixes; restore them before running.
.ml_cpu_only
.parametrize('batch_size', [2, 3, 8])
def test_radius_search_batches(ml, batch_size):
    """Check batched RadiusSearch results against per-batch scipy cKDTree queries."""
    dtype = np.float32
    metric = 'L2'
    p_norm = {'L1': 1, 'L2': 2, 'Linf': np.inf}[metric]
    ignore_query_point = False
    return_distances = True
    normalize_distances = True
    rng = np.random.RandomState(123)
    # Random row splits delimiting each batch item's points and queries.
    points_row_splits = np.zeros(shape=((batch_size + 1),), dtype=np.int64)
    queries_row_splits = np.zeros(shape=((batch_size + 1),), dtype=np.int64)
    for i in range(batch_size):
        points_row_splits[(i + 1)] = (rng.randint(15) + points_row_splits[i])
        queries_row_splits[(i + 1)] = (rng.randint(15) + queries_row_splits[i])
    num_points = points_row_splits[(- 1)]
    num_queries = queries_row_splits[(- 1)]
    points = rng.random(size=(num_points, 3)).astype(dtype)
    if ignore_query_point:
        queries = points
        queries_row_splits = points_row_splits
    else:
        queries = rng.random(size=(num_queries, 3)).astype(dtype)
    # One search radius per query.
    radii = rng.uniform(0.1, 0.3, size=queries.shape[:1]).astype(dtype)
    # Ground truth: per-batch KD-tree radius queries; adding points_row_splits[i]
    # broadcasts the batch offset so indices become global.
    gt_neighbors_index = []
    for i in range(batch_size):
        points_i = points[points_row_splits[i]:points_row_splits[(i + 1)]]
        queries_i = queries[queries_row_splits[i]:queries_row_splits[(i + 1)]]
        radii_i = radii[queries_row_splits[i]:queries_row_splits[(i + 1)]]
        tree = cKDTree(points_i, copy_data=True)
        gt_neighbors_index.extend([list((tree.query_ball_point(q, r, p=p_norm) + points_row_splits[i])) for (q, r) in zip(queries_i, radii_i)])
    layer = ml.layers.RadiusSearch(metric=metric, ignore_query_point=ignore_query_point, normalize_distances=normalize_distances, return_distances=return_distances)
    ans = mltest.run_op(ml, ml.device, True, layer, points, queries=queries, radii=radii, points_row_splits=points_row_splits, queries_row_splits=queries_row_splits)
    # Compare the neighbor set (and distances, when returned) for every query.
    for (i, q) in enumerate(queries):
        start = ans.neighbors_row_splits[i]
        end = ans.neighbors_row_splits[(i + 1)]
        q_neighbors_index = ans.neighbors_index[start:end]
        gt_set = set(gt_neighbors_index[i])
        if ignore_query_point:
            gt_set.remove(i)
        assert (gt_set == set(q_neighbors_index))
        if return_distances:
            q_neighbors_dist = ans.neighbors_distance[start:end]
            for (j, dist) in zip(q_neighbors_index, q_neighbors_dist):
                if (metric == 'L2'):
                    # L2 distances are returned squared; normalization divides by r^2.
                    gt_dist = np.sum(((q - points[j]) ** 2))
                    if normalize_distances:
                        gt_dist /= (radii[i] ** 2)
                else:
                    gt_dist = np.linalg.norm((q - points[j]), ord=p_norm)
                    if normalize_distances:
                        gt_dist /= radii[i]
                np.testing.assert_allclose(dist, gt_dist, rtol=1e-07, atol=1e-08)
def dobldobl_laursys_solve(pols, topdim=(- 1), filter=True, factor=True, tasks=0, verbose=True):
    """Run the PHCpack cascade solver on a Laurent system in double-double precision.

    :param pols: list of string representations of Laurent polynomials
    :param topdim: top dimension of the cascade; -1 means #variables - 1
    :param filter: when truthy, filter spurious witness points
    :param factor: when truthy, factor the witness sets
    :param tasks: number of worker tasks (0 = sequential)
    :param verbose: pass verbosity through to the C solver
    :return: list of (witness system, witness solutions) tuples, one per dimension
    """
    from phcpy.phcpy2c3 import py2c_dobldobl_laursys_solve
    from phcpy.phcpy2c3 import py2c_copy_dobldobl_laursys_witset
    from phcpy.solver import number_of_symbols
    from phcpy.interface import store_dobldobl_laurent_system
    from phcpy.interface import load_dobldobl_laurent_system
    from phcpy.interface import load_dobldobl_solutions
    nbvar = number_of_symbols(pols)
    if topdim == -1:
        topdim = nbvar - 1
    store_dobldobl_laurent_system(pols, nbvar=nbvar)
    py2c_dobldobl_laursys_solve(tasks, topdim, int(filter), int(factor), int(verbose))
    witness_sets = []
    for soldim in range(topdim + 1):
        py2c_copy_dobldobl_laursys_witset(soldim)
        witness_sets.append((load_dobldobl_laurent_system(), load_dobldobl_solutions()))
    return witness_sets
def get_model(config):
    """Build and initialize a small two-layer dense MXNet Gluon network.

    `config` is accepted for interface compatibility but is not used here.
    """
    class SimpleModel(gluon.Block):
        def __init__(self, **kwargs):
            super(SimpleModel, self).__init__(**kwargs)
            self.fc1 = nn.Dense(20)
            self.fc2 = nn.Dense(10)

        def forward(self, x):
            return self.fc2(self.fc1(x))

    model = SimpleModel()
    model.initialize(mx.init.Xavier(magnitude=2.24), ctx=[mx.cpu()])
    return model
def default_compute_objective(metrics: Dict[(str, float)]) -> float:
    """Reduce an evaluation-metrics dict to a single scalar objective.

    Removes eval_loss, epoch, and speed metrics (*_runtime, *_per_second,
    *_compilation_time). Returns the eval loss when nothing else remains,
    otherwise the sum of the remaining metric values. The input dict is
    not mutated.
    """
    remaining = copy.deepcopy(metrics)
    loss = remaining.pop('eval_loss', None)
    remaining.pop('epoch', None)
    speed_suffixes = ('_runtime', '_per_second', '_compilation_time')
    for name in [k for k in remaining if k.endswith(speed_suffixes)]:
        remaining.pop(name, None)
    if not remaining:
        return loss
    return sum(remaining.values())
def nevergrad_get_setting(self):
    """Ask the method chooser for a method, then that method's optimizer for params.

    Returns a dict with both raw nevergrad tokens (needed later to `tell` the
    optimizers) and the unwrapped method name / parameter setting.
    """
    method_token = self._method_chooser.ask()
    params_token = self._optimizers[method_token.args[0]].ask()
    return {
        'method_token': method_token,
        'method': method_token.args[0],
        'params_token': params_token,
        'params': params_token.args[0],
    }
class ResNet(Model):
    """Torchvision-style ResNet backbone with extra feature/hook accessors.

    NOTE(review): the base class `Model` and the helpers `BasicBlock`,
    `Bottleneck`, `conv1x1` are defined elsewhere in the project.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per stage 2-4: replace the stride-2 downsampling
            # with a dilated convolution instead.
            replace_stride_with_dilation = [False, False, False]
        if (not (len(replace_stride_with_dilation) == 3)):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + norm + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Kaiming init for convs; unit scale / zero shift for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero-init the last norm of each residual block so every block
            # starts out as (approximately) the identity mapping.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of `blocks` blocks, downsampling when needed."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Project the shortcut when shape or channel count changes.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = list()
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def features(self, x):
        """Return flattened layer4 features (no avgpool / classifier head)."""
        out1 = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        out2 = self.layer1(out1)
        out3 = self.layer2(out2)
        out4 = self.layer3(out3)
        out5 = self.layer4(out4)
        out5 = out5.view(out5.size()[0], (- 1))
        return out5

    def first_activations(self, x):
        """Return stem activations (conv1 + bn1 + relu) before max-pooling."""
        x = self.relu(self.bn1(self.conv1(x)))
        return x

    def forward(self, x, latent=False):
        """Classify x; when latent=True also return the pooled flat features."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        layer4_out = self.layer4(x)
        if layer4_out.requires_grad:
            # NOTE(review): self.activations_hook is not defined in this class;
            # presumably a subclass provides it (Grad-CAM style) — confirm.
            layer4_out.register_hook(self.activations_hook)
        x = self.avgpool(layer4_out)
        flatten_x = torch.flatten(x, 1)
        x = self.fc(flatten_x)
        if latent:
            return (x, flatten_x)
        else:
            return x
class RMSLELoss(nn.Module):
    """Root Mean Squared Logarithmic Error.

    Computes sqrt(MSE(log(1 + input), log(1 + target))). Inputs are expected
    to be > -1 so the logarithms are defined.
    """

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # log1p(x) equals log(x + 1) but is numerically stable for small x.
        return torch.sqrt(self.mse(torch.log1p(input), torch.log1p(target)))
def train():
    """Run one training epoch for an AWD-LSTM/QRNN language model.

    Relies on module-level globals: args, model, corpus, optimizer, criterion,
    params, epoch, train_data, get_batch, repackage_hidden.
    """
    if (args.model == 'QRNN'):
        model.reset()
    total_loss = 0
    start_time = time.time()
    # NOTE(review): ntokens is computed but unused in this function.
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    (batch, i) = (0, 0)
    while (i < ((train_data.size(0) - 1) - 1)):
        # Sample a variable BPTT length around args.bptt (halved 5% of the time).
        bptt = (args.bptt if (np.random.random() < 0.95) else (args.bptt / 2.0))
        seq_len = max(5, int(np.random.normal(bptt, 5)))
        # Scale the learning rate linearly with the sampled sequence length.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = ((lr2 * seq_len) / args.bptt)
        model.train()
        (data, targets) = get_batch(train_data, i, args, seq_len=seq_len)
        # Detach the hidden state so gradients do not flow across batches.
        hidden = repackage_hidden(hidden)
        optimizer.zero_grad()
        (output, hidden, rnn_hs, dropped_rnn_hs) = model(data, hidden, return_h=True)
        raw_loss = criterion(model.decoder.weight, model.decoder.bias, output, targets)
        loss = raw_loss
        if args.alpha:
            # Activation regularization (AR) on the last layer's dropped outputs.
            loss = (loss + sum(((args.alpha * dropped_rnn_h.pow(2).mean()) for dropped_rnn_h in dropped_rnn_hs[(- 1):])))
        if args.beta:
            # Temporal activation regularization (TAR): penalize hidden-state jumps.
            loss = (loss + sum(((args.beta * (rnn_h[1:] - rnn_h[:(- 1)]).pow(2).mean()) for rnn_h in rnn_hs[(- 1):])))
        loss.backward()
        if args.clip:
            torch.nn.utils.clip_grad_norm_(params, args.clip)
        optimizer.step()
        total_loss += raw_loss.data
        # Restore the base learning rate after the step.
        optimizer.param_groups[0]['lr'] = lr2
        if (((batch % args.log_interval) == 0) and (batch > 0)):
            cur_loss = (total_loss.item() / args.log_interval)
            elapsed = (time.time() - start_time)
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | ms/batch {:5.2f} | loss {:5.2f} | ppl {:8.2f} | bpc {:8.3f}'.format(epoch, batch, (len(train_data) // args.bptt), optimizer.param_groups[0]['lr'], ((elapsed * 1000) / args.log_interval), cur_loss, math.exp(cur_loss), (cur_loss / math.log(2))))
            total_loss = 0
            start_time = time.time()
        batch += 1
        i += seq_len
def k_fold(dataset, folds=10):
    """Produce stratified k-fold train/test index tensors for a graph dataset.

    Stratifies on `dataset.data.y` with a fixed random_state so the split is
    reproducible. Returns (train_indices, test_indices), each a list of
    `folds` LongTensors.
    """
    splitter = StratifiedKFold(folds, shuffle=True, random_state=12345)
    labels = dataset.data.y
    train_indices = []
    test_indices = []
    # StratifiedKFold only needs a placeholder X of the right length.
    placeholder = torch.zeros(len(dataset))
    for train_idx, test_idx in splitter.split(placeholder, labels):
        train_indices.append(torch.from_numpy(train_idx).to(torch.long))
        test_indices.append(torch.from_numpy(test_idx).to(torch.long))
    return (train_indices, test_indices)
def reconstruction_error_vis(S1, S2, reduction='mean', visible_kpts=None):
    """Visibility-weighted reconstruction error after Procrustes alignment.

    Aligns S1 to S2 with a similarity transform, then averages per-joint
    Euclidean errors weighted by `visible_kpts` over the last axis.
    `reduction` is accepted for interface compatibility but unused here.
    """
    assert (visible_kpts is not None)
    aligned = compute_similarity_transform_batch(S1, S2)
    per_joint = np.sqrt(((aligned - S2) ** 2).sum(axis=-1))
    weighted_sum = (per_joint * visible_kpts).sum(axis=-1)
    return weighted_sum / visible_kpts.sum(axis=-1)
def load_model_from_config(config, ckpt, verbose=False):
    """Instantiate a model from `config` and load weights from checkpoint `ckpt`.

    Loads on CPU, applies the state dict non-strictly, reports missing and
    unexpected keys when verbose, and returns the model in eval mode.

    NOTE(review): torch.load unpickles arbitrary objects — only load trusted
    checkpoints.
    """
    print(f'Loading model from {ckpt}')
    pl_sd = torch.load(ckpt, map_location='cpu')
    if ('global_step' in pl_sd):
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd['state_dict']
    model = instantiate_from_config(config.model)
    (m, u) = model.load_state_dict(sd, strict=False)
    if ((len(m) > 0) and verbose):
        print('missing keys:')
        print(m)
    if ((len(u) > 0) and verbose):
        print('unexpected keys:')
        # Bug fix: previously printed len(u) (just a count) while the
        # missing-keys branch printed the key list; print the keys themselves.
        print(u)
    model.eval()
    return model
def load_extrinsics(path_trajectory, config):
    """Load camera extrinsics from a .log trajectory or a .json pose graph.

    Pose-graph node poses are inverted to obtain extrinsics. Results are
    returned as-is for the 'legacy' engine or wrapped into Float64 tensors
    for the 'tensor' engine; any other engine prints an error (returns None).
    """
    extrinsics = []
    if path_trajectory.endswith('log'):
        trajectory = o3d.io.read_pinhole_camera_trajectory(path_trajectory)
        extrinsics = [param.extrinsic for param in trajectory.parameters]
    elif path_trajectory.endswith('json'):
        pose_graph = o3d.io.read_pose_graph(path_trajectory)
        extrinsics = [np.linalg.inv(node.pose) for node in pose_graph.nodes]
    if config.engine == 'legacy':
        return extrinsics
    elif config.engine == 'tensor':
        return [o3d.core.Tensor(matrix, o3d.core.Dtype.Float64) for matrix in extrinsics]
    else:
        print('Unsupported engine {}'.format(config.engine))
class Test_angle_sequence(unittest.TestCase):
    """Tests for poly2laurent and QuantumSignalProcessingPhases (QSP angle finding)."""

    def test_poly2laurent_1(self):
        # Even degree-4 polynomial with a known Laurent-coefficient expansion.
        pcoefs = np.array([((- 3) - 2j), 0.0, (26 + 10j), 0.0, ((- 24) - 8j)])
        expected = (np.array([((- 3) - 1j), (1 + 1j), 2.0, (1 + 1j), ((- 3) - 1j)]) / 2.0)
        result = poly2laurent(pcoefs)
        self.assertAlmostEqual(np.max(np.abs((expected - result))), 0.0)

    def test_poly2laurent_2(self):
        # Odd-parity polynomial case.
        pcoefs = np.array([0.0, ((- 5) + 5j), 0.0, (8 - 4j)])
        expected = (np.array([(2 - 1j), (1 + 2j), (1 + 2j), (2 - 1j)]) / 2.0)
        result = poly2laurent(pcoefs)
        self.assertAlmostEqual(np.max(np.abs((expected - result))), 0.0)

    def test_poly2laurent_3(self):
        # Mixed-parity coefficients are invalid and must raise.
        pcoefs = np.array([1.0, ((- 5) + 5j), 0.0, (8 - 4j)])
        with self.assertRaises(AngleFindingError):
            poly2laurent(pcoefs)

    def test_response_1(self):
        # Smoke test: phase finding succeeds for both signal operators.
        pcoefs = [0, 1]
        poly = Polynomial(pcoefs)
        QuantumSignalProcessingPhases(poly, signal_operator='Wx')
        QuantumSignalProcessingPhases(poly, signal_operator='Wz')

    def test_response_2(self):
        pcoefs = [(- 1), 0, 2]
        poly = Polynomial(pcoefs)
        QuantumSignalProcessingPhases(poly, signal_operator='Wx')
        QuantumSignalProcessingPhases(poly, signal_operator='Wz')

    def test_response_3(self):
        pcoefs = [0, (- 3), 0, 4]
        poly = Polynomial(pcoefs)
        QuantumSignalProcessingPhases(poly, signal_operator='Wx')
        QuantumSignalProcessingPhases(poly, signal_operator='Wz')

    def test_response_4(self):
        # Complex coefficients require the 'z' measurement convention.
        pcoefs = [0.0, ((- 2) + 1j), 0.0, 2.0]
        poly = Polynomial(pcoefs)
        QuantumSignalProcessingPhases(poly, signal_operator='Wx', measurement='z')

    def test_response_5(self):
        # Higher-degree real polynomial across all supported conventions.
        pcoefs = [(- 1.0), 0.0, 50.0, 0.0, (- 400.0), 0.0, 1120.0, 0.0, (- 1280.0), 0.0, 512.0]
        poly = Polynomial(pcoefs)
        QuantumSignalProcessingPhases(poly, signal_operator='Wx')
        QuantumSignalProcessingPhases(poly, signal_operator='Wz')
        QuantumSignalProcessingPhases(poly, signal_operator='Wx', measurement='z')

    def test_response_6(self):
        pcoefs = [(- 1.0), 0.0, ((1 / 2) * ((4 + 3j) - ((1 - 2j) * np.sqrt(3)))), 0.0, ((1 - 1j) * ((- 1j) + np.sqrt(3)))]
        poly = Polynomial(pcoefs)
        QuantumSignalProcessingPhases(poly, signal_operator='Wx', measurement='z')
def nfsp_oshi_ppo_avg_policy_params_two_layers_no_valid_actions_model(env: MultiAgentEnv) -> Dict[(str, Any)]:
    """Average-policy params with a default (two 64-unit layer) fcnet and no custom model."""
    params = nfsp_leduc_avg_policy_params(env=env)
    model_conf = params['model']
    model_conf['custom_model'] = None
    model_conf['fcnet_hiddens'] = [64, 64]
    return params
class Registry(object):
    """Simple name -> class registry with decorator-style registration."""

    def __init__(self, name):
        self._name = name
        self._module_dict = dict()

    def __repr__(self):
        format_str = (self.__class__.__name__ + '(name={}, items={})'.format(self._name, list(self._module_dict.keys())))
        return format_str

    @property
    def name(self):
        """Registry name (read-only).

        Bug fix: `name` was a plain method but is used as an attribute in
        `_register_module` (self.name), which would have embedded a bound-method
        repr in the duplicate-registration error message.
        """
        return self._name

    @property
    def module_dict(self):
        """Mapping of registered class name -> class (read-only)."""
        return self._module_dict

    def get(self, key):
        """Return the registered class for `key`, or None if absent."""
        return self._module_dict.get(key, None)

    def _register_module(self, module_class, force=False):
        """Register `module_class` under its __name__; raise on duplicates unless force."""
        if (not inspect.isclass(module_class)):
            raise TypeError('module must be a class, but got {}'.format(type(module_class)))
        module_name = module_class.__name__
        if ((not force) and (module_name in self._module_dict)):
            raise KeyError('{} is already registered in {}'.format(module_name, self.name))
        self._module_dict[module_name] = module_class

    def register_module(self, cls=None, force=False):
        """Register a class; usable directly or as a decorator factory."""
        if (cls is None):
            # Called as @registry.register_module(force=...) — return a decorator.
            return partial(self.register_module, force=force)
        self._register_module(cls, force=force)
        return cls
# NOTE(review): this line arrived garbled — presumably a Flask route decorator
# such as "@app.route('/update_log.xml')"; restore it before use.
('/update_log.xml')
def products_xml(competitions=competitions):
    """Render the competitions from the two most recent publication dates as RSS 2.0."""
    num_largest = 2
    # Parse 'YYYY-MM-DD' pubtimes into comparable integers YYYYMMDD.
    comps_pubtime = np.array([int(i['pubtime'].replace('-', '')) for i in competitions])
    time_largest = heapq.nlargest(num_largest, np.unique(comps_pubtime))
    index_largest = [np.where((comps_pubtime == largest_value))[0].tolist() for largest_value in time_largest]
    # Pair each selected competition with the index of its date block (0 = newest).
    competitions = [(competitions[largest_index], block_time) for (block_time, largest_time) in enumerate(index_largest) for largest_index in largest_time]
    # Reformat each pubtime into Asia/Shanghai local time, in place.
    [comp.update({'pubtime': datetime.datetime.fromtimestamp(int(datetime.datetime.strptime(comp['pubtime'], '%Y-%m-%d').timestamp()), pytz.timezone('Asia/Shanghai')).strftime('%a, %d %b %Y')}) for (comp, _) in competitions]
    output = '<?xml version="1.0" encoding="UTF-8" ?>'
    output += '<rss version="2.0">'
    output += '<channel>'
    output += '<title>Data Science Challenge / Competition</title>'
    # NOTE(review): the next line is truncated in this copy (unterminated string;
    # the channel <link> URL is missing) — restore it from the original source.
    output += '<link>
    update_block = [datetime.datetime.fromtimestamp(int(datetime.datetime.strptime(str(time_largest[block]), '%Y%m%d').timestamp()), pytz.timezone('Asia/Shanghai')).strftime('%m/%d/%Y') for block in range(num_largest)]
    output += '<description>Latest update at {} (GMT+0800).</description>'.format(update_block[0])
    for (comp, _) in competitions:
        output += '<item>'
        output += '<title>{:s}</title>'.format(comp['title'])
        output += '<link>{:s}</link>'.format(comp['url'])
        output += '<category>{:s}</category>'.format('/'.join(comp['type1']))
        output += '<category>{:s}</category>'.format('/'.join(comp['type2']))
        output += '<pubDate>{:s}</pubDate>'.format(comp['pubtime'])
        output += '<description>{:s}</description>'.format(comp['note'].replace('<br>', ''))
        output += '</item>'
    output += '</channel>'
    output += '</rss>'
    return Response(output, mimetype='application/xml')
class EpisodeIterator(Iterator):
    """Iterator over a list of episodes.

    Supports optional cycling, reshuffling between cycles, grouping episodes
    by scene, subsampling, and forcing a switch to a different scene after the
    same scene has repeated for too many episodes or simulator steps.

    NOTE(review): the element type `T` and the episodes' `scene_id` attribute
    are declared elsewhere in the project.
    """

    def __init__(self, episodes: List[T], cycle: bool=True, shuffle: bool=False, group_by_scene: bool=True, max_scene_repeat_episodes: int=(- 1), max_scene_repeat_steps: int=(- 1), num_episode_sample: int=(- 1), step_repetition_range: float=0.2, seed: int=None):
        """
        :param episodes: episodes to iterate; each must expose `scene_id`
        :param cycle: restart from the beginning when exhausted
        :param shuffle: reshuffle episodes at the start of every cycle
        :param group_by_scene: keep episodes of the same scene contiguous
        :param max_scene_repeat_episodes: force a scene switch after this many
            consecutive episodes of one scene (-1 disables)
        :param max_scene_repeat_steps: force a scene switch after this many
            steps in one scene (-1 disables); jittered by step_repetition_range
        :param num_episode_sample: subsample this many episodes (-1 keeps all)
        :param step_repetition_range: fractional jitter applied to the step limit
        :param seed: seeds `random` and `numpy.random` when truthy
        """
        # NOTE(review): `if seed:` skips seeding when seed == 0 — confirm
        # whether 0 should be a valid seed.
        if seed:
            random.seed(seed)
            np.random.seed(seed)
        if (num_episode_sample >= 0):
            episodes = np.random.choice(episodes, num_episode_sample, replace=False)
        self.episodes = episodes
        self.cycle = cycle
        self.group_by_scene = group_by_scene
        self.shuffle = shuffle
        if shuffle:
            random.shuffle(self.episodes)
        if group_by_scene:
            self.episodes = self._group_scenes(self.episodes)
        self.max_scene_repetition_episodes = max_scene_repeat_episodes
        self.max_scene_repetition_steps = max_scene_repeat_steps
        # -1 so the very first episode is not counted as a repeat.
        self._rep_count = (- 1)
        self._step_count = 0
        self._prev_scene_id = None
        self._iterator = iter(self.episodes)
        self.step_repetition_range = step_repetition_range
        self._set_shuffle_intervals()

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next episode, cycling/reshuffling and tracking scene repeats."""
        self._forced_scene_switch_if()
        next_episode = next(self._iterator, None)
        if (next_episode is None):
            if (not self.cycle):
                raise StopIteration
            self._iterator = iter(self.episodes)
            if self.shuffle:
                self._shuffle()
            next_episode = next(self._iterator)
        if ((self._prev_scene_id != next_episode.scene_id) and (self._prev_scene_id is not None)):
            # Entering a new scene: reset the repeat and step counters.
            self._rep_count = 0
            self._step_count = 0
        self._prev_scene_id = next_episode.scene_id
        return next_episode

    def _forced_scene_switch(self) -> None:
        """Rotate the remaining episodes so a different scene comes first."""
        grouped_episodes = [list(g) for (k, g) in groupby(self._iterator, key=(lambda x: x.scene_id))]
        if (len(grouped_episodes) > 1):
            # Move the current (first) scene group to the back.
            grouped_episodes = (grouped_episodes[1:] + grouped_episodes[0:1])
        self._iterator = iter(sum(grouped_episodes, []))

    def _shuffle(self) -> None:
        """Reshuffle the remaining episodes (regrouping by scene if enabled)."""
        assert self.shuffle
        episodes = list(self._iterator)
        random.shuffle(episodes)
        if self.group_by_scene:
            episodes = self._group_scenes(episodes)
        self._iterator = iter(episodes)

    def _group_scenes(self, episodes):
        """Stable-sort episodes so each scene's episodes are contiguous,
        preserving the order in which scenes first appear."""
        assert self.group_by_scene
        scene_sort_keys = {}
        for e in episodes:
            if (e.scene_id not in scene_sort_keys):
                scene_sort_keys[e.scene_id] = len(scene_sort_keys)
        return sorted(episodes, key=(lambda e: scene_sort_keys[e.scene_id]))

    def step_taken(self):
        """Notify the iterator that one simulator step happened."""
        self._step_count += 1

    @staticmethod
    def _randomize_value(value, value_range):
        """Jitter `value` uniformly within +/- value_range (fraction of value).

        Bug fix: this was a plain method without `self`, so the
        `self._randomize_value(...)` call in _set_shuffle_intervals passed the
        instance as `value` (three arguments to a two-parameter function) and
        raised TypeError; it is now a staticmethod.
        """
        return random.randint(int((value * (1 - value_range))), int((value * (1 + value_range))))

    def _set_shuffle_intervals(self):
        """Recompute the episode/step thresholds that trigger a forced scene switch."""
        if (self.max_scene_repetition_episodes > 0):
            self._max_rep_episode = self.max_scene_repetition_episodes
        else:
            self._max_rep_episode = None
        if (self.max_scene_repetition_steps > 0):
            self._max_rep_step = self._randomize_value(self.max_scene_repetition_steps, self.step_repetition_range)
        else:
            self._max_rep_step = None

    def _forced_scene_switch_if(self):
        """Force a scene switch when either repetition threshold is reached."""
        do_switch = False
        self._rep_count += 1
        if ((self._max_rep_episode is not None) and (self._rep_count >= self._max_rep_episode)):
            do_switch = True
        if ((self._max_rep_step is not None) and (self._step_count >= self._max_rep_step)):
            do_switch = True
        if do_switch:
            self._forced_scene_switch()
            self._set_shuffle_intervals()
def _test():
    """Smoke-test all VGG variants: parameter counts and forward/backward shapes."""
    import torch
    pretrained = False
    models = [vgg11, vgg13, vgg16, vgg19, bn_vgg11, bn_vgg13, bn_vgg16, bn_vgg19, bn_vgg11b, bn_vgg13b, bn_vgg16b, bn_vgg19b]
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # NOTE(review): the expected parameter counts were lost in this copy —
        # the empty right-hand sides below are syntax errors; restore the
        # original numeric literals before running.
        assert ((model != vgg11) or (weight_count == ))
        assert ((model != vgg13) or (weight_count == ))
        assert ((model != vgg16) or (weight_count == ))
        assert ((model != vgg19) or (weight_count == ))
        assert ((model != bn_vgg11) or (weight_count == ))
        assert ((model != bn_vgg13) or (weight_count == ))
        assert ((model != bn_vgg16) or (weight_count == ))
        assert ((model != bn_vgg19) or (weight_count == ))
        assert ((model != bn_vgg11b) or (weight_count == ))
        assert ((model != bn_vgg13b) or (weight_count == ))
        assert ((model != bn_vgg16b) or (weight_count == ))
        assert ((model != bn_vgg19b) or (weight_count == ))
        # Forward/backward smoke test on a single ImageNet-sized input.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
class Exp_Basic(object):
    """Base experiment: owns the args namespace, device selection and the model.

    Subclasses must implement _build_model and typically override _get_data,
    vali, train and test.
    """

    def __init__(self, args):
        self.args = args
        self.device = self._acquire_device()
        self.model = self._build_model().to(self.device)

    def _build_model(self):
        """Return the model to train; subclasses must implement.

        Bug fix: removed an unreachable `return None` that followed the raise.
        """
        raise NotImplementedError

    def _acquire_device(self):
        """Select the torch device per args; also sets CUDA_VISIBLE_DEVICES for GPU runs."""
        if self.args.use_gpu:
            os.environ['CUDA_VISIBLE_DEVICES'] = (str(self.args.gpu) if (not self.args.use_multi_gpu) else self.args.devices)
            device = torch.device('cuda:{}'.format(self.args.gpu))
            print('Use GPU: cuda:{}'.format(self.args.gpu))
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

    def _get_data(self):
        pass

    def vali(self):
        pass

    def train(self):
        pass

    def test(self):
        pass
def vqa_collate(inputs):
    """Collate VQA examples into padded batch tensors for text and image streams.

    Pads token id and attention-mask sequences, pads per-image feature tensors
    to the max box count, and builds gather indices for both the student
    (single-token text) and teacher (full text) configurations.
    """
    (qids, input_ids, attn_masks_txt, img_input_ids, img_feats, img_pos_feats, attn_masks_img, targets) = map(list, unzip(inputs))
    txt_lens = [ids.size(0) for ids in input_ids]
    # Pad the variable-length text/image sequences to a rectangular batch.
    input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
    attn_masks_txt = pad_sequence(attn_masks_txt, batch_first=True, padding_value=0)
    attn_masks_img = pad_sequence(attn_masks_img, batch_first=True, padding_value=0)
    position_ids = torch.arange(0, input_ids.size(1), dtype=torch.long).unsqueeze(0)
    targets = torch.stack(targets, dim=0)
    # Image side: pad region features to the per-batch max box count.
    num_bbs = [feat.size(0) for feat in img_feats]
    img_feat = pad_tensors(img_feats, num_bbs)
    img_pos_feat = pad_tensors(img_pos_feats, num_bbs)
    img_input_ids = pad_sequence(img_input_ids, batch_first=True, padding_value=0)
    img_position_ids = torch.arange(0, img_input_ids.size(1), dtype=torch.long).unsqueeze(0)
    (bs, max_tl) = input_ids.size()
    out_size = attn_masks_img.size(1)
    gather_index_teacher = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
    gather_index = get_gather_index([1] * bs, num_bbs, bs, 1, out_size)
    txt_branch = {'input_ids': input_ids, 'position_ids': position_ids, 'attention_mask': attn_masks_txt, 'img_feat': None, 'img_pos_feat': None, 'img_masks': None, 'gather_index': None}
    img_branch = {'input_ids': img_input_ids, 'position_ids': img_position_ids, 'attention_mask': attn_masks_img, 'img_feat': img_feat, 'img_pos_feat': img_pos_feat, 'img_masks': None, 'gather_index': gather_index}
    return {'qids': qids, 'txts': txt_branch, 'imgs': img_branch, 'gather_index_teacher': gather_index_teacher, 'targets': targets}
def y_true_header(outcome, underscore=False):
    """Build the ground-truth column header for `outcome`.

    Joins the outcome name and the 'y_true0' suffix with '_' when `underscore`
    is truthy, otherwise with '-'.
    """
    separator = '_' if underscore else '-'
    return f'{outcome}{separator}y_true0'
class TestNativeCheckpointableIterator(unittest.TestCase, TestCheckpointableIterator):
    """Runs the shared TestCheckpointableIterator suite against NativeCheckpointableIterator."""

    def setUp(self):
        # expected_result / iterator are the fixture attributes the shared
        # test mixin consumes; keep these names.
        self.expected_result = list(range(53))
        self.iterator = NativeCheckpointableIterator(self.expected_result)

    def test_iterator_exception(self):
        # NOTE(review): presumably the wrapped input must be re-iterable for
        # checkpointing, so a one-shot iterator is rejected — confirm against
        # the NativeCheckpointableIterator docs.
        self.assertRaises(ValueError, NativeCheckpointableIterator, iter(range(10)))
def resize_n_crop(image, M, dsize=112):
    """Warp `image` with affine matrix `M` into a square dsize x dsize crop."""
    target_size = (dsize, dsize)
    return warp_affine(image, M, dsize=target_size, align_corners=True)
class GMAUpdateBlock(nn.Module):
    """GRU-based flow update block with global motion aggregation (GMA).

    Combines locally encoded motion features with attention-aggregated global
    motion, updates the hidden state with a separable conv GRU, and predicts
    a flow delta plus a convex upsampling mask.
    """

    def __init__(self, args, hidden_dim=128):
        super().__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=(128 + hidden_dim + hidden_dim))
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64 * 9, 1, padding=0),
        )
        self.aggregator = Aggregate(args=self.args, dim=128, dim_head=128, heads=1)

    def forward(self, net, inp, corr, flow, attention):
        local_motion = self.encoder(flow, corr)
        global_motion = self.aggregator(attention, local_motion)
        gru_input = torch.cat([inp, local_motion, global_motion], dim=1)
        net = self.gru(net, gru_input)
        delta_flow = self.flow_head(net)
        # Scale the mask to balance gradients (as in RAFT).
        upsample_mask = 0.25 * self.mask(net)
        return (net, upsample_mask, delta_flow)
def _find_miopen_config(rocm_install_path):
    """Locate MIOpen under a ROCm install and return its composite version number.

    :param rocm_install_path: root of the ROCm installation
    :return: dict with key 'miopen_version_number'
    :raises ConfigError: when no MIOpen version header can be found
    """

    def miopen_version_numbers(path):
        """Return (major, minor, patch) parsed from MIOpen's version header."""
        possible_version_files = ['include/miopen/version.h', 'miopen/include/miopen/version.h']
        version_file = None
        for f in possible_version_files:
            version_file_path = os.path.join(path, f)
            if os.path.exists(version_file_path):
                version_file = version_file_path
                break
        if (not version_file):
            # Bug fix: the old message formatted `version_file`, which is always
            # None on this path; report where we actually searched instead.
            raise ConfigError('MIOpen version file not found under "{}" (looked for: {})'.format(path, ', '.join(possible_version_files)))
        major = _get_header_version(version_file, 'MIOPEN_VERSION_MAJOR')
        minor = _get_header_version(version_file, 'MIOPEN_VERSION_MINOR')
        patch = _get_header_version(version_file, 'MIOPEN_VERSION_PATCH')
        return (major, minor, patch)

    (major, minor, patch) = miopen_version_numbers(rocm_install_path)
    miopen_config = {'miopen_version_number': _get_composite_version_number(major, minor, patch)}
    return miopen_config
def preprocess(image):
    """Resize an image to 346x346, then center-crop it to 289x289."""
    resized = tf.image.resize(image, (346, 346))
    offset = (346 - 289) // 2
    return tf.image.crop_to_bounding_box(resized, offset, offset, 289, 289)
def load_data(fpath, entities, w2i, system_acts):
    """Parse a babi-dialog-style file into per-dialog training tuples.

    Each dialog (blank-line delimited) yields a tuple of parallel lists:
    (utterances, system acts, contexts, bag-of-words, previous acts, act filters).
    Returns (data, system_acts).
    """
    data = []
    with open(fpath, 'r') as dialog_file:
        lines = dialog_file.readlines()
    # Renamed the tuple's last element binding from `f` to avoid shadowing the
    # file handle; the returned tuple structure is unchanged.
    (x, y, c, b, p, filt) = ([], [], [], [], [], [])
    context = [0] * len(entities.keys())
    for (idx, raw) in enumerate(lines):
        line = raw.rstrip()
        if line == '':
            # Blank line = end of dialog: flush and reset accumulators.
            data.append((x, y, c, b, p, filt))
            (x, y, c, b, p, filt) = ([], [], [], [], [], [])
            context = [0] * len(entities.keys())
            continue
        columns = line.split('\t')
        turn = columns[0].split(' ', 1)
        uttr = turn[1].split(' ')
        update_context(context, uttr, entities)
        act_filter = generate_act_filter(len(system_acts), context)
        bow = get_bow(uttr, w2i)
        sys_act = g.SILENT
        if len(columns) == 2:
            sys_act = columns[1]
            # Strip restaurant-specific tokens; collapse api_call variants.
            sys_act = re.sub('resto_\\S+', '', sys_act)
            if sys_act.startswith('api_call'):
                sys_act = 'api_call'
        else:
            # Lines without a system response are skipped.
            continue
        x.append(uttr)
        if len(y) == 0:
            p.append(g.SILENT)
        else:
            p.append(y[-1])
        y.append(sys_act)
        c.append(copy.deepcopy(context))
        b.append(bow)
        filt.append(act_filter)
    return (data, system_acts)
class BaseExp(metaclass=ABCMeta):
    """Abstract experiment description: model, data, optimizer, scheduler, eval.

    NOTE(review): `Module`, `LRScheduler` and `tabulate` come from imports
    outside this block; subclasses are expected to implement the getters.
    """

    def __init__(self):
        # Random seed; None means do not seed.
        self.seed = None
        # Directory where outputs (checkpoints/logs) are written.
        self.output_dir = './YOLOX_outputs'
        # Iterations between log prints.
        self.print_interval = 100
        # Epochs between evaluations.
        self.eval_interval = 10

    def get_model(self) -> Module:
        """Return the model to train; subclasses must implement."""
        pass

    def get_data_loader(self, batch_size: int, is_distributed: bool) -> Dict[(str, torch.utils.data.DataLoader)]:
        """Return named data loaders; subclasses must implement."""
        pass

    def get_optimizer(self, batch_size: int) -> torch.optim.Optimizer:
        """Return the optimizer; subclasses must implement."""
        pass

    def get_lr_scheduler(self, lr: float, iters_per_epoch: int, **kwargs) -> LRScheduler:
        """Return the learning-rate scheduler; subclasses must implement."""
        pass

    def get_evaluator(self):
        """Return the evaluator; subclasses must implement."""
        pass

    def eval(self, model, evaluator, weights):
        """Evaluate `model` with `evaluator`; subclasses must implement."""
        pass

    def __repr__(self):
        # Tabulate all public attributes for a readable experiment dump.
        table_header = ['keys', 'values']
        exp_table = [(str(k), pprint.pformat(v)) for (k, v) in vars(self).items() if (not k.startswith('_'))]
        return tabulate(exp_table, headers=table_header, tablefmt='fancy_grid')

    def merge(self, cfg_list):
        """Override attributes from a flat [key1, value1, key2, value2, ...] list.

        String values are coerced to the attribute's current type, falling
        back to ast.literal_eval for container-like values.
        """
        assert ((len(cfg_list) % 2) == 0)
        for (k, v) in zip(cfg_list[0::2], cfg_list[1::2]):
            if hasattr(self, k):
                src_value = getattr(self, k)
                src_type = type(src_value)
                if ((src_value is not None) and (src_type != type(v))):
                    try:
                        v = src_type(v)
                    except Exception:
                        v = ast.literal_eval(v)
                setattr(self, k, v)
def load_optimized_unet(cache_dir=None, unet_attributes=None, accelerator='openvino', ipex=True, precision='float32', device='CPU', low_memory=False, lora_name=None, additional_suffix=None):
    """Load a pre-optimized (nano) UNet from a cache directory.

    Either `cache_dir` or `unet_attributes` must be supplied; missing
    attributes are recovered from `attrs.pkl` or `config.json` in the cache.
    Returns the loaded unet with `conv_in` / config name patched, or None when
    the inputs are insufficient.
    :raises Exception: when the optimized model files are not present.
    """
    t_start = time.perf_counter()
    if (cache_dir is None) and (unet_attributes is None):
        print(f'You should provide either unet_attributes or cache_dir.')
        return None
    if cache_dir is None:
        cache_dir = unet_attributes['config']._name_or_path
    if unet_attributes is None:
        attr_file_path = os.path.join(cache_dir, 'attrs.pkl')
        if os.path.exists(attr_file_path):
            import pickle
            # NOTE(review): pickle.load executes arbitrary code if the cache
            # directory is untrusted.
            with open(attr_file_path, 'rb') as f:
                unet_attributes = pickle.load(f)
        elif os.path.exists(os.path.join(cache_dir, 'config.json')):
            import json
            from diffusers.configuration_utils import FrozenDict
            unet_attributes = {}
            with open(os.path.join(cache_dir, 'config.json')) as f:
                conf = json.load(f)
            unet_attributes['config'] = FrozenDict(conf)
            unet_attributes['conv_in_in_channels'] = unet_attributes['config'].in_channels
            unet_attributes['in_channels'] = unet_attributes['config'].in_channels
        else:
            print(f'Cannot find the unet attributes file or config.json, please provide unet_attributes or config.json.')
            # Bug fix: previously fell through with unet_attributes still None
            # and crashed below with a confusing error; bail out explicitly.
            return None
    # Lightweight attribute holder mimicking the original conv_in module
    # (OrderedDict instances accept attribute assignment, unlike plain dict).
    conv_in = OrderedDict()
    conv_in.in_channels = unet_attributes['conv_in_in_channels']
    (nano_unet, expect_path) = try_load_existing_model(unet_attributes, cache_dir, accelerator=accelerator, ipex=ipex, precision=precision, low_memory=low_memory, device=device, additional_suffix=additional_suffix, lora_name=lora_name)
    t_end = time.perf_counter()
    if nano_unet is None:
        raise Exception(f'You have to download the optimized nano unet models. Expected path: {expect_path}')
    print(f'Load unet in {(t_end - t_start)}s')
    setattr(nano_unet, 'conv_in', conv_in)
    setattr(nano_unet.config, '_name_or_path', cache_dir)
    return nano_unet
class MCLayer(nn.Module):
    """Linear layer whose weight matrix is elementwise-masked at forward time.

    Initialization mirrors nn.Linear (Kaiming-uniform weights, fan-in-scaled
    uniform bias). The forward pass multiplies the weights by mask.t() before
    the affine transform.
    """

    def __init__(self, size_in, size_out):
        super().__init__()
        # Keep the original creation/init order so RNG-driven values match nn.Linear's scheme.
        (self.size_in, self.size_out) = (size_in, size_out)
        weights = torch.Tensor(size_out, size_in)
        self.weights = nn.Parameter(weights)
        bias = torch.Tensor(size_out)
        self.bias = nn.Parameter(bias)
        nn.init.kaiming_uniform_(self.weights, a=math.sqrt(5))
        (fan_in, _) = nn.init._calculate_fan_in_and_fan_out(self.weights)
        bound = (1 / math.sqrt(fan_in))
        nn.init.uniform_(self.bias, (- bound), bound)

    def forward(self, x, mask):
        # mask has shape (size_in, size_out); its transpose lines up with weights.
        masked_weights = self.weights * mask.t()
        projected = torch.mm(x, masked_weights.t())
        return torch.add(projected, self.bias)
def getTrainIndex(n):
    """Return flat indices of an n x n grid whose row or column is even.

    This selects a checkerboard-like training subset: index i maps to
    (row, col) = (i // n, i % n), and i is kept when either coordinate is even.

    :param n: grid side length (non-negative int)
    :return: list of selected flat indices in increasing order
    """
    # Idiom fix: integer // and % replace math.floor(i / n) and np.mod(i, n);
    # a comprehension replaces the manual append loop. Results are identical.
    return [i for i in range(n * n) if (i // n) % 2 == 0 or (i % n) % 2 == 0]
class ResEncoder(nn.Module):
    """Two 3x3 conv-BN-ReLU stages with a 1x1-projected residual connection."""

    def __init__(self, in_channels, out_channels):
        super(ResEncoder, self).__init__()
        # Keep module creation order so RNG-driven initialization is unchanged.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=False)
        self.conv1x1 = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        # 1x1 projection aligns the shortcut's channel count with the output.
        shortcut = self.conv1x1(x)
        features = self.relu(self.bn1(self.conv1(x)))
        features = self.relu(self.bn2(self.conv2(features)))
        features = features + shortcut
        return self.relu(features)
def merge_hparams(policy: dict, hparams: dict):
    """Fill missing constructor args of an augmentation policy from hparams.

    Looks up the op class for policy['type'] in PIPELINES and copies each
    hparam into the policy when (a) the policy does not already set it and
    (b) the op's __init__ accepts it. Returns the (mutated) policy.
    """
    op = PIPELINES.get(policy['type'])
    assert (op is not None), f"""Invalid policy type "{policy['type']}"."""
    # Hoisted out of the loop: the accepted-arg list depends only on `op`.
    ctor_args = inspect.getfullargspec(op.__init__).args
    for (key, value) in hparams.items():
        if policy.get(key, None) is not None:
            continue
        if key in ctor_args:
            policy[key] = value
    return policy
def main():
    """Train for 40 epochs from start_epoch, plotting curves and decaying the LR every 20 epochs.

    Relies on module-level globals: start_epoch, train, test, draw_curve, lr_decay.
    """
    final_epoch = start_epoch + 40
    for epoch in range(start_epoch, final_epoch):
        train_metrics = train(epoch)
        test_metrics = test(epoch)
        draw_curve(epoch, *train_metrics, *test_metrics)
        # Decay the learning rate every 20 completed epochs.
        if (epoch + 1) % 20 == 0:
            lr_decay()
class IPAdapterXL(IPAdapter):
    """IP-Adapter generation for SDXL pipelines (handles pooled prompt embeds)."""

    def generate(self, pil_image, prompt=None, negative_prompt=None, scale=1.0, num_samples=4, seed=(- 1), num_inference_steps=30, **kwargs):
        """Generate images conditioned on `pil_image` (single image or list) and prompts.

        NOTE(review): the default seed is -1 while the generator is created
        whenever `seed is not None`, so the default path seeds with -1 —
        confirm this is intended rather than `seed >= 0`.
        """
        self.set_scale(scale)
        if isinstance(pil_image, Image.Image):
            num_prompts = 1
        else:
            num_prompts = len(pil_image)
        if (prompt is None):
            prompt = 'best quality, high quality'
        if (negative_prompt is None):
            negative_prompt = 'monochrome, lowres, bad anatomy, worst quality, low quality'
        # Broadcast scalar prompts to one per conditioning image.
        if (not isinstance(prompt, List)):
            prompt = ([prompt] * num_prompts)
        if (not isinstance(negative_prompt, List)):
            negative_prompt = ([negative_prompt] * num_prompts)
        (image_prompt_embeds, uncond_image_prompt_embeds) = self.get_image_embeds(pil_image)
        (bs_embed, seq_len, _) = image_prompt_embeds.shape
        # Repeat image embeddings num_samples times along the batch dimension.
        image_prompt_embeds = image_prompt_embeds.repeat(1, num_samples, 1)
        image_prompt_embeds = image_prompt_embeds.view((bs_embed * num_samples), seq_len, (- 1))
        uncond_image_prompt_embeds = uncond_image_prompt_embeds.repeat(1, num_samples, 1)
        uncond_image_prompt_embeds = uncond_image_prompt_embeds.view((bs_embed * num_samples), seq_len, (- 1))
        with torch.inference_mode():
            (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds) = self.pipe.encode_prompt(prompt, num_images_per_prompt=num_samples, do_classifier_free_guidance=True, negative_prompt=negative_prompt)
            # Append image tokens after the text tokens for both branches.
            prompt_embeds = torch.cat([prompt_embeds, image_prompt_embeds], dim=1)
            negative_prompt_embeds = torch.cat([negative_prompt_embeds, uncond_image_prompt_embeds], dim=1)
        generator = (torch.Generator(self.device).manual_seed(seed) if (seed is not None) else None)
        images = self.pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, num_inference_steps=num_inference_steps, generator=generator, **kwargs).images
        return images
def compute_avg_return(environment, policy, num_episodes=10):
    """Roll out `policy` for `num_episodes` episodes and return the mean
    undiscounted episode return as a plain scalar (via .numpy()[0])."""
    returns_sum = 0.0
    for _ in range(num_episodes):
        step = environment.reset()
        ep_return = 0.0
        # Accumulate rewards until the environment reports a terminal step.
        while not step.is_last():
            chosen = policy.action(step)
            step = environment.step(chosen.action)
            ep_return += step.reward
        returns_sum += ep_return
    mean_return = returns_sum / num_episodes
    return mean_return.numpy()[0]
def class2onehot(idx, class_num):
    """Convert class indices to a one-hot float tensor of shape (N, class_num).

    Generalization: scatter_ requires the index tensor to have the same
    number of dimensions as the target, so the original raised a
    RuntimeError for a plain 1-D index vector (N,). We now accept both the
    original column form (N, 1) and a 1-D vector.
    """
    assert torch.max(idx).item() < class_num
    onehot = torch.zeros(idx.size(0), class_num).to(idx.device)
    # Promote a 1-D index vector to a column so scatter_'s dims line up.
    index = idx if idx.dim() > 1 else idx.unsqueeze(1)
    onehot.scatter_(1, index, 1)
    return onehot
class GTScaleDown(object):
    """Resize a PIL image down by `factor` (bicubic) and multiply its pixel
    values by factor**2 (so the total pixel sum is roughly preserved)."""

    def __init__(self, factor=8):
        self.factor = factor

    def __call__(self, img):
        w, h = img.size
        if self.factor == 1:
            return img  # nothing to do
        small = img.resize((w // self.factor, h // self.factor), Image.BICUBIC)
        # NOTE(review): multiplication happens in the array's native dtype;
        # for uint8 inputs this can wrap around — confirm inputs are float maps.
        scaled = np.array(small) * self.factor * self.factor
        return Image.fromarray(scaled)
class history():
    """Fixed-capacity log of a maximizing search run.

    Per evaluation it stores the objective value (fx), the chosen action,
    and optional per-run timing buffers. MAX_SEARCH (module-level constant)
    bounds the total number of evaluations that can ever be recorded.
    """
    def __init__(self):
        # num_runs: how many write() calls (runs) have happened.
        # total_num_search: how many individual evaluations have been stored.
        self.num_runs = int(0)
        self.total_num_search = int(0)
        self.fx = np.zeros(MAX_SEARCH, dtype=float)
        self.chosen_actions = np.zeros(MAX_SEARCH, dtype=int)
        self.terminal_num_run = np.zeros(MAX_SEARCH, dtype=int)
        self.time_total_ = np.zeros(MAX_SEARCH, dtype=float)
        self.time_update_predictor_ = np.zeros(MAX_SEARCH, dtype=float)
        self.time_get_action_ = np.zeros(MAX_SEARCH, dtype=float)
        self.time_run_simulator_ = np.zeros(MAX_SEARCH, dtype=float)
    def time_total(self):
        """Copy of recorded total times.

        NOTE(review): write() fills the timing buffers by search index
        (st:en), but these getters slice by num_runs — confirm the two
        indexings are meant to coincide (one evaluation per run).
        """
        return copy.copy(self.time_total_[0:self.num_runs])
    def time_update_predictor(self):
        """Copy of recorded predictor-update times (see time_total note)."""
        return copy.copy(self.time_update_predictor_[0:self.num_runs])
    def time_get_action(self):
        """Copy of recorded action-selection times (see time_total note)."""
        return copy.copy(self.time_get_action_[0:self.num_runs])
    def time_run_simulator(self):
        """Copy of recorded simulator times (see time_total note)."""
        return copy.copy(self.time_run_simulator_[0:self.num_runs])
    def write(self, t, action, time_total=None, time_update_predictor=None, time_get_action=None, time_run_simulator=None):
        """Append one run's objective values `t` and `action`s.

        Missing timing vectors default to zeros of the same length.
        """
        N = utility.length_vector(t)
        st = self.total_num_search
        en = (st + N)
        # Record the cumulative evaluation count at which this run ends.
        self.terminal_num_run[self.num_runs] = en
        self.fx[st:en] = t
        self.chosen_actions[st:en] = action
        self.num_runs += 1
        self.total_num_search += N
        if (time_total is None):
            time_total = np.zeros(N, dtype=float)
        self.time_total_[st:en] = time_total
        if (time_update_predictor is None):
            time_update_predictor = np.zeros(N, dtype=float)
        self.time_update_predictor_[st:en] = time_update_predictor
        if (time_get_action is None):
            time_get_action = np.zeros(N, dtype=float)
        self.time_get_action_[st:en] = time_get_action
        if (time_run_simulator is None):
            time_run_simulator = np.zeros(N, dtype=float)
        self.time_run_simulator_[st:en] = time_run_simulator
    def export_sequence_best_fx(self):
        """Best fx (and its action) seen up to the end of each run (maximising)."""
        best_fx = np.zeros(self.num_runs, dtype=float)
        best_actions = np.zeros(self.num_runs, dtype=int)
        for n in range(self.num_runs):
            index = np.argmax(self.fx[0:self.terminal_num_run[n]])
            best_actions[n] = self.chosen_actions[index]
            best_fx[n] = self.fx[index]
        return (best_fx, best_actions)
    def export_all_sequence_best_fx(self):
        """Running best fx (and its action) after every single evaluation."""
        best_fx = np.zeros(self.total_num_search, dtype=float)
        best_actions = np.zeros(self.total_num_search, dtype=int)
        best_fx[0] = self.fx[0]
        best_actions[0] = self.chosen_actions[0]
        for n in range(1, self.total_num_search):
            if (best_fx[(n - 1)] < self.fx[n]):
                best_fx[n] = self.fx[n]
                best_actions[n] = self.chosen_actions[n]
            else:
                best_fx[n] = best_fx[(n - 1)]
                best_actions[n] = best_actions[(n - 1)]
        return (best_fx, best_actions)
    def save(self, filename):
        """Save the populated prefixes of the buffers to a compressed .npz.

        NOTE(review): timing buffers are not saved (and load() does not
        restore them) — confirm that is intentional.
        """
        N = self.total_num_search
        M = self.num_runs
        np.savez_compressed(filename, num_runs=M, total_num_search=N, fx=self.fx[0:N], chosen_actions=self.chosen_actions[0:N], terminal_num_run=self.terminal_num_run[0:M])
    def load(self, filename):
        """Restore counters and buffers previously written by save()."""
        data = np.load(filename)
        M = data['num_runs']
        N = data['total_num_search']
        self.num_runs = M
        self.total_num_search = N
        self.fx[0:N] = data['fx']
        self.chosen_actions[0:N] = data['chosen_actions']
        self.terminal_num_run[0:M] = data['terminal_num_run']
def train_cv_poison(helper, model, poison_optimizer, criterion):
    """Run one poisoned-training pass over helper.poisoned_train_data and
    return the mean per-sample loss.

    Fixes two defects in the original:
    * total_loss was assigned (=) instead of accumulated (+=), so the
      returned "average" reflected only the last batch;
    * gradients were zeroed twice per batch but loss.backward() and
      poison_optimizer.step() were never called, so no training occurred
      despite the function's name and the optimizer argument.
    """
    total_loss = 0.0
    num_data = 0.0
    for inputs_p, labels_p in helper.poisoned_train_data:
        # Relabel every poisoned sample with the attacker's target class.
        labels_p.fill_(helper.params['poison_label_swap'])
        inputs, labels = inputs_p.cuda(), labels_p.cuda()
        poison_optimizer.zero_grad()
        output = model(inputs)
        loss = criterion(output, labels)
        loss.backward()
        poison_optimizer.step()
        total_loss += loss.item() * inputs.size(0)
        num_data += inputs.size(0)
    return total_loss / float(num_data)
# NOTE(review): '_model' below looks like a mangled decorator line (likely
# '@register_model') — confirm against the original file.
_model
def scalable_vit_small(pretrained=False, **kwargs):
    """Build the Scalable-ViT-Small configuration (224px input, 4px patches,
    four stages with depths [2, 2, 20, 2]).

    `pretrained` is currently accepted but not used by this builder; extra
    kwargs are forwarded to the ScalableViT constructor.
    """
    img_size = 224
    model = ScalableViT(img_size=img_size, patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), depths=[2, 2, 20, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], block_cls=ScalableViTBlock, c_ratios=[1.25, 1.25, 1.25, 1], **kwargs)
    model.default_cfg = _cfg()
    return model
# NOTE(review): '_module()' below looks like a mangled registration decorator
# (e.g. '@MODELS.register_module()') — confirm against the original file.
_module()
class ClsHead(nn.Module):
    """MLP classification head with optional global feature pooling."""

    def __init__(self, num_classes: int, in_channels: int, mlps: List[int]=[256], norm_args: dict=None, act_args: dict={'act': 'relu'}, dropout: float=0.5, global_feat: str=None, point_dim: int=2, **kwargs):
        """Build the head; `mlps` are hidden widths, `global_feat` is a
        comma-separated list of pooling ops ('max', 'avg'/'mean')."""
        super().__init__()
        if kwargs:
            logging.warning(f'kwargs: {kwargs} are not used in {__class__.__name__}')
        self.global_feat = global_feat.split(',') if global_feat is not None else None
        self.point_dim = point_dim
        # Each pooling op contributes one copy of the input features.
        if global_feat is not None:
            in_channels = len(self.global_feat) * in_channels
        if mlps is not None:
            channels = [in_channels] + mlps + [num_classes]
        else:
            channels = [in_channels, num_classes]
        layers = []
        # Hidden blocks: linear + norm + activation (+ optional dropout).
        for c_in, c_out in zip(channels[:-2], channels[1:-1]):
            layers.append(create_linearblock(c_in, c_out, norm_args=norm_args, act_args=act_args))
            if dropout:
                layers.append(nn.Dropout(dropout))
        # Final classifier layer has no activation.
        layers.append(create_linearblock(channels[-2], channels[-1], act_args=None))
        self.head = nn.Sequential(*layers)

    def forward(self, end_points):
        if self.global_feat is not None:
            pooled = []
            for op_name in self.global_feat:
                if 'max' in op_name:
                    pooled.append(torch.max(end_points, dim=self.point_dim, keepdim=False)[0])
                elif op_name in ['avg', 'mean']:
                    pooled.append(torch.mean(end_points, dim=self.point_dim, keepdim=False))
            end_points = torch.cat(pooled, dim=1)
        return self.head(end_points)
def vilmedic_collate(batch, multi_image=None):
    """Collate samples whose 'image' entry is a list of image tensors.

    With multi_image unset (or 1) returns the stacked first image of each
    sample and a None mask. Otherwise each sample is truncated/zero-padded
    to exactly `multi_image` images; the mask marks non-all-zero images.

    Fix: padding previously appended zero tensors directly to the sample's
    own 'image' list, mutating the dataset sample in place (so cached
    samples grew every epoch); we now pad a copy.
    """
    if not multi_image or multi_image == 1:
        return {'images': torch.stack([s['image'][0] for s in batch]), 'images_mask': None}
    new_batch = []
    new_masks = []
    for sample in batch:
        # Copy + truncate so we never mutate the dataset's own list.
        sample_images = list(sample['image'])[:multi_image]
        if len(sample_images) < multi_image:
            first_image = sample_images[0]
            for _ in range(multi_image - len(sample_images)):
                sample_images.append(first_image.new_zeros(first_image.size()))
        sample_images = torch.stack(sample_images, dim=0)
        # True for real (non-all-zero) images, False for zero padding.
        # NOTE(review): a genuinely all-zero real image would also be masked.
        sample_mask = sample_images.sum(dim=(1, 2, 3)) != 0
        new_batch.append(sample_images)
        new_masks.append(sample_mask)
    return {'images': torch.stack(new_batch), 'images_mask': torch.stack(new_masks)}
class BasicTokenizer(object):
    """Basic tokenization: text cleaning, whitespace splitting, optional
    lower-casing with accent stripping, punctuation splitting, and
    space-padding of CJK ideographs.

    Relies on module-level helpers whitespace_tokenize, _is_punctuation,
    _is_control and _is_whitespace.
    """
    def __init__(self, do_lower_case=True):
        # Whether to lower-case (and strip accents from) every token.
        self.do_lower_case = do_lower_case
    def tokenize(self, text):
        """Split `text` into a list of basic word/punctuation tokens."""
        text = self._clean_text(text)
        # Pad CJK characters with spaces so each becomes its own token.
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(' '.join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Remove accents: NFD-decompose, then drop combining marks ('Mn')."""
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if (cat == 'Mn'):
                continue
            output.append(char)
        return ''.join(output)
    def _run_split_on_punc(self, text):
        """Split on punctuation; each punctuation char becomes its own token."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while (i < len(chars)):
            char = chars[i]
            if _is_punctuation(char):
                # Punctuation forms its own token and the next char starts
                # a fresh word.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[(- 1)].append(char)
            i += 1
        return [''.join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Surround every CJK ideograph with spaces."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(' ')
                output.append(char)
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)
    def _is_chinese_char(self, cp):
        """Return True if codepoint `cp` lies in a CJK ideograph block.

        The ranges cover the CJK Unified Ideographs blocks plus extensions
        and compatibility ideographs; other East-Asian scripts are excluded.
        """
        if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
            return True
        return False
    def _clean_text(self, text):
        """Drop NUL/replacement/control chars; map any whitespace to ' '."""
        output = []
        for char in text:
            cp = ord(char)
            if ((cp == 0) or (cp == 65533) or _is_control(char)):
                continue
            if _is_whitespace(char):
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)
def cstr(arg, arg_name, default, custom_str=False):
    """Return a filename-style suffix for `arg` when it differs from `default`.

    Uses `custom_str` verbatim when truthy, otherwise '_<arg_name><arg>'.
    Returns '' when arg equals default.
    """
    if arg == default:
        return ''
    return custom_str or f'_{arg_name}{arg}'
def preresnetbc26b(**kwargs):
    """Build the PreResNet-BC-26b model (26 layers, bottleneck blocks;
    conv1_stride=False selects the 'b' variant — see get_preresnet).

    All keyword arguments are forwarded to get_preresnet.
    """
    return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name='preresnetbc26b', **kwargs)
def export_intrinsics(save_root: Path, overwrite: bool=False) -> None:
    """Export per-sequence camera intrinsics to a label database under
    `save_root / 'calibs'`.

    Skips the export when the output directory already exists and
    `overwrite` is False. Cleanup: the original computed `out_dir` but then
    rebuilt the same path twice, and used an f-string with no placeholders.
    """
    out_dir = save_root / 'calibs'
    if not overwrite and out_dir.is_dir():
        print('-> Skipping LMDB calibrations...')
        return
    print(f'-> Exporting intrinsics "{out_dir}"...')
    # `stv` is a module-level dataset helper providing sequences/intrinsics.
    data = {seq: stv.load_intrinsics(seq) for seq in stv.get_seqs()}
    write_label_database(data, out_dir)
# NOTE(review): '.parametrize(...)' below looks like a mangled decorator
# (likely '@pytest.mark.parametrize') — confirm against the original file.
.parametrize('gpu2gpu', [False, True])
def test_rl_vectorized_envs(gpu2gpu):
    """Step a habitat VectorEnv for two full episodes and sanity-check the
    per-env outputs, the tiled render, and the done flags at episode end."""
    import habitat_sim
    if (gpu2gpu and (not habitat_sim.cuda_enabled)):
        pytest.skip('GPU-GPU requires CUDA')
    (configs, datasets) = _load_test_data()
    for config in configs:
        config.defrost()
        config.SIMULATOR.HABITAT_SIM_V0.GPU_GPU = gpu2gpu
        config.freeze()
    num_envs = len(configs)
    env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
    with habitat.VectorEnv(make_env_fn=make_rl_env, env_fn_args=env_fn_args) as envs:
        envs.reset()
        for i in range((2 * configs[0].ENVIRONMENT.MAX_EPISODE_STEPS)):
            outputs = envs.step(sample_non_stop_action(envs.action_spaces[0], num_envs))
            # Transpose the per-env tuples into parallel lists.
            (observations, rewards, dones, infos) = [list(x) for x in zip(*outputs)]
            assert (len(observations) == num_envs)
            assert (len(rewards) == num_envs)
            assert (len(dones) == num_envs)
            assert (len(infos) == num_envs)
            tiled_img = envs.render(mode='rgb_array')
            # NOTE(review): the expected tiling uses the module-level
            # NUM_ENVS while the envs were built from the local num_envs —
            # confirm the two are always equal.
            new_height = int(np.ceil(np.sqrt(NUM_ENVS)))
            new_width = int(np.ceil((float(NUM_ENVS) / new_height)))
            print(f'observations: {observations}')
            (h, w, c) = observations[0]['rgb'].shape
            assert (tiled_img.shape == ((h * new_height), (w * new_width), c)), 'vector env render is broken'
            if (((i + 1) % configs[0].ENVIRONMENT.MAX_EPISODE_STEPS) == 0):
                assert all(dones), 'dones should be true after max_episode steps'
class TensorflowGraph(tf.Graph):
    """TF1 graph/session helper base for CNN (super-resolution style) models.

    Provides conv-layer builders with activator/batch-norm/dropout options,
    complexity and receptive-field bookkeeping, tensorboard summaries, and
    checkpoint save/load. All configuration comes from a `flags` object.
    """
    def __init__(self, flags):
        super().__init__()
        self.name = ''
        # Model/training hyper-parameters copied from flags.
        self.dropout_rate = flags.dropout_rate
        self.activator = flags.activator
        self.batch_norm = flags.batch_norm
        self.cnn_size = flags.cnn_size
        self.cnn_stride = 1
        self.initializer = flags.initializer
        self.weight_dev = flags.weight_dev
        # Training-state placeholders; presumably assigned by subclasses
        # when the graph is built — TODO confirm.
        self.is_training = None
        self.dropout = False
        self.saver = None
        self.summary_op = None
        self.train_writer = None
        self.test_writer = None
        # Logging options (all gated on enable_log).
        self.enable_log = flags.enable_log
        self.save_weights = (flags.save_weights and flags.enable_log)
        self.save_images = (flags.save_images and flags.enable_log)
        self.save_images_num = flags.save_images_num
        self.save_meta_data = (flags.save_meta_data and flags.enable_log)
        self.log_weight_image_num = 32
        self.checkpoint_dir = flags.checkpoint_dir
        self.tf_log_dir = flags.tf_log_dir
        # Bookkeeping of built layers and model statistics.
        self.Weights = []
        self.Biases = []
        self.features = ''
        self.H = []
        self.receptive_fields = 0
        self.complexity = 0
        self.pix_per_input = 1
        self.init_session(flags.gpu_device_id)
    def init_session(self, device_id=0):
        """Create an interactive session on `device_id` with memory growth."""
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(device_id)
        print('Session and graph initialized.')
        self.sess = tf.InteractiveSession(config=config, graph=self)
    def init_all_variables(self):
        """Run the global variable initializer in this graph's session."""
        self.sess.run(tf.global_variables_initializer())
        print('Model initialized.')
    def build_activator(self, input_tensor, features: int, activator='', leaky_relu_alpha=0.1, base_name=''):
        """Apply the named activation to `input_tensor` and return it.

        Also accounts for the activation's cost in self.complexity.
        Raises NameError for an unknown activator name.
        """
        features = int(features)
        # NOTE(review): `or ''` is always falsy, so this condition is
        # equivalent to `activator is None`; an activator of '' (the
        # declared default!) falls through to the else branch and raises
        # NameError — likely intended `or activator == ''`. Confirm.
        if ((activator is None) or ''):
            return
        elif (activator == 'relu'):
            output = tf.nn.relu(input_tensor, name=(base_name + '_relu'))
        elif (activator == 'sigmoid'):
            output = tf.nn.sigmoid(input_tensor, name=(base_name + '_sigmoid'))
        elif (activator == 'tanh'):
            output = tf.nn.tanh(input_tensor, name=(base_name + '_tanh'))
        elif (activator == 'leaky_relu'):
            output = tf.maximum(input_tensor, (leaky_relu_alpha * input_tensor), name=(base_name + '_leaky'))
        elif (activator == 'prelu'):
            with tf.variable_scope('prelu'):
                alphas = tf.Variable(tf.constant(0.1, shape=[features]), name=(base_name + '_prelu'))
                if self.save_weights:
                    util.add_summaries('prelu_alpha', self.name, alphas, save_stddev=False, save_mean=False)
                # PReLU: relu(x) + alpha * min(x, 0).
                output = (tf.nn.relu(input_tensor) + (tf.multiply(alphas, (input_tensor - tf.abs(input_tensor))) * 0.5))
        elif (activator == 'selu'):
            output = tf.nn.selu(input_tensor, name=(base_name + '_selu'))
        else:
            raise NameError(('Not implemented activator:%s' % activator))
        self.complexity += (self.pix_per_input * features)
        return output
    def conv2d(self, input_tensor, w, stride, bias=None, use_batch_norm=False, name=''):
        """SAME-padded 2-D convolution with optional bias and batch norm."""
        output = tf.nn.conv2d(input_tensor, w, strides=[1, stride, stride, 1], padding='SAME', name=(name + '_conv'))
        self.complexity += (self.pix_per_input * int((((w.shape[0] * w.shape[1]) * w.shape[2]) * w.shape[3])))
        if (bias is not None):
            output = tf.add(output, bias, name=(name + '_add'))
            self.complexity += (self.pix_per_input * int(bias.shape[0]))
        if use_batch_norm:
            output = tf.layers.batch_normalization(output, training=self.is_training, name='BN')
        return output
    def build_conv(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num, use_bias=False, activator=None, use_batch_norm=False, dropout_rate=1.0):
        """Build one conv layer (weights, optional bias/BN/activation/dropout),
        record it in self.H/Weights/Biases, and return its output tensor."""
        with tf.variable_scope(name):
            w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num], stddev=self.weight_dev, name='conv_W', initializer=self.initializer)
            b = (util.bias([output_feature_num], name='conv_B') if use_bias else None)
            h = self.conv2d(input_tensor, w, self.cnn_stride, bias=b, use_batch_norm=use_batch_norm, name=name)
            if (activator is not None):
                h = self.build_activator(h, output_feature_num, activator, base_name=name)
            if (dropout_rate < 1.0):
                # NOTE(review): rate uses self.dropout (initialized to False
                # in __init__, possibly reassigned to a keep-prob tensor
                # elsewhere) rather than the dropout_rate argument — confirm.
                h = tf.nn.dropout(h, rate=(1 - self.dropout), name='dropout')
            self.H.append(h)
            if self.save_weights:
                util.add_summaries('weight', self.name, w, save_stddev=True, save_mean=True)
                util.add_summaries('output', self.name, h, save_stddev=True, save_mean=True)
                if use_bias:
                    util.add_summaries('bias', self.name, b, save_stddev=True, save_mean=True)
            if (self.save_images and (cnn_size > 1)):
                util.log_cnn_weights_as_images(self.name, w, max_outputs=self.save_images_num)
        # Track the model's receptive field as layers are stacked.
        if (self.receptive_fields == 0):
            self.receptive_fields = cnn_size
        else:
            self.receptive_fields += (cnn_size - 1)
        self.features += ('%d ' % output_feature_num)
        self.Weights.append(w)
        if use_bias:
            self.Biases.append(b)
        return h
    def depthwise_separable_conv2d(self, input_tensor, w, stride, channel_multiplier=1, bias=None, use_batch_norm=False, name=''):
        """Depthwise-separable convolution (depthwise + 1x1 pointwise).

        `w` supplies only the shape/channel counts; the actual depthwise and
        pointwise filters are created here.
        """
        depthwise_filter = util.weight([int(w.shape[0]), int(w.shape[1]), int(w.shape[2]), channel_multiplier], stddev=self.weight_dev, name='depthwise_W', initializer=self.initializer)
        pointwise_filter = util.weight([1, 1, (channel_multiplier * int(w.shape[2])), int(w.shape[3])], stddev=self.weight_dev, name='pointwise_W', initializer=self.initializer)
        output = tf.nn.separable_conv2d(input_tensor, depthwise_filter, pointwise_filter, strides=[1, stride, stride, 1], padding='SAME', name=(name + '_conv'))
        self.complexity += ((self.pix_per_input * int((((w.shape[0] * w.shape[1]) * w.shape[2]) * channel_multiplier))) + (self.pix_per_input * int((w.shape[2] * w.shape[3]))))
        if (bias is not None):
            output = tf.add(output, bias, name=(name + '_add'))
            self.complexity += (self.pix_per_input * int(bias.shape[0]))
        if use_batch_norm:
            output = tf.layers.batch_normalization(output, training=self.is_training, name='BN')
        return output
    def build_depthwise_separable_conv(self, name, input_tensor, cnn_size, input_feature_num, output_feature_num, use_bias=False, activator=None, use_batch_norm=False, dropout_rate=1.0):
        """Like build_conv, but using a depthwise-separable convolution."""
        with tf.variable_scope(name):
            w = util.weight([cnn_size, cnn_size, input_feature_num, output_feature_num], stddev=self.weight_dev, name='conv_W', initializer=self.initializer)
            b = (util.bias([output_feature_num], name='conv_B') if use_bias else None)
            h = self.depthwise_separable_conv2d(input_tensor, w, self.cnn_stride, bias=b, use_batch_norm=use_batch_norm, name=name)
            if (activator is not None):
                h = self.build_activator(h, output_feature_num, activator, base_name=name)
            if (dropout_rate < 1.0):
                # NOTE(review): same self.dropout vs dropout_rate concern as
                # in build_conv — confirm.
                h = tf.nn.dropout(h, rate=(1 - self.dropout), name='dropout')
            self.H.append(h)
            if self.save_weights:
                util.add_summaries('weight', self.name, w, save_stddev=True, save_mean=True)
                util.add_summaries('output', self.name, h, save_stddev=True, save_mean=True)
                if use_bias:
                    util.add_summaries('bias', self.name, b, save_stddev=True, save_mean=True)
            if (self.save_images and (cnn_size > 1)):
                util.log_cnn_weights_as_images(self.name, w, max_outputs=self.save_images_num)
        if (self.receptive_fields == 0):
            self.receptive_fields = cnn_size
        else:
            self.receptive_fields += (cnn_size - 1)
        self.features += ('%d ' % output_feature_num)
        self.Weights.append(w)
        if use_bias:
            self.Biases.append(b)
        return h
    def build_transposed_conv(self, name, input_tensor, scale, channels):
        """Add a transposed-convolution (upscale) layer.

        Returns None; the output tensor is appended to self.H (callers use
        self.H[-1]).
        """
        with tf.variable_scope(name):
            w = util.upscale_weight(scale=scale, channels=channels, name='Tconv_W')
            batch_size = tf.shape(input_tensor)[0]
            height = (tf.shape(input_tensor)[1] * scale)
            width = (tf.shape(input_tensor)[2] * scale)
            h = tf.nn.conv2d_transpose(input_tensor, w, output_shape=[batch_size, height, width, channels], strides=[1, scale, scale, 1], name=name)
        # After upscaling, each input pixel maps to scale*scale outputs.
        self.pix_per_input *= (scale * scale)
        self.complexity += ((((self.pix_per_input * util.get_upscale_filter_size(scale)) * util.get_upscale_filter_size(scale)) * channels) * channels)
        self.receptive_fields += 1
        self.Weights.append(w)
        self.H.append(h)
    def build_pixel_shuffler_layer(self, name, h, scale, input_filters, output_filters, activator=None, depthwise_separable=False):
        """Add a pixel-shuffle upscale: conv to scale^2*output_filters
        channels, then depth_to_space, then an optional activation.

        Output is appended to self.H (callers use self.H[-1]).
        """
        with tf.variable_scope(name):
            if depthwise_separable:
                self.build_depthwise_separable_conv((name + '_CNN'), h, self.cnn_size, input_filters, ((scale * scale) * output_filters), use_batch_norm=False, use_bias=True)
            else:
                self.build_conv((name + '_CNN'), h, self.cnn_size, input_filters, ((scale * scale) * output_filters), use_batch_norm=False, use_bias=True)
            self.H.append(tf.depth_to_space(self.H[(- 1)], scale))
            self.build_activator(self.H[(- 1)], output_filters, activator, base_name=name)
    def copy_log_to_archive(self, archive_name):
        """Copy the tensorboard log dir into '<tf_log_dir>_<archive_name>/<name>'."""
        archive_directory = ((self.tf_log_dir + '_') + archive_name)
        model_archive_directory = ((archive_directory + '/') + self.name)
        util.make_dir(archive_directory)
        util.delete_dir(model_archive_directory)
        try:
            shutil.copytree(self.tf_log_dir, model_archive_directory)
            print(('tensorboard log archived to [%s].' % model_archive_directory))
        except OSError as e:
            print(e)
            print(('NG: tensorboard log archived to [%s].' % model_archive_directory))
    def load_model(self, name='', trial=0, output_log=False):
        """Restore model variables from '<checkpoint_dir>/<name>[_<trial>].ckpt'.

        Exits the process when the checkpoint index file does not exist.
        """
        if ((name == '') or (name == 'default')):
            name = self.name
        if (trial > 0):
            filename = (((((self.checkpoint_dir + '/') + name) + '_') + str(trial)) + '.ckpt')
        else:
            filename = (((self.checkpoint_dir + '/') + name) + '.ckpt')
        if (not os.path.isfile((filename + '.index'))):
            print(('Error. [%s] is not exist!' % filename))
            exit((- 1))
        self.saver.restore(self.sess, filename)
        if output_log:
            logging.info(('Model restored [ %s ].' % filename))
        else:
            print(('Model restored [ %s ].' % filename))
    def save_model(self, name='', trial=0, output_log=False):
        """Save model variables to '<checkpoint_dir>/<name>[_<trial>].ckpt'."""
        if ((name == '') or (name == 'default')):
            name = self.name
        if (trial > 0):
            filename = (((((self.checkpoint_dir + '/') + name) + '_') + str(trial)) + '.ckpt')
        else:
            filename = (((self.checkpoint_dir + '/') + name) + '.ckpt')
        self.saver.save(self.sess, filename)
        if output_log:
            logging.info(('Model saved [%s].' % filename))
        else:
            print(('Model saved [%s].' % filename))
    def build_summary_saver(self, with_saver=True):
        """Create summary writers (when logging is enabled) and the Saver."""
        if self.enable_log:
            self.summary_op = tf.summary.merge_all()
            self.train_writer = tf.summary.FileWriter((self.tf_log_dir + '/train'))
            self.test_writer = tf.summary.FileWriter((self.tf_log_dir + '/test'), graph=self.sess.graph)
        if with_saver:
            self.saver = tf.train.Saver(max_to_keep=None)
def visualize_result(df, filename):
    """Plot `df` as a solid-line seaborn chart, save to `filename`, close."""
    axes = sns.lineplot(data=df, dashes=False)
    figure = axes.figure
    figure.savefig(filename, dpi=250)
    plt.close()
def cifar100_val_loader(dataset_name, val_batch_size, num_workers=4, pin_memory=True, normalize=None):
    """Build a non-shuffled DataLoader over the CIFAR-100 test split for
    `dataset_name`, normalizing with the dataset's mean/std by default."""
    if normalize is None:
        normalize = transforms.Normalize(mean=mean[dataset_name], std=std[dataset_name])
    transform = transforms.Compose([transforms.ToTensor(), normalize])
    val_dataset = datasets.ImageFolder('data/cifar100_org/test/{}'.format(dataset_name), transform)
    return torch.utils.data.DataLoader(val_dataset, batch_size=val_batch_size, shuffle=False, sampler=None, num_workers=num_workers, pin_memory=pin_memory)
class MixerBlock(nn.Module):
    """One MLP-Mixer block: token mixing then channel mixing, each with a
    pre-LayerNorm and a residual connection."""

    def __init__(self, num_patches: int, num_channels: int, tokens_hidden_dim: int, channels_hidden_dim: int):
        super().__init__()
        # Token mixing acts across patches: transpose to (b, c, p), apply
        # the MLP over the patch axis, and transpose back.
        self.token_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            Rearrange('b p c -> b c p'),
            MLPBlock(num_patches, tokens_hidden_dim),
            Rearrange('b c p -> b p c'),
        )
        self.channel_mixing = nn.Sequential(
            nn.LayerNorm(num_channels),
            MLPBlock(num_channels, channels_hidden_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x + self.token_mixing(x)
        return x + self.channel_mixing(x)
class MultiNLI():
    """MultiNLI dataset wrapper (legacy torchtext API): builds vocabularies
    with cached GloVe vectors and exposes train/dev/test batch iterators."""
    def __init__(self, options):
        print('preparing the dataset for training...')
        self.TEXT = Field(lower=True, tokenize='spacy', batch_first=True)
        self.LABEL = Field(sequential=False, unk_token=None, is_target=True)
        (self.train, self.dev, self.test) = datasets.MultiNLI.splits(self.TEXT, self.LABEL)
        # Text vocab from train+dev; label vocab from train only.
        self.TEXT.build_vocab(self.train, self.dev)
        self.LABEL.build_vocab(self.train)
        # Cache the (large) GloVe vectors to disk after the first load.
        vector_cache_loc = '.vector_cache/multinli_vectors.pt'
        if os.path.isfile(vector_cache_loc):
            self.TEXT.vocab.vectors = torch.load(vector_cache_loc)
        else:
            self.TEXT.vocab.load_vectors('glove.840B.300d')
            # NOTE(review): bare `makedirs` (not os.makedirs) — presumably a
            # module-level helper; confirm it exists.
            makedirs(os.path.dirname(vector_cache_loc))
            torch.save(self.TEXT.vocab.vectors, vector_cache_loc)
        (self.train_iter, self.dev_iter, self.test_iter) = Iterator.splits((self.train, self.dev, self.test), batch_size=options['batch_size'], device=options['device'], sort_key=(lambda x: len(x.premise)), sort_within_batch=False, shuffle=True)
    def vocab_size(self):
        # Number of tokens in the text vocabulary.
        return len(self.TEXT.vocab)
    def out_dim(self):
        # Number of target classes.
        return len(self.LABEL.vocab)
    def labels(self):
        # Mapping from label string to index.
        return self.LABEL.vocab.stoi
class TestOutput():
    """Pytest suite for ecco's OutputSeq API (positions, saliency, layer
    predictions, rankings) and the NMF factorization helpers.

    `output_seq_1` is a pytest fixture defined elsewhere in the test module.
    """
    def test_position_raises_value_error_more(self):
        # Position beyond the sequence length must raise.
        output_seq = output.OutputSeq(tokens=[0, 0], n_input_tokens=1)
        with pytest.raises(ValueError):
            output_seq.position(position=4)
    def test_position_raises_value_error_less(self):
        # Position inside the input region (before generation) must raise.
        output_seq = output.OutputSeq(tokens=[0, 0], n_input_tokens=1)
        with pytest.raises(ValueError):
            output_seq.position(position=0)
    def test_saliency(self, output_seq_1):
        # primary_attributions(printJson=True) returns the full JSON payload.
        actual = output_seq_1.primary_attributions(printJson=True)
        expected = {'tokens': [{'token': '', 'token_id': 352, 'is_partial': True, 'type': 'input', 'value': '0.', 'position': 0}, {'token': '', 'token_id': 11, 'is_partial': True, 'type': 'input', 'value': '0.', 'position': 1}, {'token': '', 'token_id': 352, 'is_partial': True, 'type': 'input', 'value': '0.', 'position': 2}, {'token': '', 'token_id': 11, 'is_partial': True, 'type': 'input', 'value': '0.', 'position': 3}, {'token': '', 'token_id': 362, 'is_partial': True, 'type': 'output', 'value': '0', 'position': 4}], 'attributions': [[0., 0., 0., 0.]]}
        assert (actual == expected)
    def test_layer_position_zero_raises_valueerror(self, output_seq_1):
        with pytest.raises(ValueError, match='.* set to 0*') as ex:
            actual = output_seq_1.layer_predictions(position=0)
    def test_layer_predictions_all_layers(self, output_seq_1):
        # Without a layer argument, predictions for all 6 layers come back.
        actual = output_seq_1.layer_predictions(printJson=True, position=4)
        assert (len(actual) == 6)
        assert (actual[0][0]['ranking'] == 1)
        assert (actual[0][0]['layer'] == 0)
    def test_layer_predictions_one_layer(self, output_seq_1):
        actual = output_seq_1.layer_predictions(layer=2, printJson=True, position=4)
        assert (len(actual) == 1)
        assert (actual[0][0]['ranking'] == 1)
        assert (actual[0][0]['layer'] == 2)
    def test_layer_predictions_topk(self, output_seq_1):
        # topk controls how many candidate tokens are returned per layer.
        actual = output_seq_1.layer_predictions(layer=2, printJson=True, topk=15, position=4)
        assert (len(actual) == 1)
        assert (len(actual[0]) == 15)
    def test_rankings(self, output_seq_1):
        # rankings shape: (layers, output tokens).
        actual = output_seq_1.rankings(printJson=True)
        assert (len(actual['output_tokens']) == 1)
        assert (actual['rankings'].shape == (6, 1))
        assert isinstance(int(actual['rankings'][0][0]), int)
    def test_rankings_watch(self, output_seq_1):
        # rankings_watch tracks the given watched token ids at each layer.
        actual = output_seq_1.rankings_watch(printJson=True, watch=[0, 0])
        assert (len(actual['output_tokens']) == 2)
        assert (actual['rankings'].shape == (6, 2))
        assert isinstance(int(actual['rankings'][0][0]), int)
    def test_nmf_raises_activations_dimension_value_error(self):
        # NMF requires 4-D activations (batch, layers, neurons, position).
        with pytest.raises(ValueError, match='.* four dimensions.*') as ex:
            NMF({'layer_0': np.zeros(0)}, n_components=2)
    def test_nmf_raises_value_error_same_layer(self):
        with pytest.raises(ValueError, match='.* same value.*') as ex:
            NMF({'layer_0': np.zeros((1, 1, 1, 1))}, n_components=2, from_layer=0, to_layer=0)
    def test_nmf_raises_value_error_layer_bounds(self):
        with pytest.raises(ValueError, match='.* larger.*'):
            NMF({'layer_0': np.zeros((1, 1, 1, 1))}, n_components=2, from_layer=1, to_layer=0)
    def test_nmf_reshape_activations_1(self):
        # reshape_activations flattens to (layers*neurons, batch*position).
        (batch, layers, neurons, position) = (1, 6, 128, 10)
        activations = np.ones((batch, layers, neurons, position))
        merged_activations = NMF.reshape_activations(activations, None, None, None)
        assert (merged_activations.shape == ((layers * neurons), (batch * position)))
    def test_nmf_reshape_activations_2(self):
        (batch, layers, neurons, position) = (2, 6, 128, 10)
        activations = np.ones((batch, layers, neurons, position))
        merged_activations = NMF.reshape_activations(activations, None, None, None)
        assert (merged_activations.shape == ((layers * neurons), (batch * position)))
    def test_nmf_explore_on_dummy_gpt(self):
        # End-to-end: tiny GPT-2, generate one token, factorize activations.
        lm = ecco.from_pretrained('sshleifer/tiny-gpt2', activations=True, verbose=False)
        output = lm.generate('test', generate=1)
        nmf = output.run_nmf()
        exp = nmf.explore(printJson=True)
        assert (len(exp['tokens']) == 2)
        assert (np.array(exp['factors']).shape == (1, 1, 2))
    def test_nmf_explore_on_dummy_bert(self):
        # End-to-end: dummy BERT on a padded 2-sentence batch.
        lm = ecco.from_pretrained('julien-c/bert-xsmall-dummy', activations=True, verbose=False)
        inputs = lm.to(lm.tokenizer(['test', 'hi'], padding=True, truncation=True, return_tensors='pt', max_length=512))
        output = lm(inputs)
        nmf = output.run_nmf()
        exp = nmf.explore(printJson=True)
        assert (len(exp['tokens']) == 3)
        assert (np.array(exp['factors']).shape == (1, 6, 6))
    def test_nmf_output_dims(self):
        # Placeholder — not implemented yet.
        pass
class ChannelAttentionBlock3d(nn.Module):
    """Channel self-attention for 5-D feature maps (B, C, H, W, D).

    `gamma` starts at zero, so the block is an exact identity at
    initialization and learns how much attention output to blend in.
    """

    def __init__(self, in_channels):
        super(ChannelAttentionBlock3d, self).__init__()
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=(- 1))

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        flat = x.view(b, c, -1)           # (B, C, H*W*D)
        flat_t = flat.permute(0, 2, 1)    # (B, H*W*D, C)
        # Channel-affinity matrices (both are flat @ flat^T), composed.
        aff_a = torch.matmul(flat, flat_t)
        aff_b = torch.matmul(flat, flat_t)
        affinity = torch.matmul(aff_a, aff_b)
        # Subtract each entry from its row max before the softmax.
        row_max = torch.max(affinity, -1, keepdim=True)[0].expand_as(affinity)
        attn = self.softmax(row_max - affinity)
        weighted = torch.matmul(attn, flat).view_as(x)
        return self.gamma * weighted + x
class UserCommands(BaseTransformersCLICommand):
    """Registers the huggingface.co user-related CLI subcommands
    (login / whoami / logout, plus the deprecated `repo create`)."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the user subcommands to `parser` (a subparsers action).

        Fix: declared as @staticmethod — the original bare function took
        `parser` in the `self` slot, so it only worked when called on the
        class, not on an instance.
        """
        login_parser = parser.add_parser('login', help='Log in using the same credentials as on huggingface.co')
        login_parser.set_defaults(func=(lambda args: LoginCommand(args)))
        whoami_parser = parser.add_parser('whoami', help='Find out which huggingface.co account you are logged in as.')
        whoami_parser.set_defaults(func=(lambda args: WhoamiCommand(args)))
        logout_parser = parser.add_parser('logout', help='Log out')
        logout_parser.set_defaults(func=(lambda args: LogoutCommand(args)))
        # Deprecated repo-management commands, kept for compatibility.
        repo_parser = parser.add_parser('repo', help='Deprecated: use `huggingface-cli` instead. Commands to interact with your huggingface.co repos.')
        repo_subparsers = repo_parser.add_subparsers(help='Deprecated: use `huggingface-cli` instead. huggingface.co repos related commands')
        repo_create_parser = repo_subparsers.add_parser('create', help='Deprecated: use `huggingface-cli` instead. Create a new repo on huggingface.co')
        repo_create_parser.add_argument('name', type=str, help="Name for your model's repo. Will be namespaced under your username to build the model id.")
        repo_create_parser.add_argument('--organization', type=str, help='Optional: organization namespace.')
        repo_create_parser.add_argument('-y', '--yes', action='store_true', help='Optional: answer Yes to the prompt')
        repo_create_parser.set_defaults(func=(lambda args: RepoCreateCommand(args)))
def normalize_answer(s):
    """Lower-case `s` and collapse every whitespace run to a single space."""
    lowered = s.lower()
    return ' '.join(lowered.split())
def TestTraining():
    """Run a short sample training of the BP-direct energy/gradient network.

    Relies on module-level project globals: GenerateData, PARAMS, MolDigester,
    TensorMolData_BP_Direct_EandG_Release and TFMolManage.
    """
    dataset = GenerateData()
    treated_atoms = dataset.AtomTypes()
    # Hyperparameters for the short sample run.
    PARAMS.update({
        'NetNameSuffix': 'training_sample',
        'learning_rate': 1e-05,
        'momentum': 0.95,
        'max_steps': 15,
        'batch_size': 100,
        'test_freq': 5,
        'tf_prec': 'tf.float64',
        'EnergyScalar': 1.0,
        'GradScalar': (1.0 / 20.0),
        'NeuronType': 'sigmoid_with_param',
        'sigmoid_alpha': 100.0,
        'KeepProb': [1.0, 1.0, 1.0, 1.0],
    })
    digester = MolDigester(treated_atoms, name_='ANI1_Sym_Direct', OType_='AtomizationEnergy')
    tensor_set = TensorMolData_BP_Direct_EandG_Release(dataset, digester, order_=1, num_indis_=1, type_='mol', WithGrad_=True)
    manager = TFMolManage('', tensor_set, False, 'fc_sqdiff_BP_Direct_EandG_SymFunction')
    # Profiling is switched off only after the manager is built (original order kept).
    PARAMS['Profiling'] = 0
    manager.Train(1)
class BaseProgressBar(object):
    """Abstract progress-bar wrapper around an iterable.

    Subclasses implement __iter__, log and print; this base supplies the
    common prefix handling, context-manager protocol and stat formatting.
    """

    def __init__(self, iterable, epoch=None, prefix=None):
        self.iterable = iterable
        # Some iterables carry an 'offset' attribute; default to 0 otherwise.
        self.offset = getattr(iterable, 'offset', 0)
        self.epoch = epoch
        # Build the display prefix exactly as "epoch NNN | prefix" (either
        # part may be absent; note prefix alone yields a leading " | ").
        label = ''
        if epoch is not None:
            label += 'epoch {:03d}'.format(epoch)
        if prefix is not None:
            label += ' | {}'.format(prefix)
        self.prefix = label

    def __len__(self):
        return len(self.iterable)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        # Never suppress exceptions.
        return False

    def __iter__(self):
        raise NotImplementedError

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats (subclass responsibility)."""
        raise NotImplementedError

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats (subclass responsibility)."""
        raise NotImplementedError

    def _str_commas(self, stats):
        # "key=value, key=value" with values stripped of whitespace.
        return ', '.join(key + '=' + value.strip() for key, value in stats.items())

    def _str_pipes(self, stats):
        # "key value | key value" with values stripped of whitespace.
        return ' | '.join(key + ' ' + value.strip() for key, value in stats.items())

    def _format_stats(self, stats):
        # Preserve insertion order while stringifying each stat.
        return OrderedDict((key, str(format_stat(value))) for key, value in stats.items())
def main(args):
    """Run the face-comparison simulator over every '*features*' CSV in args.csv_dir."""
    feature_csvs = [
        os.path.join(args.csv_dir, entry)
        for entry in os.listdir(args.csv_dir)
        if 'features' in entry
    ]
    for csv_filepath in feature_csvs:
        util.green_print(csv_filepath)
        filepath = util.create_output_path(csv_filepath)
        emb_array, labels = util.readEmb_csv(csv_filepath)
        total_num = len(labels)
        # max_compare_num < 1 means "compare against everything".
        compare_num = total_num if args.max_compare_num < 1 else args.max_compare_num
        result_file = simulator(args, emb_array, labels, compare_num, filepath, args.threshold)
class Constraint(object):
    """Python wrapper around a Chipmunk ``cpConstraint`` pointer (ctypes-backed).

    Exposes the C struct fields (maxForce, errorBias, maxBias, impulse) as
    Python properties and tracks the two constrained bodies.
    """

    def __init__(self, constraint=None):
        # Raw ctypes pointer to the cpConstraint and its dereferenced struct.
        self._constraint = constraint
        self._ccontents = self._constraint.contents

    def _get_max_force(self):
        return self._ccontents.maxForce

    def _set_max_force(self, f):
        self._ccontents.maxForce = f
    max_force = property(_get_max_force, _set_max_force, doc='The maximum force that the constraint can use to act on the two \n bodies. Defaults to infinity')

    def _get_error_bias(self):
        return self._ccontents.errorBias

    def _set_error_bias(self, error_bias):
        self._ccontents.errorBias = error_bias
    error_bias = property(_get_error_bias, _set_error_bias, doc='The rate at which joint error is corrected.\n\n    Defaults to pow(1.0 - 0.1, 60.0) meaning that it will correct 10% of \n    the error every 1/60th of a second.')

    def _get_max_bias(self):
        return self._ccontents.maxBias

    def _set_max_bias(self, max_bias):
        self._ccontents.maxBias = max_bias
    max_bias = property(_get_max_bias, _set_max_bias, doc='The maximum rate at which joint error is corrected. Defaults \n    to infinity')

    def _get_impulse(self):
        # Read through the C API rather than the struct contents.
        return cpffi.cpConstraintGetImpulse(self._constraint)
    impulse = property(_get_impulse, doc='Get the last impulse applied by this constraint.')

    a = property((lambda self: self._a), doc='The first of the two bodies constrained')
    b = property((lambda self: self._b), doc='The second of the two bodies constrained')

    def activate_bodies(self):
        """Wake up both constrained bodies."""
        self._a.activate()
        self._b.activate()

    def _set_bodies(self, a, b):
        # Keep Python references to the bodies (prevents GC while constrained)
        # and register this constraint on each body.
        self._a = a
        self._b = b
        a._constraints.add(self)
        b._constraints.add(self)

    def __del__(self):
        # `cp` may already be torn down during interpreter shutdown.
        if (cp is not None):
            cp.cpConstraintFree(self._constraint)
def reduce_states(rssm_states: list, dim, func):
    """Combine a list of RSSMState objects field-wise.

    For every attribute of the first state, gathers that attribute across all
    states and reduces the list with *func* (e.g. torch.stack / torch.cat)
    along *dim*, returning a new RSSMState of the combined fields.
    """
    field_names = rssm_states[0].__dict__.keys()
    combined_fields = [
        func([getattr(state, field) for state in rssm_states], dim=dim)
        for field in field_names
    ]
    return RSSMState(*combined_fields)
def restore_model(pkl_file, checkpoint=None, train=False, fp16=None):
    """Rebuild a trainer from its pickled init info and optionally load a checkpoint.

    Args:
        pkl_file: pickle with keys 'init' (constructor args), 'name' (trainer
            class name) and 'plans'.
        checkpoint: optional checkpoint path passed to trainer.load_checkpoint.
        train: forwarded to load_checkpoint (restore optimizer state etc.).
        fp16: if not None, overrides the trainer's fp16 flag.

    Returns:
        The reconstructed trainer instance.

    Raises:
        RuntimeError: if the trainer class cannot be located.
    """
    info = load_pickle(pkl_file)
    init = info['init']
    name = info['name']
    search_in = join(CoTr.__path__[0], 'training', 'network_training')
    tr = recursive_find_python_class([search_in], name, current_module='CoTr.training.network_training')
    if (tr is None):
        # Fall back to the meddec package if available.
        try:
            import meddec
            # BUG FIX: this used to build a tuple (dirname, 'network_training')
            # instead of a joined path, so the fallback search never worked.
            search_in = join(os.path.dirname(os.path.abspath(__file__)), 'network_training')
            tr = recursive_find_python_class([search_in], name, current_module='training.network_training')
        except ImportError:
            pass
    if (tr is None):
        raise RuntimeError(('Could not find the model trainer specified in checkpoint in nnunet.trainig.network_training. If it is not located there, please move it or change the code of restore_model. Your model trainer can be located in any directory within nnunet.trainig.network_training (search is recursive).\nDebug info: \ncheckpoint file: %s\nName of trainer: %s ' % (checkpoint, name)))
    assert issubclass(tr, nnUNetTrainer), 'The network trainer was found but is not a subclass of nnUNetTrainer. Please make it so!'
    trainer = tr(*init)
    if (fp16 is not None):
        trainer.fp16 = fp16
    trainer.process_plans(info['plans'])
    if (checkpoint is not None):
        trainer.load_checkpoint(checkpoint, train)
    return trainer
def modelSize(net):
    """Print the parameter count of *net*, truncated to thousands (K)."""
    total = 0
    for param in net.parameters():
        total += np.prod(param.size())
    kilo_params = int(total / 1000)
    print('Network has ', kilo_params, 'K params')
class ClassBalancedRandomSampling():
    """Class-balanced sampling from a replay buffer.

    Maintains a class-level cache mapping each class label to the set of buffer
    indices holding that class, plus per-class counts, so samples can be drawn
    evenly across classes.
    """

    # label -> set of buffer indices currently holding that label
    class_index_cache = None
    # torch.LongTensor of per-class counts (length num_class)
    class_num_cache = None

    @classmethod
    def sample(cls, buffer_x, buffer_y, n_smp_cls, excl_indices=None, device='cpu'):
        """Draw up to n_smp_cls random samples per class, skipping excl_indices.

        BUG FIX: `sample` and `update_cache` are written against `cls` but were
        missing the @classmethod decorator, so calling them on the class
        mis-bound the first argument.

        Returns (x, y, sample_ind) where sample_ind are the chosen buffer indices.
        """
        if (excl_indices is None):
            excl_indices = set()
        sample_ind = torch.tensor([], device=device, dtype=torch.long)
        for ind_set in cls.class_index_cache.values():
            if ind_set:
                valid_ind = (ind_set - excl_indices)
                # Random permutation, then keep at most n_smp_cls indices.
                perm_ind = torch.randperm(len(valid_ind), device=device)
                ind = torch.tensor(list(valid_ind), device=device, dtype=torch.long)[perm_ind][:n_smp_cls]
                sample_ind = torch.cat((sample_ind, ind))
        x = buffer_x[sample_ind]
        y = buffer_y[sample_ind]
        x = maybe_cuda(x)
        y = maybe_cuda(y)
        return (x, y, sample_ind)

    @classmethod
    def update_cache(cls, buffer_y, num_class, new_y=None, ind=None, device='cpu'):
        """Update the class->indices cache.

        With new_y/ind given, move those buffer slots from their old class to
        the new one; otherwise rebuild the index cache from buffer_y.
        NOTE(review): the rebuild branch does not refresh class_num_cache --
        confirm whether counts should also be recomputed there.
        """
        if (cls.class_index_cache is None):
            cls.class_index_cache = defaultdict(set)
            cls.class_num_cache = torch.zeros(num_class, dtype=torch.long, device=device)
        if (new_y is not None):
            orig_y = buffer_y[ind]
            for (i, ny, oy) in zip(ind, new_y, orig_y):
                oy_int = oy.item()
                ny_int = ny.item()
                i_int = i.item()
                # Remove the slot from its previous class before re-adding.
                if ((oy_int in cls.class_index_cache) and (i_int in cls.class_index_cache[oy_int])):
                    cls.class_index_cache[oy_int].remove(i_int)
                    cls.class_num_cache[oy_int] -= 1
                cls.class_index_cache[ny_int].add(i_int)
                cls.class_num_cache[ny_int] += 1
        else:
            # Full rebuild from the current buffer labels.
            cls_ind_cache = defaultdict(set)
            for (i, c) in enumerate(buffer_y):
                cls_ind_cache[c.item()].add(i)
            cls.class_index_cache = cls_ind_cache
def ReadFileGS(x_axis, tthread, batchInterval, NUM_ITEMS, NUM_ACCESS, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity):
    """Read throughput series for three scheduling algorithms across NUM_ACCESS values.

    For each algorithm, reads one result file per NUM_ACCESS value in *x_axis*
    and collects the throughput from its first line ("<label>: <value>").
    Cyclic workloads use the OP* algorithm variants.

    Returns a list of three throughput lists (one per algorithm).
    """

    def _throughputs(algo):
        # One throughput sample per NUM_ACCESS value in x_axis.
        values = []
        for num_access in x_axis:
            input_events = (tthread * batchInterval)
            path = getPathGS(algo, input_events, tthread, NUM_ITEMS, num_access, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity)
            # BUG FIX: files were opened via open(...).readlines() and never closed.
            with open(path) as f:
                first_line = f.readline()
            values.append(float(first_line.split(': ')[1]))
        return values

    algos = ['OPGS', 'OPBFS', 'OPDFS'] if (isCyclic == 'true') else ['GS', 'BFS', 'DFS']
    y = [_throughputs(algo) for algo in algos]
    print(y)
    return y
def download_dataset(data_path, file_ids):
    """Download each Google-Drive file into *data_path* and extract it.

    Prompts before overwriting an existing file; skipped files are neither
    re-downloaded nor re-extracted.

    Raises:
        ValueError: if the overwrite prompt answer is not Y/N.
    """
    for (file_name, file_id) in file_ids.items():
        save_path = osp.abspath(osp.join(data_path, file_name))
        if osp.exists(save_path):
            user_response = input(f'{file_name} already exist. Do you want to cover it? Y/N ')
            if (user_response.lower() == 'y'):
                print(f'Covering {file_name} to {save_path}')
                download_file_from_google_drive(file_id, save_path)
            elif (user_response.lower() == 'n'):
                print(f'Skipping {file_name}')
                continue  # nothing downloaded -> nothing to extract
            else:
                # typo fix: was 'accpets'
                raise ValueError('Wrong input. Only accepts Y/N.')
        else:
            print(f'Downloading {file_name} to {save_path}')
            download_file_from_google_drive(file_id, save_path)
        # BUG FIX: extraction previously ran only on the fresh-download path,
        # so a re-downloaded ("cover") archive was never unzipped.
        print(f'Extracting {file_name} to {save_path}')
        unzip_dataset(file_name, data_path)
class BaseModel(nn.Module):
    """Segmentation base model: builds a (possibly multi-modal) backbone by name
    and provides weight init / pretrained-loading helpers."""

    def __init__(self, backbone: str='MiT-B0', num_classes: int=19, modals: list=['rgb', 'depth', 'event', 'lidar']) -> None:
        super().__init__()
        (backbone, variant) = backbone.split('-')
        # NOTE(review): eval() on the backbone name executes arbitrary code if
        # the string is untrusted -- acceptable only for internal config.
        self.backbone = eval(backbone)(variant, modals)
        self.modals = modals

    def _init_weights(self, m: nn.Module) -> None:
        """He-style init for conv layers, truncated-normal for linear, ones/zeros for norms."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (m.bias is not None):
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Conv2d):
            fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            # BUG FIX: was a bare expression `(fan_out // m.groups)` whose
            # result was discarded; grouped convs got the wrong init scale.
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            if (m.bias is not None):
                nn.init.zeros_(m.bias)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)

    def init_pretrained(self, pretrained: str=None) -> None:
        """Load pretrained backbone weights (dual-path loader for multi-modal setups)."""
        if pretrained:
            if (len(self.modals) > 1):
                load_dualpath_model(self.backbone, pretrained)
            else:
                checkpoint = torch.load(pretrained, map_location='cpu')
                # Unwrap common checkpoint containers.
                if ('state_dict' in checkpoint.keys()):
                    checkpoint = checkpoint['state_dict']
                if ('model' in checkpoint.keys()):
                    checkpoint = checkpoint['model']
                msg = self.backbone.load_state_dict(checkpoint, strict=False)
                print(msg)
def le_net_cifar(pretrained: bool=False, progress: bool=True, num_classes: int=10):
    """Build a LeNet model for CIFAR.

    NOTE(review): `pretrained`, `progress` and `num_classes` are currently
    ignored -- LeNetCIFAR() is always constructed with its defaults. Confirm
    whether num_classes should be forwarded.
    """
    return LeNetCIFAR()
class HUD(object):
    """Pygame heads-up display: info panel, per-vehicle id labels and fading notifications."""

    def __init__(self, name, width, height):
        self.name = name
        self.dim = (width, height)  # (width, height) of the display surface
        self._init_hud_params()
        self._init_data_params()

    def start(self):
        """Start hook.

        NOTE(review): the body appears to have been lost in extraction (no
        statements follow the def) -- confirm against the original source.
        """

    def _init_hud_params(self):
        # Pick an available monospace font ('courier' on Windows, 'mono' elsewhere),
        # preferring ubuntumono when present.
        font_name = ('courier' if (os.name == 'nt') else 'mono')
        fonts = [x for x in pygame.font.get_fonts() if (font_name in x)]
        default_font = 'ubuntumono'
        mono = (default_font if (default_font in fonts) else fonts[0])
        mono = pygame.font.match_font(mono)
        self._font_mono = pygame.font.Font(mono, 14)
        self._header_font = pygame.font.SysFont('Arial', 14, True)
        self.help = HelpText(pygame.font.Font(mono, 24), *self.dim)
        # Notifications render into a 40px strip along the bottom edge.
        self._notifications = FadingText(pygame.font.Font(pygame.font.get_default_font(), 20), (self.dim[0], 40), (0, (self.dim[1] - 40)))

    def _init_data_params(self):
        self.show_info = True        # toggle for the left info panel
        self.show_actor_ids = False  # toggle for per-vehicle id labels
        self._info_text = {}         # title -> list of renderable items

    def notification(self, text, seconds=2.0):
        """Show a fading notification for *seconds*."""
        self._notifications.set_text(text, seconds=seconds)

    def tick(self, clock):
        self._notifications.tick(clock)

    def add_info(self, title, info):
        """Register (or replace) an info section for the panel."""
        self._info_text[title] = info

    def render_vehicles_ids(self, vehicle_id_surface, list_actors, world_to_pixel, hero_actor, hero_transform):
        """Draw each actor's id at its map position, rotated to the hero's heading.

        Color codes: sky blue default, chocolate for 2-wheelers, chameleon for the hero.
        """
        vehicle_id_surface.fill(COLOR_BLACK)
        if self.show_actor_ids:
            vehicle_id_surface.set_alpha(150)
            for actor in list_actors:
                (x, y) = world_to_pixel(actor[1].location)
                angle = 0
                if (hero_actor is not None):
                    # Counter-rotate labels so they stay upright relative to the hero view.
                    angle = ((- hero_transform.rotation.yaw) - 90)
                color = COLOR_SKY_BLUE_0
                if (int(actor[0].attributes['number_of_wheels']) == 2):
                    color = COLOR_CHOCOLATE_0
                if (actor[0].attributes['role_name'] == 'hero'):
                    color = COLOR_CHAMELEON_0
                font_surface = self._header_font.render(str(actor[0].id), True, color)
                rotated_font_surface = pygame.transform.rotate(font_surface, angle)
                rect = rotated_font_surface.get_rect(center=(x, y))
                vehicle_id_surface.blit(rotated_font_surface, rect)
        return vehicle_id_surface

    def render(self, display):
        """Render the info panel (text lines, sparklines, booleans, value bars),
        then notifications and help overlay."""
        if self.show_info:
            # Semi-transparent backdrop for the panel.
            info_surface = pygame.Surface((240, self.dim[1]))
            info_surface.set_alpha(100)
            display.blit(info_surface, (0, 0))
            v_offset = 4
            bar_h_offset = 100
            bar_width = 106
            i = 0
            for (title, info) in self._info_text.items():
                if (not info):
                    continue
                surface = self._header_font.render(title, True, COLOR_ALUMINIUM_0).convert_alpha()
                display.blit(surface, ((8 + (bar_width / 2)), ((18 * i) + v_offset)))
                v_offset += 12
                i += 1
                for item in info:
                    if ((v_offset + 18) > self.dim[1]):
                        break  # panel full: stop drawing further rows
                    if isinstance(item, list):
                        # A list of floats becomes a small polyline graph.
                        if (len(item) > 1):
                            points = [((x + 8), ((v_offset + 8) + ((1.0 - y) * 30))) for (x, y) in enumerate(item)]
                            pygame.draw.lines(display, (255, 136, 0), False, points, 2)
                        item = None
                    elif isinstance(item, tuple):
                        if isinstance(item[1], bool):
                            # (label, bool): filled box when True, outline when False.
                            rect = pygame.Rect((bar_h_offset, (v_offset + 8)), (6, 6))
                            pygame.draw.rect(display, COLOR_ALUMINIUM_0, rect, (0 if item[1] else 1))
                        else:
                            # (label, value, min, max): horizontal value bar.
                            rect_border = pygame.Rect((bar_h_offset, (v_offset + 8)), (bar_width, 6))
                            pygame.draw.rect(display, COLOR_ALUMINIUM_0, rect_border, 1)
                            f = ((item[1] - item[2]) / (item[3] - item[2]))
                            if (item[2] < 0.0):
                                # Signed range: draw a slider knob at the value.
                                rect = pygame.Rect(((bar_h_offset + (f * (bar_width - 6))), (v_offset + 8)), (6, 6))
                            else:
                                rect = pygame.Rect((bar_h_offset, (v_offset + 8)), ((f * bar_width), 6))
                            pygame.draw.rect(display, COLOR_ALUMINIUM_0, rect)
                        item = item[0]
                    if item:
                        # Remaining string content is rendered as a text row.
                        surface = self._font_mono.render(item, True, COLOR_ALUMINIUM_0).convert_alpha()
                        display.blit(surface, (8, ((18 * i) + v_offset)))
                    v_offset += 18
                v_offset += 24
        self._notifications.render(display)
        self.help.render(display)
def preprocess_image(image, device):
    """Convert an (H, W, C) uint8 numpy image to a (1, C, H, W) float tensor in [-1, 1].

    Args:
        image: numpy array of shape (H, W, C), values in [0, 255].
        device: torch device (or device string) for the result.
    """
    image = ((torch.from_numpy(image).float() / 127.5) - 1)
    # 'h w c -> 1 c h w' -- equivalent to the previous einops.rearrange call,
    # expressed with native torch ops (drops the third-party dependency).
    image = image.permute(2, 0, 1).unsqueeze(0)
    image = image.to(device)
    return image
def frontend(x, is_training, yInput, num_filt, type):
    """Musicnn-style CNN front-end over a spectrogram batch.

    Builds timbral (frequency-spanning) and/or temporal (time-spanning) conv
    blocks depending on which substrings appear in `type`.

    NOTE(review): only type == '7774timbraltemporal' has an explicit return;
    any other value falls through and returns None, and f74/f77/s1..s3 are
    bound only when their substrings match -- confirm intended call patterns.
    `type` shadows the builtin but is kept for interface compatibility.
    """
    # Add a channel axis: (batch, time, freq) -> (batch, time, freq, 1).
    expand_input = tf.expand_dims(x, 3)
    normalized_input = tf.compat.v1.layers.batch_normalization(expand_input, training=is_training)
    if ('timbral' in type):
        # Pad 3 frames each side of the time axis for the 7-frame kernels.
        input_pad_7 = tf.pad(normalized_input, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
        if ('74' in type):
            # 7 x (0.4 * freq-bins) kernels.
            f74 = timbral_block(inputs=input_pad_7, filters=int((num_filt * 128)), kernel_size=[7, int((0.4 * yInput))], is_training=is_training)
        if ('77' in type):
            # 7 x (0.7 * freq-bins) kernels.
            f77 = timbral_block(inputs=input_pad_7, filters=int((num_filt * 128)), kernel_size=[7, int((0.7 * yInput))], is_training=is_training)
    if ('temporal' in type):
        # Temporal blocks over 128 / 64 / 32-frame spans.
        s1 = tempo_block(inputs=normalized_input, filters=int((num_filt * 32)), kernel_size=[128, 1], is_training=is_training)
        s2 = tempo_block(inputs=normalized_input, filters=int((num_filt * 32)), kernel_size=[64, 1], is_training=is_training)
        s3 = tempo_block(inputs=normalized_input, filters=int((num_filt * 32)), kernel_size=[32, 1], is_training=is_training)
    if (type == '7774timbraltemporal'):
        return [f74, f77, s1, s2, s3]
class Conv3dGaussian(ConvNdGaussianMixin, torch.nn.Conv3d):
    """3D convolution with Gaussian-parameterized weights.

    The mixin supplies `_forward_impl`, parameterized by the functional
    convolution to apply (here F.conv3d).
    """

    def forward(self, input):
        return self._forward_impl(input, F.conv3d)
class ImageDataManager(DataManager):
    """Image re-id data manager: builds train/query/gallery loaders from CLI args."""

    data_type = 'image'

    def __init__(self, args):
        # ---- configuration pulled from args / fixed defaults ----
        root = args.datadir
        sources = args.data_train.lower().split('+')   # '+'-separated dataset names
        targets = args.data_test.lower().split('+')
        height = args.height
        width = args.width
        transforms = ['random_flip', 'random_crop']
        norm_mean = [0.485, 0.456, 0.406]  # ImageNet normalization
        norm_std = [0.229, 0.224, 0.225]
        use_gpu = (not args.cpu)
        split_id = 0
        combineall = False
        batch_size_train = (args.batchid * args.batchimage)
        num_instances = args.batchimage
        batch_size_test = args.batchtest
        workers = args.nThread
        train_sampler = 'random'
        cuhk03_labeled = args.cuhk03_labeled
        cuhk03_classic_split = False
        market1501_500k = False
        # Optional augmentations / sampler selection.
        if args.random_erasing:
            transforms.append('random_erase')
        if args.cutout:
            transforms.append('cutout')
        if args.sampler:
            train_sampler = 'RandomIdentitySampler'
        super(ImageDataManager, self).__init__(sources=sources, targets=targets, height=height, width=width, transforms=transforms, norm_mean=norm_mean, norm_std=norm_std, use_gpu=use_gpu)
        # ---- training (source) datasets, concatenated via sum() ----
        print('=> Loading train (source) dataset')
        trainset = []
        for name in self.sources:
            trainset_ = init_image_dataset(name, transform=self.transform_tr, mode='train', combineall=combineall, root=root, split_id=split_id, cuhk03_labeled=cuhk03_labeled, cuhk03_classic_split=cuhk03_classic_split, market1501_500k=market1501_500k)
            trainset.append(trainset_)
        trainset = sum(trainset)
        self._num_train_pids = trainset.num_train_pids
        self._num_train_cams = trainset.num_train_cams
        train_sampler = build_train_sampler(trainset.train, train_sampler, batch_size=batch_size_train, num_instances=num_instances)
        self.train_loader = DataloaderX(trainset, sampler=train_sampler, batch_size=batch_size_train, shuffle=False, num_workers=workers, pin_memory=self.use_gpu, drop_last=True)
        # ---- test (target) datasets: one query + gallery loader per target ----
        print('=> Loading test (target) dataset')
        self.testloader = {name: {'query': None, 'gallery': None} for name in self.targets}
        self.testdataset = {name: {'query': None, 'gallery': None} for name in self.targets}
        for name in self.targets:
            queryset = init_image_dataset(name, transform=self.transform_te, mode='query', combineall=combineall, root=root, split_id=split_id, cuhk03_labeled=cuhk03_labeled, cuhk03_classic_split=cuhk03_classic_split, market1501_500k=market1501_500k)
            self.testloader[name]['query'] = DataloaderX(queryset, batch_size=batch_size_test, shuffle=False, num_workers=workers, pin_memory=self.use_gpu, drop_last=False)
            galleryset = init_image_dataset(name, transform=self.transform_te, mode='gallery', combineall=combineall, verbose=False, root=root, split_id=split_id, cuhk03_labeled=cuhk03_labeled, cuhk03_classic_split=cuhk03_classic_split, market1501_500k=market1501_500k)
            self.testloader[name]['gallery'] = DataloaderX(galleryset, batch_size=batch_size_test, shuffle=False, num_workers=workers, pin_memory=self.use_gpu, drop_last=False)
            # NOTE(review): these convenience attributes are overwritten each
            # iteration, so they end up referring to the LAST target only.
            self.query_loader = self.testloader[name]['query']
            self.test_loader = self.testloader[name]['gallery']
            self.galleryset = galleryset
            self.queryset = queryset
            self.testdataset[name]['query'] = queryset.query
            self.testdataset[name]['gallery'] = galleryset.gallery
        # Expose the number of identities to downstream model construction.
        args.num_classes = self.num_train_pids
        print('\n')
        print('  Summary  ')
        print('  train            : {}'.format(self.sources))
        print('  # train datasets : {}'.format(len(self.sources)))
        print('  # train ids      : {}'.format(self.num_train_pids))
        print('  # train images   : {}'.format(len(trainset)))
        print('  # train cameras  : {}'.format(self.num_train_cams))
        print('  test             : {}'.format(self.targets))
        print('  # query images   : {}'.format(len(queryset)))
        print('  # gallery images : {}'.format(len(galleryset)))
        print('  ')
        print('\n')
def ReadFileSL(x_axis, batchInterval, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity):
    """Read StreamLedger throughput series, randomizing one workload knob per point.

    For each x_axis entry, one of (deposit_ratio, key_skewness, abort_ratio,
    NUM_ITEMS) is randomly replaced by a value from its range, then throughputs
    are read for the GS-variant (y[0], depends on isCyclic), TStream (y[1]) and
    PAT (y[2]).

    NOTE(review): `tthread` is read but is not a parameter -- it must be a
    module-level global; confirm it is defined before calling this function.

    Returns a list of three throughput lists.
    """

    def _path(algo, items, deposit, skew, abort):
        input_events = (tthread * batchInterval)
        return getPathSL(algo, input_events, tthread, items, deposit, skew, overlap_ratio, abort, isCyclic, complexity)

    def _read(path):
        # Throughput is on the first line as "<label>: <value>".
        # BUG FIX: files were opened via open(...).readlines() and never closed.
        with open(path) as f:
            return float(f.readline().split(': ')[1])

    deposit_ratio_range = [25, 50, 75]
    key_skewness_range = [25, 50, 75]
    abort_ratio_range = [1, 10, 100]
    NUM_ITEMS_range = [12288, 122880, 1228800]
    random_setting = ['deposit_ratio', 'key_skewness', 'abort_ratio', 'NUM_ITEMS']
    y = [[] for _ in range(3)]
    for _ in x_axis:
        # Start from the baseline knobs, then randomize exactly one of them.
        # (random.choice call order is preserved from the original for
        # reproducibility under a fixed seed.)
        new_deposit_ratio = deposit_ratio
        new_key_skewness = key_skewness
        new_abort_ratio = abort_ratio
        new_NUM_ITEMS = NUM_ITEMS
        setting = random.choice(random_setting)
        if (setting == 'deposit_ratio'):
            new_deposit_ratio = random.choice(deposit_ratio_range)
        elif (setting == 'key_skewness'):
            new_key_skewness = random.choice(key_skewness_range)
        elif (setting == 'abort_ratio'):
            new_abort_ratio = random.choice(abort_ratio_range)
        elif (setting == 'NUM_ITEMS'):
            new_NUM_ITEMS = random.choice(NUM_ITEMS_range)
        if (isCyclic == 'true'):
            path = _path('OPGSA', new_NUM_ITEMS, new_deposit_ratio, new_key_skewness, new_abort_ratio)
            print(path)  # original logs the path only for the cyclic variant
            y[0].append(_read(path))
        elif (isCyclic == 'false'):
            y[0].append(_read(_path('GSA', new_NUM_ITEMS, new_deposit_ratio, new_key_skewness, new_abort_ratio)))
        else:
            print('error')
        y[1].append(_read(_path('TStream', new_NUM_ITEMS, new_deposit_ratio, new_key_skewness, new_abort_ratio)))
        y[2].append(_read(_path('PAT', new_NUM_ITEMS, new_deposit_ratio, new_key_skewness, new_abort_ratio)))
    print(y)
    return y
def FORCESNLPsolver_normal_solve(params_arg):
    """Call the generated FORCESPRO NLP solver through ctypes.

    Marshals *params_arg* (dict of numpy arrays / scalars) into the solver's
    ctypes params struct, routes solver stdout through a platform-appropriate
    C FILE*, and returns (outputs dict, exitflag, info struct).
    """
    global _lib
    params_py = FORCESNLPsolver_normal_params_ctypes()
    for par in params_arg:
        try:
            if isinstance(getattr(params_py, par), ctypes.Array):
                # Array parameter: coerce to the expected dtype in Fortran
                # (column-major) order, flatten, and copy into the ctypes array.
                params_arg[par] = np.require(params_arg[par], dtype=FORCESNLPsolver_normal_params_types[par], requirements='F')
                setattr(params_py, par, npct.as_ctypes(np.reshape(params_arg[par], np.size(params_arg[par]), order='F')))
            else:
                # Scalar parameter: assign directly.
                setattr(params_py, par, params_arg[par])
        except:
            raise ValueError((('Parameter ' + par) + ' does not have the appropriate dimensions or data type. Please use numpy arrays for parameters.'))
    outputs_py = FORCESNLPsolver_normal_outputs_ctypes()
    info_py = FORCESNLPsolver_normal_info()
    if (sys.version_info.major == 2):
        # Python 2: the solver can write directly to sys.stdout's FILE*
        # (except on Windows, where no FILE* is passed).
        if sys.platform.startswith('win'):
            fp = None
        else:
            fp = sys.stdout
        try:
            PyFile_AsFile.restype = ctypes.POINTER(FILE)
            exitflag = _lib.FORCESNLPsolver_normal_solve(ctypes.byref(params_py), ctypes.byref(outputs_py), ctypes.byref(info_py), PyFile_AsFile(fp), _lib.FORCESNLPsolver_normal_casadi2forces)
        except:
            raise
    elif (sys.version_info.major == 3):
        # Python 3: no PyFile_AsFile, so open a temp file via the platform's
        # libc, let the solver write into it, then echo it to Python stdout.
        if sys.platform.startswith('win'):
            libc = ctypes.cdll.msvcrt
        elif sys.platform.startswith('darwin'):
            libc = ctypes.CDLL('libc.dylib')
        else:
            libc = ctypes.CDLL('libc.so.6')
        cfopen = getattr(libc, 'fopen')
        cfopen.restype = ctypes.POINTER(FILE)
        cfopen.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
        fp = cfopen('stdout_temp.txt'.encode('utf-8'), 'w'.encode('utf-8'))
        try:
            if sys.platform.startswith('win'):
                # NOTE(review): on Windows no FILE* is passed, so solver output
                # is not captured there.
                exitflag = _lib.FORCESNLPsolver_normal_solve(ctypes.byref(params_py), ctypes.byref(outputs_py), ctypes.byref(info_py), None, _lib.FORCESNLPsolver_normal_casadi2forces)
            else:
                exitflag = _lib.FORCESNLPsolver_normal_solve(ctypes.byref(params_py), ctypes.byref(outputs_py), ctypes.byref(info_py), fp, _lib.FORCESNLPsolver_normal_casadi2forces)
            libc.fclose(fp)
            fptemp = open('stdout_temp.txt', 'r')
            print(fptemp.read())
            fptemp.close()
        except:
            raise
    # Convert the ctypes output arrays back to numpy views.
    for out in FORCESNLPsolver_normal_outputs:
        FORCESNLPsolver_normal_outputs[out] = npct.as_array(getattr(outputs_py, out))
    return (FORCESNLPsolver_normal_outputs, int(exitflag), info_py)
def _is_discrete(space: jaxmarl_spaces.Space) -> bool:
    """Return True if *space* is a Discrete space from either gymnax or jaxmarl."""
    discrete_types = (gymnax_spaces.Discrete, jaxmarl_spaces.Discrete)
    return isinstance(space, discrete_types)
class SpladeEvaluater(Trainer):
    """Evaluation-only Trainer: returns logits scaled by 100, rounded and cast
    to int32; never computes loss or labels."""

    # Rounding applied to the scaled logits before the int cast.
    rounding_func = torch.round

    def prediction_step(self, model, inputs: Dict[(str, Union[(torch.Tensor, Any)])], prediction_loss_only: bool, ignore_keys: Optional[List[str]]=None) -> Tuple[(Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor])]:
        # This subclass is for plain evaluation only -- loss-only prediction
        # and key filtering are unsupported by design.
        assert (prediction_loss_only == False)
        assert (ignore_keys is None)
        inputs = self._prepare_inputs(inputs)
        with torch.no_grad():
            loss = None
            with self.autocast_smart_context_manager():
                logits = model(**inputs).detach().contiguous()
            # Scale by 100 before rounding so two decimal places of score
            # precision survive the integer cast.
            logits = self.rounding_func((logits * 100)).type(torch.int32)
        return (loss, logits, None)
class IndepAnisotropicGaussianUVLoss(nn.Module):
    """Summed negative log-likelihood of (u, v) offsets under a per-pixel
    anisotropic 2D Gaussian whose covariance is sigma^2*I + r r^T, with
    r = (kappa_u, kappa_v) and sigma^2 bounded below via softplus."""

    def __init__(self, sigma_lower_bound: float):
        super(IndepAnisotropicGaussianUVLoss, self).__init__()
        self.sigma_lower_bound = sigma_lower_bound
        self.log2pi = math.log((2 * math.pi))

    def forward(self, u: torch.Tensor, v: torch.Tensor, sigma_u: torch.Tensor, kappa_u_est: torch.Tensor, kappa_v_est: torch.Tensor, target_u: torch.Tensor, target_v: torch.Tensor):
        # Positive variance, bounded below by sigma_lower_bound.
        variance = F.softplus(sigma_u) + self.sigma_lower_bound
        # Residuals and their squared norm.
        du = u - target_u
        dv = v - target_v
        dist_sq = du.pow(2) + dv.pow(2)
        # Anisotropy direction r and the residual's projection onto it.
        r_norm_sq = kappa_u_est.pow(2) + kappa_v_est.pow(2)
        projection = du * kappa_u_est + dv * kappa_v_est
        projection_sq = projection.pow(2)
        # det(sigma^2 I + r r^T) = sigma^2 (sigma^2 + |r|^2)
        denom = variance * (variance + r_norm_sq)
        nll = 0.5 * (self.log2pi + torch.log(denom) + dist_sq / variance - projection_sq / denom)
        return nll.sum()
# NOTE(review): this bare attribute line appears mangled by extraction -- it
# was presumably the decorator `@pytest.mark.slow`.
.slow
def test_harmonic_oscillator_vmc_ibp(caplog):
    """End-to-end VMC training on a 1D harmonic oscillator with the
    integration-by-parts ('ibp') local energy; checks the learned parameter
    approaches sqrt(spring_constant)."""
    model_omega = 5
    spring_constant = 1.5
    # Scale chains with available devices so the test runs on any host.
    nchains = (100 * jax.local_device_count())
    nburn = 100
    nepochs = 100
    nsteps_per_param_update = 5
    std_move = 0.25
    learning_rate = 0.001
    (log_psi_model, params, random_particle_positions, amplitudes, key) = _make_initial_params_and_data(model_omega, nchains)
    data = make_simple_position_amplitude_data(random_particle_positions, amplitudes)
    local_energy_fn = qho.make_harmonic_oscillator_local_energy(spring_constant, log_psi_model.apply, local_energy_type='ibp')
    (_, params, _, _) = sgd_vmc_loop_with_logging(caplog, data, params, key, nchains, nburn, nepochs, nsteps_per_param_update, std_move, learning_rate, log_psi_model, local_energy_fn, local_energy_type='ibp')
    # Loose tolerance: stochastic optimization, only rough convergence expected.
    np.testing.assert_allclose(jax.tree_util.tree_leaves(params)[0], jnp.sqrt(spring_constant), atol=1.0)
class BaseMock():
    """Minimal stand-in for a robot base: fake state with the bumper off and a
    no-op relative-motion command."""

    def __init__(self, *args, **kwargs):
        state = mock.MagicMock()
        state.bumper = False
        self.base_state = state

    def go_to_relative(self, *args, **kwargs):
        """Accept and ignore any motion command."""
        return None
class L2Regularization(Regularizer):
    """L2 weight decay applied explicitly around the optimizer step.

    pre_op adds value*p to each gradient before the step (classic L2 penalty);
    post_op shrinks each parameter by value*p after the step (decoupled decay).
    """

    def __init__(self, model, value=0.001, filter={'parameter_name': is_not_bias, 'module': is_not_bn}, pre_op=True, post_op=False, **kwargs):
        # Default filter skips biases and batch-norm parameters.
        super(L2Regularization, self).__init__(model, value, filter=filter, **kwargs)
        self.pre_op = pre_op
        self.post_op = post_op

    def pre_step(self):
        """Add the L2 penalty gradient (value * p) before the optimizer step."""
        if self.pre_op:
            with torch.no_grad():
                for (_, p) in self._named_parameters:
                    # BUG FIX: Tensor.add_(scalar, tensor) is the removed legacy
                    # signature; use the alpha keyword (same math: grad += value*p).
                    p.grad.add_(p, alpha=self.value)
            if self.log:
                logging.debug('L2 penalty of %s was applied pre optimization step', self.value)

    def post_step(self):
        """Shrink parameters in place (p -= value * p) after the optimizer step."""
        if self.post_op:
            with torch.no_grad():
                for (_, p) in self._named_parameters:
                    # BUG FIX: legacy add_(scalar, tensor) -> alpha keyword.
                    p.add_(p, alpha=(- self.value))
            if self.log:
                logging.debug('L2 penalty of %s was applied post optimization step', self.value)
class BottomUpEnsembling(BaseEstimator):
    """Bottom-up change-point detection.

    Starts from a fine partition of the signal (leaves of a binary tree grown
    by repeated bisection), then greedily merges the adjacent pair with the
    smallest merge gain until a stopping rule is met (target number of
    breakpoints, penalty threshold, or residual-error budget).
    """

    def __init__(self, model='l2', custom_cost=None, min_size=2, jump=5, params=None):
        # Cost: a user-supplied BaseCost instance, or one built by name.
        if ((custom_cost is not None) and isinstance(custom_cost, BaseCost)):
            self.cost = custom_cost
        elif (params is None):
            self.cost = cost_factory(model=model)
        else:
            self.cost = cost_factory(model=model, **params)
        # Segments may never be shorter than the cost's own minimum.
        self.min_size = max(min_size, self.cost.min_size)
        self.jump = jump  # candidate breakpoints restricted to this grid
        self.n_samples = None
        self.signal = None
        self.leaves = None
        # Memoized pairwise merge; cache is cleared on each fit().
        self.merge = lru_cache(maxsize=None)(self._merge)

    def _grow_tree(self):
        """Bisect the signal repeatedly (always splitting the longest segment
        closest to its midpoint, on the jump grid) and return the leaf nodes."""
        partition = [(0, self.n_samples)]
        stop = False
        while (not stop):
            stop = True
            # Split the currently longest segment.
            (start, end) = max(partition, key=(lambda t: (t[1] - t[0])))
            mid = ((start + end) * 0.5)
            bkps = list()
            for bkp in range(start, end):
                if ((bkp % self.jump) == 0):
                    # Both resulting halves must respect min_size.
                    if (((bkp - start) >= self.min_size) and ((end - bkp) >= self.min_size)):
                        bkps.append(bkp)
            if (len(bkps) > 0):
                # Choose the admissible breakpoint nearest the midpoint.
                bkp = min(bkps, key=(lambda x: abs((x - mid))))
                partition.remove((start, end))
                partition.append((start, bkp))
                partition.append((bkp, end))
                stop = False
        partition.sort()
        # Wrap each final segment in a leaf node carrying its cost.
        leaves = list()
        for (start, end) in partition:
            val = self.cost.error(start, end)
            leaf = Bnode(start, end, val)
            leaves.append(leaf)
        return leaves

    def _merge(self, left, right):
        """Merge two adjacent nodes into a parent spanning both segments."""
        assert (left.end == right.start), 'Segments are not contiguous.'
        (start, end) = (left.start, right.end)
        val = self.cost.error(start, end)
        node = Bnode(start, end, val, left=left, right=right)
        return node

    def _seg(self, n_bkps=None, pen=None, epsilon=None):
        """Greedy agglomeration: repeatedly merge the cheapest adjacent pair
        until the active stopping criterion holds; return {(start, end): cost}."""
        leaves = list(self.leaves)
        stop = False
        while (not stop):
            stop = True
            leaves.sort(key=(lambda n: n.start))
            # Candidate merges of every adjacent pair (memoized).
            merged = (self.merge(left, right) for (left, right) in pairwise(leaves))
            try:
                leaf = min(merged, key=(lambda n: n.gain))
            except ValueError:
                # Single segment left: nothing to merge.
                break
            if (n_bkps is not None):
                if (len(leaves) > (n_bkps + 1)):
                    stop = False
            elif (pen is not None):
                if (leaf.gain < pen):
                    stop = False
            elif (epsilon is not None):
                if (sum((leaf_tmp.val for leaf_tmp in leaves)) < epsilon):
                    stop = False
            if (not stop):
                # Replace the two children with the merged node.
                leaves.remove(leaf.left)
                leaves.remove(leaf.right)
                leaves += [leaf]
        partition = {(leaf.start, leaf.end): leaf.val for leaf in leaves}
        return partition

    def fit(self, signal):
        """Fit the cost to *signal* and grow the initial fine partition."""
        self.cost.fit(signal)
        self.merge.cache_clear()
        if (signal.ndim == 1):
            (n_samples,) = signal.shape
        else:
            (n_samples, _) = signal.shape
        self.n_samples = n_samples
        self.leaves = self._grow_tree()
        return self

    def predict(self, n_bkps=None, pen=None, epsilon=None):
        """Return the sorted breakpoint list (exactly one criterion required)."""
        msg = 'Give a parameter.'
        assert any(((param is not None) for param in (n_bkps, pen, epsilon))), msg
        partition = self._seg(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
        bkps = sorted((e for (s, e) in partition.keys()))
        return bkps

    def fit_predict(self, signal, n_bkps=None, pen=None, epsilon=None):
        """Convenience: fit(signal) then predict(...)."""
        self.fit(signal)
        return self.predict(n_bkps=n_bkps, pen=pen, epsilon=epsilon)
def named_relevance(module, prefix='', **kwargs):
    """Yield (name, detached relevance tensor) for every BaseARD submodule of *module*.

    Extra keyword arguments are forwarded to each submodule's relevance().
    """
    for name, submodule in module.named_modules(prefix=prefix):
        if not isinstance(submodule, BaseARD):
            continue
        yield name, submodule.relevance(**kwargs).detach()
def test_concat_cell():
    """ConcatCell output matches the requested out_size, defaulting to the
    first input's spatial size when out_size is omitted."""
    feat_large = torch.randn([2, 256, 32, 32])
    feat_small = torch.randn([2, 256, 16, 16])
    cell = ConcatCell(256, 256)
    # Explicit out_size: match the large input, then the small one.
    merged = cell(feat_large, feat_small, out_size=feat_large.shape[-2:])
    assert merged.size() == feat_large.size()
    merged = cell(feat_large, feat_small, out_size=feat_small.shape[-2:])
    assert merged.size() == feat_small.size()
    # Default out_size follows the first input.
    merged = cell(feat_large, feat_small)
    assert merged.size() == feat_large.size()
def rla_resnet152(rla_channel=32):
    """Build an RLA-ResNet-152 (Bottleneck blocks, [3, 8, 36, 3] stages).

    NOTE(review): `rla_channel` is accepted but never forwarded to RLA_ResNet --
    confirm whether it should be passed through or whether 32 is baked into the
    model's own default.
    """
    print('Constructing rla_resnet152......')
    model = RLA_ResNet(RLA_Bottleneck, [3, 8, 36, 3])
    return model
def power_analysis_dataset(system_scores, sample_nums=None, trial_num=1000, num_workers=32, verbose=False):
    """Run pairwise statistical power analysis over all system pairs and print
    a table of mean power per score-difference quintile and sample size.

    system_scores maps system name -> per-example score list.
    """
    if sample_nums is None:
        sample_nums = [10, 50, 100, 200, 300, 500, 700, 1000, 10000]
    system_names = sorted(list(system_scores.keys()))
    result = []
    # Power analysis for every unordered pair of systems.
    for system1, system2 in list(combinations(system_names, 2)):
        print('{} vs {}'.format(system1, system2))
        scores_a = np.array(system_scores[system1])
        scores_b = np.array(system_scores[system2])
        power = power_analysis(scores_a, scores_b, num_workers=num_workers, sample_nums=sample_nums, trial_num=trial_num, verbose=verbose)
        result.append({'system1': system1, 'system2': system2, 'system1_score': scores_a.mean(), 'system2_score': scores_b.mean(), 'score_diff': abs((scores_a.mean() - scores_b.mean())), 'power': power})
    # Bucket the pairs into quintiles of the score difference.
    diffs = np.array([entry['score_diff'] for entry in result])
    cutoffs = np.percentile(diffs, [0, 20, 40, 60, 80, 100])
    bins = [
        [entry for entry in result if cutoffs[i] <= entry['score_diff'] < cutoffs[i + 1]]
        for i in range(len(cutoffs) - 1)
    ]
    # One row per sample size, one column per quintile bucket.
    outputs = [(['sample_num'] + [str(idx) for idx in range(len(bins))])]
    for sample_num in sample_nums:
        row = [sample_num]
        for bucket in bins:
            power = np.mean([entry['power'][sample_num] for entry in bucket])
            row.append(f'{power:.4f}')
        outputs.append(row)
    print(tabulate(outputs, headers='firstrow', tablefmt='github'))
class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
    """SSD feature extractor backed by a MobileNet-V1 base network."""

    def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, batch_norm_trainable=True, reuse_weights=None):
        # All arguments are forwarded unchanged to the SSDFeatureExtractor base class.
        super(SSDMobileNetV1FeatureExtractor, self).__init__(is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams, batch_norm_trainable, reuse_weights)

    def preprocess(self, resized_inputs):
        # Map pixel values from [0, 255] into [-1, 1], the range the backbone expects.
        return (((2.0 / 255.0) * resized_inputs) - 1.0)

    def extract_features(self, preprocessed_inputs):
        """Extract SSD multi-resolution feature maps.

        Args:
            preprocessed_inputs: rank-4 float tensor [batch, height, width, channels];
                height and width must each be at least 33 (enforced at runtime).

        Returns:
            The feature map tensors (values of an ordered feature-map dict).
        """
        preprocessed_inputs.get_shape().assert_has_rank(4)
        # Runtime guard on spatial size; attached via control_dependencies below.
        shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.'])
        # Two MobileNet endpoints are reused directly (layer_depth -1) and four
        # extra conv layers of decreasing depth are appended on top.
        feature_map_layout = {'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''], 'layer_depth': [(- 1), (- 1), 512, 256, 256, 128]}
        with tf.control_dependencies([shape_assert]):
            with slim.arg_scope(self._conv_hyperparams):
                # NOTE(review): fused batch norm is explicitly disabled here —
                # presumably for compatibility with this hyperparameter setup; confirm.
                with slim.arg_scope([slim.batch_norm], fused=False):
                    with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope:
                        (_, image_features) = mobilenet_v1.mobilenet_v1_base(ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, scope=scope)
                        feature_maps = feature_map_generators.multi_resolution_feature_maps(feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features)
        return feature_maps.values()
@pytest.mark.parametrize('training', [True, False, None])
def test_cplx_concatenated_casting_float_onnx_export(training):
    """ONNX export/inference round-trip for the concatenated-real <-> cplx casts.

    float32 must export and run; float64 is expected to fail because double is
    not implemented in ONNX.
    """
    module = torch.nn.Sequential(casting.ConcatenatedRealToCplx(), nn.CplxIdentity(), casting.CplxToConcatenatedReal())
    inp = torch.randn(2, 16, 256)
    do_onnx_export_test(module.float(), inp.float(), training=training)
    do_onnx_inference_test(module.float(), inp.float(), training=training)
    # Bug fix: `pytest.xfail(...)` is imperative — calling it raises XFailed
    # immediately, even inside a `with` statement, so the double-precision
    # calls below were never executed.  `pytest.raises` actually runs them and
    # asserts that they fail (double is not implemented in ONNX).
    with pytest.raises(Exception):
        do_onnx_export_test(module.double(), inp.double(), training=training)
    with pytest.raises(Exception):
        do_onnx_inference_test(module.double(), inp.double(), training=training)
class VizWizDataset(VQA2Dataset):
    """VQA2-style dataset for VizWiz; adds VizWiz image-id handling and
    EvalAI-format predictions."""

    def __init__(self, dataset_type, imdb_file_index, config, *args, **kwargs):
        super().__init__(dataset_type, imdb_file_index, config, *args, **kwargs)
        self._name = 'vizwiz'

    def load_item(self, idx):
        """Load a sample and, when the imdb entry carries one, attach its image name as image_id."""
        sample = super().load_item(idx)
        sample_info = self.imdb[idx]
        if ('image_name' in sample_info):
            sample.image_id = sample_info['image_name']
        return sample

    def format_for_evalai(self, report):
        """Convert a prediction report into the EvalAI submission format.

        Returns a list of {'image': ..., 'answer': ...} dicts, one per sample.
        """
        answers = report.scores.argmax(dim=1)
        predictions = []
        answer_space_size = self.answer_processor.get_true_vocab_size()
        for (idx, image_id) in enumerate(report.image_id):
            answer_id = answers[idx].item()
            if (answer_id >= answer_space_size):
                # Indices past the fixed vocabulary point into this sample's
                # context (OCR) tokens — copy-mechanism style answers.
                answer_id -= answer_space_size
                answer = report.context_tokens[idx][answer_id]
            else:
                answer = self.answer_processor.idx2word(answer_id)
            if (answer == self.context_processor.PAD_TOKEN):
                # A padding prediction means no usable answer.
                answer = 'unanswerable'
            # Rebuild the VizWiz image filename from the numeric tail of image_id.
            predictions.append({'image': ('_'.join((['VizWiz'] + image_id.split('_')[2:])) + '.jpg'), 'answer': answer})
        return predictions
def reject_outliers(data, m=3):
    """Remove values farther than ``m`` standard deviations from the mean.

    Args:
        data: iterable of numbers.
        m: width of the inlier band in standard deviations (default 3).

    Returns:
        list of the values within [mean - m*std, mean + m*std].
    """
    mean = np.mean(data)
    stdev = np.std(data)
    lower = mean - stdev * m
    upper = mean + stdev * m
    outliers = [d for d in data if d < lower or d > upper]
    # Bug fix: only warn when something is actually removed (the original
    # printed "removing 0 outliers: []" on clean data).
    if outliers:
        print(f'Warning: removing {len(outliers)} outliers:')
        print(outliers)
    return [d for d in data if lower <= d <= upper]
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 model.

    Args:
        pretrained: when True, download cached weights and load them into the model.
        **kwargs: forwarded to the ResNet constructor.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return net
    os.makedirs(default_cache_path, exist_ok=True)
    checkpoint_path = download_from_url(model_urls['resnet50'], root=default_cache_path)
    net.load_state_dict(torch.load(checkpoint_path))
    return net
# NOTE(review): the bare `()` below looks like the residue of a stripped CLI
# decorator (e.g. `@app.command()`) — confirm against the original file.
()
def main(source: LoaderSwitch, split: str='test', model: str=None, seed: int=42):
    """Interactively view codebook reconstructions of frames from ``source``.

    NOTE(review): ``split`` is accepted but unused in the visible body — confirm.
    """
    tf.random.set_seed(seed)
    codebook_model = load_model(model)

    def get_reconstructed_image(batch):
        # Run a batch of frames through the codebook model and clip to [-1, 1].
        x = tf.image.convert_image_dtype(batch['frames'], 'float32')
        x = codebook_model(x)[0]
        x = tf.clip_by_value(x, (- 1), 1)
        return x

    dataset = loader_to_dataset(source)
    # Rescale frames from [0, 1] to [-1, 1] before they reach the model/viewer.
    dataset = dataset.map((lambda x: dict_replace(x, 'frames', ((2 * tf.image.convert_image_dtype(x['frames'], 'float32')) - 1))))
    dataset = dataset.repeat((- 1))  # loop over the data forever
    data_source = EnvironmentViewerDataSource(dataset, get_reconstructed_image)
    viewer = Viewer(data_source)
    viewer.start()
def resnext50_32x4d(pretrained: bool=False, progress: bool=True, **kwargs) -> nn.Module:
    """ResNeXt-50 32x4d: 32 cardinality groups of width 4 per bottleneck block."""
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
class Factory(BaseFactory):
    """Hourglass-network factory producing keypoint heatmaps and patch features."""

    # Default dimensionality of per-patch features.
    default_patch_feature_dim = 8

    @staticmethod
    def pt_defaults_scope_value():
        # Bug fix: this was a plain method with no `self` parameter, so calling
        # it as `self.pt_defaults_scope_value()` (as image2heatmap/image2feature
        # do) raised TypeError.  It uses no instance state, hence @staticmethod.
        return {'activation_fn': default_activation.current_value, 'batch_normalize': True, 'learned_moments_update_rate': 0.0003, 'variance_epsilon': 0.001, 'scale_after_normalization': True}

    def __init__(self, output_channels, options):
        super().__init__(output_channels, options)
        # Spatial sizes (pixels): the target crop and the actual network input.
        self.target_input_size = [96, 96]
        self.input_size = [80, 80]

    def image2heatmap(self, image_tensor):
        """Produce raw keypoint heatmaps (keypoint_num + 1 channels) via an hourglass net."""
        hgd = [{'type': 'conv2d', 'depth': 32, 'decoder_depth': (self.options['keypoint_num'] + 1), 'decoder_activation_fn': None}, {'type': 'conv2d', 'depth': 32}, {'type': 'skip', 'layer_num': 3}, {'type': 'pool', 'pool': 'max'}, {'type': 'conv2d', 'depth': 64}, {'type': 'conv2d', 'depth': 64}, {'type': 'skip', 'layer_num': 3}, {'type': 'pool', 'pool': 'max'}, {'type': 'conv2d', 'depth': 64}, {'type': 'conv2d', 'depth': 64}, {'type': 'skip', 'layer_num': 3}, {'type': 'pool', 'pool': 'max'}, {'type': 'conv2d', 'depth': 64}, {'type': 'conv2d', 'depth': 64}]
        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            raw_heatmap = hourglass(image_tensor, hgd, net_type=(self.options['hourglass_type'] if ('hourglass_type' in self.options) else None))
        return raw_heatmap

    def image2feature(self, image_tensor):
        """Produce a patch feature map, or None when patch features are disabled.

        NOTE(review): `self.patch_feature_dim` is not assigned in the visible
        code (only the class attribute `default_patch_feature_dim` is defined) —
        presumably the base class sets it from options; confirm.
        """
        if (self.patch_feature_dim == 0):
            return None
        hgd = [{'type': 'conv2d', 'depth': 32, 'decoder_depth': 64}, {'type': 'conv2d', 'depth': 64, 'decoder_depth': 64}, {'type': 'skip', 'layer_num': 2}, {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2}, {'type': 'conv2d', 'depth': 128}, {'type': 'skip', 'layer_num': 2}, {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2}, {'type': 'conv2d', 'depth': 256}, {'type': 'skip', 'layer_num': 2}, {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2}, {'type': 'conv2d', 'depth': 512}]
        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            feature_map = hourglass(image_tensor, hgd, net_type=(self.options['hourglass_type'] if ('hourglass_type' in self.options) else None))
        return feature_map
class SideOnly(MergeOperator):
    """Merge operator that discards the base encoding and returns the side encoding as-is."""

    def __call__(self, base_encoding, side_encoding, additional_encodings=None):
        # Bug fix: the default was a mutable list literal (lint B006); it was
        # never mutated, so a None default is behavior-identical and safe.
        # `base_encoding` and `additional_encodings` are accepted only for
        # interface compatibility with other MergeOperator implementations.
        return side_encoding
class HistoryManager():
    """Maintains a local sqlite cache of 5-minute coin market history and
    serves it as a pandas Panel indexed by (feature, coin, time).

    NOTE(review): ``pd.Panel`` was removed in pandas >= 0.25 — this class
    requires an old pinned pandas version; confirm before modernizing.
    """

    def __init__(self, coin_number, end, volume_average_days=1, volume_forward=0, online=True):
        # coin_number: how many coins to select (ranked by traded volume).
        # end: unix timestamp marking the end of the period of interest.
        # volume_average_days: window length (days) used when ranking by volume.
        # volume_forward: seconds by which the volume window is shifted back from `end`.
        # online: when True, missing history is fetched from the exchange via CoinList.
        self.initialize_db()
        self.__storage_period = FIVE_MINUTES  # native resolution of the stored rows
        self._coin_number = coin_number
        self._online = online
        if self._online:
            self._coin_list = CoinList(end, volume_average_days, volume_forward)
        self.__volume_forward = volume_forward
        self.__volume_average_days = volume_average_days
        self.__coins = None  # populated by get_global_panel()

    def coins(self):
        # Accessor for the coin list chosen by the last get_global_panel() call.
        # NOTE(review): likely a @property in the original project — decorators
        # appear stripped elsewhere in this file; confirm.
        return self.__coins

    def initialize_db(self):
        # Create the History table (one row per (date, coin)) if it does not exist.
        with sqlite3.connect(DATABASE_DIR) as connection:
            cursor = connection.cursor()
            cursor.execute('CREATE TABLE IF NOT EXISTS History (date INTEGER, coin varchar(20), high FLOAT, low FLOAT, open FLOAT, close FLOAT, volume FLOAT, quoteVolume FLOAT, weightedAverage FLOAT,PRIMARY KEY (date, coin));')
            connection.commit()

    def get_global_data_matrix(self, start, end, period=300, features=('close',)):
        """Same as get_global_panel but returned as a raw ndarray (.values)."""
        return self.get_global_panel(start, end, period, features).values

    def get_global_panel(self, start, end, period=300, features=('close',)):
        """Build a (feature, coin, time) Panel over [start, end].

        :param start, end: unix timestamps; rounded down to multiples of `period`.
        :param period: sampling period in seconds; must be one of the values
            accepted by __checkperiod.
        :param features: subset of ('close', 'open', 'volume', 'high', 'low').
        :raises ValueError: on an unsupported feature or wrong coin count.
        """
        # Align both endpoints to period boundaries.
        start = int((start - (start % period)))
        end = int((end - (end % period)))
        # Select coins by volume over a window ending volume_forward seconds before `end`.
        coins = self.select_coins(start=((end - self.__volume_forward) - (self.__volume_average_days * DAY)), end=(end - self.__volume_forward))
        self.__coins = coins
        # Ensure the local cache covers [start, end] for every selected coin.
        for coin in coins:
            self.update_data(start, end, coin)
        if (len(coins) != self._coin_number):
            raise ValueError(('the length of selected coins %d is not equal to expected %d' % (len(coins), self._coin_number)))
        logging.info(('feature type list is %s' % str(features)))
        self.__checkperiod(period)
        time_index = pd.to_datetime(list(range(start, (end + 1), period)), unit='s')
        panel = pd.Panel(items=features, major_axis=coins, minor_axis=time_index, dtype=np.float32)
        connection = sqlite3.connect(DATABASE_DIR)
        try:
            for (row_number, coin) in enumerate(coins):
                for feature in features:
                    # NOTE(review): `coin` and `period` are interpolated directly
                    # into the SQL text. The values come from this program's own
                    # DB, so injection risk is low, but parameterized queries
                    # would be safer — flagged, not changed here.
                    if (feature == 'close'):
                        # close of a bucket = close stored at the bucket's last 5-min row
                        sql = 'SELECT date+300 AS date_norm, close FROM History WHERE date_norm>={start} and date_norm<={end} and date_norm%{period}=0 and coin="{coin}"'.format(start=start, end=end, period=period, coin=coin)
                    elif (feature == 'open'):
                        # open of a bucket = open stored at the bucket's first 5-min row
                        sql = 'SELECT date+{period} AS date_norm, open FROM History WHERE date_norm>={start} and date_norm<={end} and date_norm%{period}=0 and coin="{coin}"'.format(start=start, end=end, period=period, coin=coin)
                    elif (feature == 'volume'):
                        # volume aggregates (SUM) over all 5-min rows in the bucket
                        sql = ('SELECT date_norm, SUM(volume)' + ' FROM (SELECT date+{period}-(date%{period}) AS date_norm, volume, coin FROM History) WHERE date_norm>={start} and date_norm<={end} and coin="{coin}" GROUP BY date_norm'.format(period=period, start=start, end=end, coin=coin))
                    elif (feature == 'high'):
                        # high aggregates (MAX) over all 5-min rows in the bucket
                        sql = ('SELECT date_norm, MAX(high)' + ' FROM (SELECT date+{period}-(date%{period}) AS date_norm, high, coin FROM History) WHERE date_norm>={start} and date_norm<={end} and coin="{coin}" GROUP BY date_norm'.format(period=period, start=start, end=end, coin=coin))
                    elif (feature == 'low'):
                        # low aggregates (MIN) over all 5-min rows in the bucket
                        sql = ('SELECT date_norm, MIN(low)' + ' FROM (SELECT date+{period}-(date%{period}) AS date_norm, low, coin FROM History) WHERE date_norm>={start} and date_norm<={end} and coin="{coin}" GROUP BY date_norm'.format(period=period, start=start, end=end, coin=coin))
                    else:
                        msg = ('The feature %s is not supported' % feature)
                        logging.error(msg)
                        raise ValueError(msg)
                    serial_data = pd.read_sql_query(sql, con=connection, parse_dates=['date_norm'], index_col='date_norm')
                    panel.loc[(feature, coin, serial_data.index)] = serial_data.squeeze()
                    # Fill gaps after each series is inserted.
                    panel = panel_fillna(panel, 'both')
        finally:
            connection.commit()
            connection.close()
        return panel

    def select_coins(self, start, end):
        """Return the top-N coins by traded volume over [start, end].

        Offline: ranked from the local History table. Online: ranked via the
        exchange through CoinList.
        """
        if (not self._online):
            logging.info(('select coins offline from %s to %s' % (datetime.fromtimestamp(start).strftime('%Y-%m-%d %H:%M'), datetime.fromtimestamp(end).strftime('%Y-%m-%d %H:%M'))))
            connection = sqlite3.connect(DATABASE_DIR)
            try:
                cursor = connection.cursor()
                cursor.execute('SELECT coin,SUM(volume) AS total_volume FROM History WHERE date>=? and date<=? GROUP BY coin ORDER BY total_volume DESC LIMIT ?;', (int(start), int(end), self._coin_number))
                coins_tuples = cursor.fetchall()
                if (len(coins_tuples) != self._coin_number):
                    # Only logged — get_global_panel raises later on a count mismatch.
                    logging.error('the sqlite error happend')
            finally:
                connection.commit()
                connection.close()
            coins = []
            for tuple in coins_tuples:  # NOTE(review): shadows the builtin `tuple`
                coins.append(tuple[0])
        else:
            coins = list(self._coin_list.topNVolume(n=self._coin_number).index)
        logging.debug(('Selected coins are: ' + str(coins)))
        return coins

    def __checkperiod(self, period):
        # Accept only the sampling periods the storage layout supports.
        if (period == FIVE_MINUTES):
            return
        elif (period == FIFTEEN_MINUTES):
            return
        elif (period == HALF_HOUR):
            return
        elif (period == TWO_HOUR):
            return
        elif (period == FOUR_HOUR):
            return
        elif (period == DAY):
            return
        else:
            raise ValueError('peroid has to be 5min, 15min, 30min, 2hr, 4hr, or a day')

    def update_data(self, start, end, coin):
        """Extend the cached history of `coin` so it covers [start, end].

        Fetches everything when no rows exist; otherwise fetches only the
        missing tail (when more than 10 storage periods behind `end`) and/or
        the missing head before the earliest cached row.
        """
        connection = sqlite3.connect(DATABASE_DIR)
        try:
            cursor = connection.cursor()
            min_date = cursor.execute('SELECT MIN(date) FROM History WHERE coin=?;', (coin,)).fetchall()[0][0]
            max_date = cursor.execute('SELECT MAX(date) FROM History WHERE coin=?;', (coin,)).fetchall()[0][0]
            if ((min_date == None) or (max_date == None)):
                # No cached rows for this coin yet: fetch the whole range.
                self.__fill_data(start, end, coin, cursor)
            else:
                if ((max_date + (10 * self.__storage_period)) < end):
                    if (not self._online):
                        raise Exception('Have to be online')
                    self.__fill_data((max_date + self.__storage_period), end, coin, cursor)
                if ((min_date > start) and self._online):
                    self.__fill_data(start, ((min_date - self.__storage_period) - 1), coin, cursor)
        finally:
            connection.commit()
            connection.close()

    def __fill_data(self, start, end, coin, cursor):
        # Fetch [start, end] in fixed-size chunks to keep each request bounded.
        duration = 7819200  # chunk size in seconds (~90.5 days)
        bk_start = start
        for bk_end in range(((start + duration) - 1), end, duration):
            self.__fill_part_data(bk_start, bk_end, coin, cursor)
            bk_start += duration
        if (bk_start < end):
            # Remainder shorter than one full chunk.
            self.__fill_part_data(bk_start, end, coin, cursor)

    def __fill_part_data(self, start, end, coin, cursor):
        # Download one chunk of chart data and insert it row by row.
        chart = self._coin_list.get_chart_until_success(pair=self._coin_list.allActiveCoins.at[(coin, 'pair')], start=start, end=end, period=self.__storage_period)
        logging.info(('fill %s data from %s to %s' % (coin, datetime.fromtimestamp(start).strftime('%Y-%m-%d %H:%M'), datetime.fromtimestamp(end).strftime('%Y-%m-%d %H:%M'))))
        for c in chart:
            if (c['date'] > 0):
                if (c['weightedAverage'] == 0):
                    # Fall back to the close price when no weighted average exists.
                    weightedAverage = c['close']
                else:
                    weightedAverage = c['weightedAverage']
                if ('reversed_' in coin):
                    # "reversed_" coins are quoted the other way around: store
                    # inverted prices and swap volume/quoteVolume accordingly.
                    cursor.execute('INSERT INTO History VALUES (?,?,?,?,?,?,?,?,?)', (c['date'], coin, (1.0 / c['low']), (1.0 / c['high']), (1.0 / c['open']), (1.0 / c['close']), c['quoteVolume'], c['volume'], (1.0 / weightedAverage)))
                else:
                    cursor.execute('INSERT INTO History VALUES (?,?,?,?,?,?,?,?,?)', (c['date'], coin, c['high'], c['low'], c['open'], c['close'], c['volume'], c['quoteVolume'], weightedAverage))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.