code stringlengths 101 5.91M |
|---|
def remove_skip_api(code: str) -> str:
    """Strip TF test-harness / interactive-only API fragments from a snippet.

    Each offending fragment is deleted in place; surrounding text is left
    untouched, so the result may contain dangling empty statements.
    """
    # Fragments that would start a test runner, flip TF global state, or
    # terminate the interpreter if the snippet were executed.
    skip_fragments = (
        'tf.test.main()',
        'tf.compat.v1.test.main()',
        'disable_eager_execution()',
        'disable_v2_behavior',
        'InteractiveSession',
        'exit()',
    )
    for fragment in skip_fragments:
        code = code.replace(fragment, '')
    return code
def set_gpu_idx():
    """Return the GPU index taken from the ``GPU`` environment variable.

    Falls back to -1 (meaning CPU / no GPU) with a warning when the
    variable is not set.
    """
    raw = os.environ.get('GPU')
    if raw is not None:
        return int(raw)
    logging.warning('GPU not found in environment variable, setting manually as -1')
    return -1
def load_model(opt='gptq'):
    """Instantiate the requested model variant.

    Args:
        opt: backend selector, one of 'pt' or 'gptq'.

    Raises:
        Exception: when ``opt`` names an unsupported backend.
    """
    if opt == 'pt':
        return load_pt_model()
    if opt == 'gptq':
        return load_gptq_model()
    raise Exception('not supported opt: {}'.format(opt))
class PIDParam():
    """Parameter bundle for a PID controller.

    NOTE(review): ``__post_init__`` and the bare class-level annotations are
    dataclass conventions, but no ``@dataclass`` decorator is visible here —
    confirm the decorator was not lost; without it ``__post_init__`` never
    runs and the gain fields are annotations only (no generated __init__).
    """
    kP: float  # proportional gain
    kI: float  # integral gain
    kD: float  # derivative gain
    # (low, high) pairs; each validated as low < high in __post_init__.
    antiwindup: tuple[(float, float)] = ((- 1), 1)
    setpoint_minmax: tuple[(float, float)] = ((- 1), 1)
    output_minmax: tuple[(float, float)] = ((- 1), 1)
    def __post_init__(self):
        # Sanity-check that every (min, max) pair is properly ordered.
        assert (self.antiwindup[0] < self.antiwindup[1])
        assert (self.setpoint_minmax[0] < self.setpoint_minmax[1])
        assert (self.output_minmax[0] < self.output_minmax[1])
def get_caller_name():
    """Name of the code object two stack frames above this call.

    Returns 'Class.method' when that frame has a ``self`` local, else the
    bare function name.
    """
    frame = inspect.stack()[2][0]
    method = frame.f_code.co_name
    _missing = object()
    owner = frame.f_locals.get('self', _missing)
    if owner is _missing:
        return method
    return f'{owner.__class__.__name__}.{method}'
def solve(input, pbar):
    """Randomized LLM-guided search for a step-by-step arithmetic solution.

    Repeatedly samples a known "remaining numbers" state, asks the generator
    for a next step, validates it, and either finishes (verifier is sure),
    expands the frontier (verifier says 'likely'), or discards the proposal.

    Args:
        input: the initial remaining-numbers string.
        pbar: progress bar, used only for logging via ``pbar.write``.

    Returns:
        (solved, iterations_used, final_output); ``solved`` is False with an
        empty output when ``args.trycnt`` attempts are exhausted.
    """
    records = [input]          # frontier of reachable states
    last_step = {}             # state -> step string that produced it
    f = {}                     # state -> parent state (for path reconstruction)
    forbidden = {input: []}    # state -> steps that must not be re-proposed
    for i in range(args.trycnt):
        try:
            # Sampling distribution over known states: the first half of the
            # budget biases heavily (p=0.5) toward the initial state; the
            # second half samples uniformly.
            p = numpy.zeros_like(records, dtype='float64')
            if i < (1 / 2) * args.trycnt:
                if len(records) > 1:
                    p.fill(0.5 / (len(records) - 1))
                    p[0] = 0.5
                else:
                    p[0] = 1.0
            else:
                p.fill(1.0 / len(records))
            tmp = numpy.random.choice(records, p=p)
            (success, out) = run(generate_program, temperature=1.0, max_tokens=64, thoughts=tmp, forbidden_steps=('\n'.join(forbidden[tmp]) if (len(forbidden[tmp]) > 0) else 'No Forbidden Steps\n'))
            if success:
                a = out['remaining_numbers'].strip().split('\n')[0].strip()
                # Reject steps containing anything but arithmetic characters.
                if re.search('[^0-9+\\-*/.(),=\\s]', out['next_step'].strip()):
                    continue
                # Reject blank proposals.
                if (not re.search('\\S', out['next_step'].strip())) or (not re.search('\\S', out['remaining_numbers'].strip())):
                    continue
                (_, judgement) = run(valid_program, temperature=0.1, max_tokens=128, remaining_numbers=tmp, intermediate_step=out['next_step'].strip(), valid_judgement=valid_judgement)
                if judgement['judgement'] == 'Invalid':
                    continue
                (_, verify_result) = run(verifier_program, temperature=0.7, max_tokens=256, remaining_numbers=a, valid_output=valid_output, is_sure=is_sure)
                if is_sure(verify_result['output']):
                    pbar.write(f"{tmp} -- {out['next_step'].strip()} -> {a}")
                    # Walk parents back to the original input to rebuild the
                    # full chain of steps, then put it in forward order.
                    tmp_steps = [verify_result['output_equation'].strip().split('\n')[0].strip()]
                    tmp_steps.append(out['next_step'].strip() + f' (left {a})')
                    while tmp != input:
                        tmp_steps.append(last_step[tmp] + f' (left {tmp})')
                        tmp = f[tmp]
                    tmp_steps.reverse()
                    # BUG FIX: keyword was misspelled 'temepratue', so the
                    # expansion call never received its temperature setting.
                    (_, expand_result) = run(expand_program, temperature=0.1, max_tokens=200, input=input, intermediate_steps='\n'.join(tmp_steps))
                    return (True, i, expand_result['output'])
                elif verify_result['output'] == 'likely':
                    a = a.strip()
                    if a not in records:
                        forbidden[tmp].append(out['next_step'].strip())
                        forbidden[a] = []
                        records.append(a)
                        f[a] = tmp
                        last_step[a] = out['next_step'].strip()
                        pbar.write(f"{tmp} -- {out['next_step'].strip()} -> {a}")
        except Exception as exception:
            # Best-effort loop: API/transient failures just consume one try.
            pbar.write('Something goes wrong when calling OpenAI API')
            continue
    return (False, args.trycnt, '')
class CplxLinearGaussian(GaussianMixin, CplxLinear):
    """Complex linear layer with a Gaussian local-reparameterization forward.

    During training the output is sampled as mu + eps * sqrt(s2), where the
    output variance s2 is |x|^2 pushed through exp(log_sigma2); at eval time
    the mean activation is returned unchanged.
    """
    def __init__(self, in_features, out_features, bias=True):
        super().__init__(in_features, out_features, bias=bias)
        # One log-variance parameter per weight entry.
        self.log_sigma2 = torch.nn.Parameter(torch.Tensor(*self.weight.shape))
        self.reset_variational_parameters()
    def forward(self, input):
        mu = super().forward(input)
        if not self.training:
            return mu
        # Variance of the output: squared magnitude of the input projected
        # through the per-weight variances.
        power = (input.real * input.real) + (input.imag * input.imag)
        s2 = F.linear(power, torch.exp(self.log_sigma2), None)
        noise = cplx.randn_like(s2)
        return mu + noise * torch.sqrt(torch.clamp(s2, 1e-08))
def main():
    """Entry point: build a DCShadowNet from CLI args and run the test phase."""
    args = parse_args()
    if args is None:
        exit()  # argument parsing failed; nothing to do
    model = DCShadowNet(args)
    model.build_model()
    if args.phase != 'test':
        return
    model.test()
    print(' [*] Test finished!')
def image_to_input_collated(output_size, dtype=np.float32):
    """Build an (obs_space -> (runner, Box)) thunk converting batched NHWC
    images to CUDA float tensors scaled to [-1, 1]."""
    def _thunk(obs_space):
        def runner(batch):
            # Square images only: H (dim 1) must equal W (dim 2).
            assert batch.shape[2] == batch.shape[1], 'Input image must be square, of the form: N,H,W,C'
            if isinstance(batch, torch.Tensor):
                tensor = torch.cuda.FloatTensor(batch.cuda())
            else:
                tensor = torch.cuda.FloatTensor(batch.copy()).cuda()
            # NHWC -> NCHW; bytes to [0, 1]; then rescale to [-1, 1].
            tensor = tensor.permute(0, 3, 1, 2) / 255.0
            return (2.0 * tensor) - 1.0
        return (runner, spaces.Box(-1, 1, output_size, dtype))
    return _thunk
def mock_transform(return_value, arg_list):
    """Create a stub transform for tests.

    The returned callable records every argument it receives into
    ``arg_list`` (mutated in place) and always answers ``return_value``.
    """
    def mock(arg):
        # Log the call, then return the canned value.
        arg_list.append(arg)
        return return_value
    return mock
def batch_norm(x, train_mode, scope='batch_norm'):
    """Thin wrapper around tf.contrib batch normalization using the
    project-wide defaults (eps=1e-5, learnable center and scale)."""
    return tf.contrib.layers.batch_norm(
        x,
        epsilon=1e-05,
        center=True,
        scale=True,
        scope=scope,
        is_training=train_mode,
    )
class L2Problem():
    """Base class for an L2-norm optimization problem.

    Subclasses implement ``__call__`` to produce residuals; the inner
    products below define the metric, and M1/M2 are identity preconditioners.
    """
    def __call__(self, x: TensorList) -> TensorList:
        raise NotImplementedError
    def ip_input(self, a, b):
        # BUG FIX: the operator between the two flattened tensors was missing
        # (a syntax error). The Euclidean inner product of each pair of
        # flattened tensors is restored with '@'; summing collapses the
        # TensorList to a scalar.
        return sum((a.view(-1) @ b.view(-1)))
    def ip_output(self, a, b):
        return sum((a.view(-1) @ b.view(-1)))
    def M1(self, x):
        # Identity preconditioner.
        return x
    def M2(self, x):
        return x
class RandomAgent(AbstractAgent):
    """Agent that ignores observations and samples uniformly random actions."""
    name = 'random'
    def __init__(self, env, *args, **kwargs):
        super(RandomAgent, self).__init__(*args, **kwargs)
        self.env = env
    def fit(self, num_iter):
        """Step the environment ``num_iter`` times with random actions.

        BUG FIX: ``num_iter`` was unconditionally overwritten with 10000
        (a leftover debug value), making the parameter dead.
        """
        for _ in range(num_iter):  # xrange -> range (Python 3)
            cmd = self.env.action_space.sample()
            self.env.step(cmd)
            # Stop early when the base class signals interruption.
            if self._break:
                return
def text_standardize(text):
    """Normalize punctuation and whitespace ahead of BPE tokenization.

    BUG FIX: the Unicode literals in the replace() calls were lost in an
    encoding mangle — ``replace('', '-')`` inserted '-' between every
    character and ``replace(' ', "'")`` destroyed all spaces. Restored the
    original dash/ellipsis/acute-accent normalizations.
    """
    text = text.replace('\u2014', '-')   # em dash
    text = text.replace('\u2013', '-')   # en dash
    text = text.replace('\u2015', '-')   # horizontal bar
    text = text.replace('\u2026', '...')  # ellipsis
    text = text.replace('\u00b4', "'")   # acute accent -> apostrophe
    # Pad runs of punctuation with spaces so they tokenize separately.
    text = re.sub('(-+|~+|!+|"+|;+|\\?+|\\++|,+|\\)+|\\(+|\\\\+|\\/+|\\*+|\\[+|\\]+|}+|{+|\\|+|_+)', ' \\1 ', text)
    # Normalize newlines to ' \n ' and collapse other whitespace runs.
    text = re.sub('\\s*\\n\\s*', ' \n ', text)
    text = re.sub('[^\\S\\n]+', ' ', text)
    return text.strip()
def main(cl_args):
    """Run weakly-supervised crack segmentation over a directory of images.

    Builds a ResNet-based classifier (R50/R101/R152), predicts a mask for
    every .jpg/.png in ``cl_args.img_path`` and writes PNG masks into
    ``cl_args.prediction_path``.
    """
    classifier_type = cl_args.classifier_type
    classifier_weight_path = cl_args.classifier_weight_path
    patch_size = int(cl_args.patch_size)
    stride_classifier = int(cl_args.stride_classifier)
    stride_thresholding = int(cl_args.stride_thresholding)
    img_path = cl_args.img_path
    prediction_path = cl_args.prediction_path
    device = cl_args.device
    os.makedirs(prediction_path, exist_ok=True)
    assert classifier_type in ['R50', 'R101', 'R152']
    with tf.device(f'/{device}'):
        weakly = WeaklySupervisedCrackSeg(
            classifier_type=classifier_type,
            classifier_weight_path=classifier_weight_path,
            patch_size=patch_size,
            stride_classifier=stride_classifier,
            stride_thresholding=stride_thresholding,
        )
        for filename in sorted(os.listdir(img_path)):
            if not filename.endswith(('.jpg', '.png')):
                continue
            print('Predicting File:', filename)
            img = np.array(load_img(os.path.join(img_path, filename), color_mode='rgb'))
            prediction = weakly.predict(img)
            out_name = os.path.splitext(filename)[0] + '.png'
            cv2.imwrite(os.path.join(prediction_path, out_name), prediction)
class MSDScaleBlock(nn.Module):
    """MSDNet scale block fusing two paths with the identity input.

    A stride-2 path consumes the previous (finer) scale and a stride-1 path
    the current scale; both contribute (out_channels - in_channels) // 2
    channels, concatenated with the untouched input.
    """
    def __init__(self, in_channels_prev, in_channels, out_channels, use_bottleneck, bottleneck_factor_prev, bottleneck_factor):
        super(MSDScaleBlock, self).__init__()
        assert out_channels > in_channels
        assert out_channels % 2 == 0
        # Channels added by the block, split evenly between the two paths.
        growth = out_channels - in_channels
        half_growth = growth // 2
        self.down_block = MSDBaseBlock(in_channels=in_channels_prev, out_channels=half_growth, stride=2, use_bottleneck=use_bottleneck, bottleneck_factor=bottleneck_factor_prev)
        self.curr_block = MSDBaseBlock(in_channels=in_channels, out_channels=half_growth, stride=1, use_bottleneck=use_bottleneck, bottleneck_factor=bottleneck_factor)
    def forward(self, x_prev, x):
        down = self.down_block(x_prev)
        same = self.curr_block(x)
        # Dense connectivity: keep the input alongside both new paths.
        return torch.cat((x, down, same), dim=1)
class LeakyReLU(KerasLayer):
    """Keras-style LeakyReLU wrapper layer.

    NOTE(review): the positional arguments forwarded to ``KerasLayer.__init__``
    appear to be (backend placeholder, alpha, input_shape); the leading
    ``None`` looks like a jvm/backend placeholder — confirm against the base
    class signature.
    """
    def __init__(self, alpha=0.01, input_shape=None, **kwargs):
        # alpha: negative-slope coefficient; input_shape normalised to a list.
        super(LeakyReLU, self).__init__(None, float(alpha), (list(input_shape) if input_shape else None), **kwargs)
def main(base_model_name, weights_file, image_source, predictions_file, img_format='jpg'):
    """Score images with a NIMA model and print/save mean-score predictions.

    Args:
        base_model_name: CNN backbone name for the NIMA model.
        weights_file: path to the trained NIMA weights.
        image_source: a single image file or a directory of images.
        predictions_file: optional path to save the JSON predictions.
        img_format: image extension to look for when scanning a directory.
    """
    if os.path.isfile(image_source):
        (image_dir, samples) = image_file_to_json(image_source)
    else:
        image_dir = image_source
        # BUG FIX: the extension was hard-coded to 'jpg', silently ignoring
        # the img_format argument for directory sources.
        samples = image_dir_to_json(image_dir, img_type=img_format)
    nima = Nima(base_model_name, weights=None)
    nima.build()
    nima.nima_model.load_weights(weights_file)
    # Batch size 64, 10 score buckets.
    data_generator = TestDataGenerator(samples, image_dir, 64, 10, nima.preprocessing_function(), img_format=img_format)
    predictions = predict(nima.nima_model, data_generator)
    for (i, sample) in enumerate(samples):
        sample['mean_score_prediction'] = calc_mean_score(predictions[i])
    print(json.dumps(samples, indent=2))
    if predictions_file is not None:
        save_json(samples, predictions_file)
class CountOps(AnalysisPass):
    """Analysis pass that counts each operation kind in a circuit DAG and
    stores the mapping in the ``count_ops`` property."""
    def run(self, dag):
        # dag.count_ops() returns an op-name -> occurrence-count mapping.
        self.property_set['count_ops'] = dag.count_ops()
class unetUp(nn.Module):
    """UNet decoder block: upsample the coarse input, concatenate skip
    connections, then fuse with a double convolution.

    NOTE(review): ``n_concat`` is accepted but never used — the fusion conv
    is fixed at 2 * out_size input channels (one upsampled + one skip tensor);
    confirm whether variable-arity concatenation was intended.
    """
    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        super(unetUp, self).__init__()
        # Fusion conv expects the upsampled features plus one skip tensor.
        self.conv = unetConv2((out_size * 2), out_size, False)
        if is_deconv:
            # Learned 2x upsampling.
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        # Initialise every child except the unetConv2 (it initialises itself).
        for m in self.children():
            if (m.__class__.__name__.find('unetConv2') != (- 1)):
                continue
            init_weights(m, init_type='kaiming')
    def forward(self, inputs0, *input):
        # inputs0: coarse features to upsample; *input: skip tensors to fuse.
        outputs0 = self.up(inputs0)
        for i in range(len(input)):
            outputs0 = torch.cat([outputs0, input[i]], 1)
        return self.conv(outputs0)
class TFHubertForCTC(metaclass=DummyObject):
    """Placeholder class used when TensorFlow is not installed; any attempt
    to construct it raises a helpful ImportError via ``requires_backends``."""
    # Backends that must be installed for the real class to be importable.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def diapreresnet56_cifar100(num_classes=100, **kwargs):
    """DIA-PreResNet-56 configured for CIFAR-100.

    Args:
        num_classes: number of classification classes.
        **kwargs: forwarded to the generic CIFAR model builder.
    """
    return get_diapreresnet_cifar(
        num_classes=num_classes,
        blocks=56,
        bottleneck=False,
        model_name='diapreresnet56_cifar100',
        **kwargs,
    )
class PConvModule(nn.Module):
    """SEPC pyramid convolution: each pyramid level is fused with its
    neighbours via three convolutions — Pconv[0] for the coarser level above
    (bilinearly upsampled back), Pconv[1] for the current level, and
    Pconv[2] (stride 2) for the finer level below.
    """
    def __init__(self, in_channels=256, out_channels=256, kernel_size=[3, 3, 3], dilation=[1, 1, 1], groups=[1, 1, 1], iBN=False, part_deform=False):
        super(PConvModule, self).__init__()
        self.iBN = iBN  # "integrated" BN shared across all pyramid levels
        self.Pconv = nn.ModuleList()
        # Padding keeps spatial size for the dilated kernel at stride 1; the
        # third conv additionally downsamples by 2.
        self.Pconv.append(sepc_conv(in_channels, out_channels, kernel_size=kernel_size[0], dilation=dilation[0], groups=groups[0], padding=((kernel_size[0] + ((dilation[0] - 1) * 2)) // 2), part_deform=part_deform))
        self.Pconv.append(sepc_conv(in_channels, out_channels, kernel_size=kernel_size[1], dilation=dilation[1], groups=groups[1], padding=((kernel_size[1] + ((dilation[1] - 1) * 2)) // 2), part_deform=part_deform))
        self.Pconv.append(sepc_conv(in_channels, out_channels, kernel_size=kernel_size[2], dilation=dilation[2], groups=groups[2], padding=((kernel_size[2] + ((dilation[2] - 1) * 2)) // 2), stride=2, part_deform=part_deform))
        if self.iBN:
            self.bn = nn.BatchNorm2d(256)
        self.relu = nn.ReLU()
        self.init_weights()
    def init_weights(self):
        # Small-gaussian weight init, zero bias, for the three convs.
        for m in self.Pconv:
            init.normal_(m.weight.data, 0, 0.01)
            if (m.bias is not None):
                m.bias.data.zero_()
    def forward(self, x):
        # x: list of per-level feature maps, ordered fine -> coarse.
        next_x = []
        for (level, feature) in enumerate(x):
            # Same-level response.
            temp_fea = self.Pconv[1](level, feature)
            if (level > 0):
                # Add the finer level below, downsampled by the stride-2 conv.
                temp_fea += self.Pconv[2](level, x[(level - 1)])
            if (level < (len(x) - 1)):
                # Add the coarser level above, upsampled to this resolution.
                temp_fea += F.upsample_bilinear(self.Pconv[0](level, x[(level + 1)]), size=[temp_fea.size(2), temp_fea.size(3)])
            next_x.append(temp_fea)
        if self.iBN:
            # Joint normalisation across all pyramid levels.
            next_x = iBN(next_x, self.bn)
        next_x = [self.relu(item) for item in next_x]
        return next_x
class TestPytorchModel(unittest.TestCase):
    """Tests for the INC/LPOT PyTorch model wrapper: weight access, input
    capture, gradients, sparsity reporting, and weight-only quantization
    export."""
    framework = 'pytorch'
    # Shared quantizable ResNet-18 and its wrapped counterpart.
    model = torchvision.models.quantization.resnet18()
    lpot_model = MODELS['pytorch'](model)
    def test_Model(self):
        # INCModel should dispatch a torch module to the PyTorchModel wrapper.
        model = torchvision.models.quantization.resnet18()
        inc_model = INCModel(model)
        self.assertTrue(isinstance(inc_model, PyTorchModel))
    def test_get_all_weight_name(self):
        # ResNet-18 exposes 62 named parameters.
        assert (len(list(self.lpot_model.get_all_weight_names())) == 62)
    def test_get_weight(self):
        # Overwrite two known parameters, then read them back via the wrapper.
        for (name, param) in self.model.named_parameters():
            if (name == 'layer4.1.conv2.weight'):
                param.data.fill_(0.0)
            if (name == 'fc.bias'):
                param.data.fill_(0.1)
        assert (int(torch.sum(self.lpot_model.get_weight('layer4.1.conv2.weight'))) == 0)
        # fc.bias has 1000 entries of 0.1 -> sum 100.0.
        assert torch.allclose(torch.sum(torch.tensor(self.lpot_model.get_weight('fc.bias'))), torch.tensor(100.0))
    def test_get_input(self):
        # A registered forward pre-hook should capture the model input.
        model = MODELS['pytorch'](torchvision.models.quantization.resnet18())
        model.model.eval().fuse_model()
        model.register_forward_pre_hook()
        rand_input = torch.rand(100, 3, 256, 256).float()
        model.model(rand_input)
        assert torch.equal(model.get_inputs('x'), rand_input)
        model.remove_hooks()
    def test_update_weights(self):
        # Writing zeros through the wrapper must be visible on read-back.
        self.lpot_model.update_weights('fc.bias', torch.zeros([1000]))
        assert (int(torch.sum(self.lpot_model.get_weight('fc.bias'))) == 0)
    def test_gradient(self):
        # Reading a gradient before any exists must raise.
        with self.assertRaises(AssertionError):
            self.lpot_model.get_gradient('fc.bias')
        shape = None
        for (name, tensor) in self.lpot_model._model.named_parameters():
            if (name == 'fc.bias'):
                shape = tensor.shape
                tensor.grad = torch.randn(shape)
                break
        # Overwrite the gradient with zeros and verify the round trip.
        new_grad = torch.zeros(shape)
        self.lpot_model.update_gradient('fc.bias', new_grad)
        assert torch.equal(torch.tensor(self.lpot_model.get_gradient('fc.bias')), torch.zeros(shape))
        # Gradients of plain tensors are retrievable as well.
        rand_input = torch.rand(100, 3, 256, 256).float()
        rand_input.grad = torch.ones_like(rand_input)
        assert torch.equal(torch.tensor(self.lpot_model.get_gradient(rand_input)), torch.ones_like(rand_input))
    def test_report_sparsity(self):
        (df, total_sparsity) = self.lpot_model.report_sparsity()
        self.assertTrue((total_sparsity > 0))
        # One report row per relevant layer in ResNet-18.
        self.assertTrue((len(df) == 22))
    def test_WeightOnlyLinear(self):
        # End-to-end weight-only quantization, then export with several
        # compression dtypes/dims; check size shrinkage and output closeness.
        model = Model()
        input = torch.randn(1, 30)
        conf = PostTrainingQuantConfig(approach='weight_only')
        q_model = quantization.fit(model, conf)
        out1 = q_model(input)
        q_model.save('saved')
        model_size1 = (os.path.getsize('saved/best_model.pt') / 1024)
        print('FP32 Model size:{:.3f}M'.format(model_size1))
        # Exercise every supported packing dtype.
        compression_dtype = [torch.int8, torch.int16, torch.int32, torch.int64]
        for dtype in compression_dtype:
            new_model = Model()
            inc_model = INCModel(new_model)
            compressed_model = inc_model.export_compressed_model(qweight_config_path='saved/qconfig.json', compression_dtype=dtype, scale_dtype=torch.float32, use_optimum_format=False)
            out2 = q_model(input)
            torch.save(compressed_model.state_dict(), 'saved/tmp.pt')
            model_size2 = (os.path.getsize('saved/tmp.pt') / 1024)
            print('WeightOnlyLinear Model size:{:.3f}M'.format(model_size2))
            self.assertTrue(isinstance(compressed_model.fc1, WeightOnlyLinear))
            self.assertTrue((compressed_model.fc1.qweight.dtype == dtype))
            self.assertTrue((compressed_model.fc1.scales.dtype == torch.float32))
            # Compression should at least halve the model size.
            self.assertTrue(((model_size1 / model_size2) > 2))
            self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
        # Exercise packing along either compression dimension.
        compress_dims = [0, 1]
        for dim in compress_dims:
            new_model = Model()
            inc_model = INCModel(new_model)
            compressed_model = inc_model.export_compressed_model(qweight_config_path='saved/qconfig.json', compression_dim=dim, use_optimum_format=False)
            out2 = q_model(input)
            torch.save(compressed_model.state_dict(), 'saved/tmp.pt')
            model_size2 = (os.path.getsize('saved/tmp.pt') / 1024)
            print('WeightOnlyLinear Model size:{:.3f}M'.format(model_size2))
            self.assertTrue(isinstance(compressed_model.fc1, WeightOnlyLinear))
            if (dim == 1):
                # Packing along dim 1 shrinks the in_features axis.
                self.assertTrue((compressed_model.fc1.qweight.shape[1] != compressed_model.fc1.in_features))
            else:
                self.assertTrue((compressed_model.fc1.qweight.shape[0] != compressed_model.fc1.out_features))
            self.assertTrue(((model_size1 / model_size2) > 2))
            self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
        # Default export path (optimum format with fp16 scales).
        new_model = Model()
        inc_model = INCModel(new_model)
        compressed_model = inc_model.export_compressed_model(qweight_config_path='saved/qconfig.json')
        out2 = q_model(input)
        torch.save(compressed_model.state_dict(), 'saved/tmp.pt')
        model_size2 = (os.path.getsize('saved/tmp.pt') / 1024)
        print('WeightOnlyLinear Model size:{:.3f}M'.format(model_size2))
        self.assertTrue(isinstance(compressed_model.fc1, WeightOnlyLinear))
        self.assertTrue((compressed_model.fc1.scales.dtype == torch.float16))
        self.assertTrue(((model_size1 / model_size2) > 2))
        self.assertTrue(torch.all(torch.isclose(out1, out2, atol=0.5)))
def collect_data(args):
    """Run the CoSTAR block-stacking data-collection loop.

    Sets up the world, observers, and data collector, then repeatedly
    executes stacking trials until ``args.execute`` successful rounds are
    saved, unstacking the blocks between rounds. Failures are saved as
    'error.failure'; too many consecutive failures abort the run.

    BUG FIX: the consecutive-bad-round limit check used a literal 5 instead
    of ``max_consecutive_bad_rounds``, and the abort message was missing a
    space before 'max consecutive'.
    """
    if args.launch:
        # Give an externally launched ROS stack time to come up.
        time.sleep(5)
    rospy.init_node('ctp_data_collection_runner')
    q0 = GetHomeJointSpace()
    rospy.loginfo('Making world...')
    world = CostarWorld(robot_config=UR5_C_MODEL_CONFIG)
    rospy.loginfo('Aggregating TF data...')
    tf_buffer = tf2.Buffer(rospy.Duration(120))
    tf_listener = tf2.TransformListener(tf_buffer)
    rospy.loginfo('Node started, waiting for transform data...')
    rospy.sleep(0.5)
    rospy.loginfo('Making stack manager...')
    stack_task = GetStackManager()
    if args.fake:
        # Simulated objects and an identity observer for offline testing.
        world.addObjects(fakeTaskArgs())
        filled_args = stack_task.compile(fakeTaskArgs())
        observe = IdentityObserver(world, stack_task)
    else:
        objects = GetDetectObjectsService()
        observe = Observer(world=world, task=stack_task, detect_srv=objects, topic='/costar_sp_segmenter/detected_object_list', tf_buffer=tf_buffer, tf_listener=tf_listener)
    collector = DataCollector(task=stack_task, data_root='~/.costar/data', rate=args.rate, data_type='h5f', robot_config=UR5_C_MODEL_CONFIG, camera_frame='camera_link', tf_buffer=tf_buffer, tf_listener=tf_listener)
    (home, rate, move_to_pose, close_gripper, open_gripper) = initialize_collection_objects(args, observe, collector, stack_task)

    def verify(object_name):
        # A placed object "verifies" when its TF pose sits above the table
        # surface threshold (stacked on another block).
        pose = None
        for i in range(50):
            try:
                t = rospy.Time(0)
                pose = collector.tf_buffer.lookup_transform(collector.base_link, object_name, t)
                break
            except (tf2.LookupException, tf2.ExtrapolationException, tf2.ConnectivityException) as e:
                rospy.sleep(0.1)
        if pose is None:
            rospy.logwarn(('Failed lookup: %s to %s' % (collector.base_link, object_name)))
            return False
        print('object name and pose: ', object_name, pose)
        return (pose.transform.translation.z > 0.095)

    consecutive_bad_rounds = 0
    max_consecutive_bad_rounds = 5
    # args.start is 1-based; resume from the preceding completed trial.
    start = max(0, (args.start - 1))
    i = start
    idx = (i + 1)
    if args.one_nn_action:
        one_nn_action(move_to_pose, close_gripper, open_gripper, collector.tf_buffer)
        return
    if args.go_home:
        go_home(move_to_pose)
        return
    home()
    try:
        while (i < args.execute):
            (home_q, home_pose) = home()
            collector.set_home_pose(home_pose)
            rate.sleep()
            t = rospy.Time(0)
            home_pose = collector.tf_buffer.lookup_transform(collector.base_link, 'ee_link', t)
            print(('home_pose: ' + str(home_pose)))
            idx = (i + 1)
            rospy.loginfo(('Executing trial %d' % idx))
            (_, world) = observe()
            reward = 0.0
            stack_task.reset()
            rospy.loginfo('Starting loop...')
            poses = []
            cur_pose = None
            frame_count = 0
            while (not rospy.is_shutdown()):
                cur_pose = collector.current_ee_pose
                # NOTE(review): time.clock() was removed in Python 3.8 —
                # confirm the target interpreter, else use time.perf_counter().
                start_time = time.clock()
                done = stack_task.tick()
                tick_time = time.clock()
                if (not collector.update(stack_task.current_action, done)):
                    raise RuntimeError('could not handle data collection. There may be an inconsistency in the system state so try shutting all of ROS down and starting up again. Alternately, run this program in a debugger to try and diagnose the issue.')
                update_time = time.clock()
                time_str = 'Total tick + log time: {:04} sec, Robot Tick: {:04} sec, Data Logging: {:04} sec'.format((update_time - start_time), (tick_time - start_time), (update_time - tick_time))
                verify_update_rate(update_time_remaining=rate.remaining(), update_rate=args.rate, info=time_str)
                rate.sleep()
                frame_count += 1
                if stack_task.finished_action:
                    # Remember where each placed block ended up so it can be
                    # unstacked afterwards.
                    object_was_placed = ((collector.prev_action is not None) and ('place' in collector.prev_action.split(':')[(- 1)]))
                    if object_was_placed:
                        rospy.loginfo(('Remembering ' + str(collector.prev_action)))
                        poses.append(cur_pose)
                if done:
                    if stack_task.ok:
                        savestr = 'WE WILL SAVE TO DISK'
                    else:
                        savestr = 'BUT THERE A PROBLEM WAS DETECTED SO WE ARE SAVING TO DISK AS AN ERROR + FAILURE'
                    rospy.logwarn(('DONE COLLECTING THIS ROUND, ' + savestr))
                    if stack_task.ok:
                        i += 1
                        # Reward 1.0 only when the second-to-last handled
                        # object verifies as stacked.
                        if verify(collector.prev_objects[(- 2)]):
                            reward = 1.0
                        else:
                            reward = 0.0
                        rospy.loginfo(('reward = ' + str(reward)))
                    break
            if stack_task.ok:
                collector.save(idx, reward)
                print('')
                print('Finished one round of data collection. Attempting to automatically ')
                print('reset the test environment to continue.')
                print('')
                print('Example number:', idx, '/', args.execute)
                print('Success:', reward)
                print('')
                consecutive_bad_rounds = 0
            else:
                collector.save(idx, 'error.failure')
                print('')
                print((('Bad data collection round, ' + str(consecutive_bad_rounds)) + ' consecutive. Attempting to automatically reset.'))
                print('If this happens repeatedly try restarting the program or loading in a debugger.')
                collector.reset()
                stack_task.reset()
                consecutive_bad_rounds += 1
                # BUG FIX: was a hard-coded 5, bypassing the named limit.
                if (consecutive_bad_rounds > max_consecutive_bad_rounds):
                    print((('Hit limit of ' + str(max_consecutive_bad_rounds)) + ' max consecutive bad rounds. '))
                    raise RuntimeError('Killing the program... you may want to debug this or hopefully somebody will restart it automatically! You can try the following bash line for auto restarts: while true; do ./scripts/run.py --execute 1000; done')
            rospy.loginfo('Attempting to unstack the blocks')
            for (count_from_top, drop_pose) in enumerate(reversed(poses)):
                if (drop_pose is None):
                    continue
                unstack_one_block(drop_pose, move_to_pose, close_gripper, open_gripper, i=count_from_top)
            # Final extra pass: nudge the last drop pose down and retry so the
            # bottom block is released cleanly.
            if ((len(poses) > 0) and (drop_pose is not None)):
                count_from_top += 1
                drop_pose.p[2] -= 0.035
                result = None
                max_tries = 1
                tries = 0
                while ((tries < max_tries) and (result is None)):
                    try:
                        result = unstack_one_block(drop_pose, move_to_pose, close_gripper, open_gripper, i=count_from_top)
                    except RuntimeError as e:
                        # Lift slightly and retry on collision/IK failure.
                        drop_pose.p[2] += 0.025
                        tries += 1
            rospy.loginfo('Done one loop.')
    except RuntimeError as ex:
        (ex_type, ex2, tb) = sys.exc_info()
        # NOTE(review): format_exception's etype keyword was removed in
        # Python 3.10 — confirm the target interpreter version.
        message = ('error.failure due to RuntimeError:\n' + ''.join(traceback.format_exception(etype=type(ex), value=ex, tb=tb)))
        rospy.logerr(message)
        collector.save(idx, 'error.failure', log=message)
        del tb
        raise
    except KeyboardInterrupt as ex:
        message = 'error.failure due to KeyboardInterrupt, collection canceled based on a user request.'
        rospy.logerr(message)
        collector.save(idx, 'error.failure', log=message)
        raise
class DeepLabV3(nn.Module):
    """DeepLabV3 segmentation model: ResNet (output stride 16) backbone plus
    an ASPP head, with the logits upsampled back to the input resolution."""
    def __init__(self, num_classes, num_layers):
        super(DeepLabV3, self).__init__()
        self.num_classes = num_classes
        if num_layers == 18:
            self.resnet = ResNet18_OS16()
            self.aspp = ASPP(num_classes=self.num_classes)
        elif num_layers == 50:
            self.resnet = ResNet50_OS16()
            self.aspp = ASPP_Bottleneck(num_classes=self.num_classes)
        else:
            # ROBUSTNESS FIX: an unsupported depth previously produced a
            # half-initialised module that only failed later in forward().
            raise ValueError('num_layers must be 18 or 50, got {}'.format(num_layers))
    def forward(self, x):
        # Remember the input spatial size to restore it after the backbone.
        h = x.size()[2]
        w = x.size()[3]
        feature_map = self.resnet(x)
        output = self.aspp(feature_map)
        # F.upsample is deprecated; interpolate is the supported equivalent.
        output = F.interpolate(output, size=(h, w), mode='bilinear')
        return output
class WrapperPotential(Potential):
    """Base class for potentials that wrap and modify another potential.

    Forwards the low-level evaluation hooks (_evaluate, _Rforce, ...) through
    ``self._wrap`` via ``__getattr__``, and inherits the wrapped potential's
    physical-unit (ro/vo) configuration when the wrapper's is unset.
    """
    def __init__(self, amp=1.0, pot=None, ro=None, vo=None, _init=None, **kwargs):
        # Subclasses call with _init truthy after their own setup; a bare
        # construction with _init falsy performs no initialisation at all.
        if (not _init):
            return None
        Potential.__init__(self, amp=amp, ro=ro, vo=vo)
        self._pot = pot
        self.isNonAxi = _isNonAxi(self._pot)
        # Wrapper and wrapped potential must agree on unit conversions.
        assert physical_compatible(self, self._pot), 'Physical unit conversion parameters (ro,vo) are not compatible between this wrapper and the wrapped potential'
        phys_wrapped = get_physical(self._pot, include_set=True)
        # Adopt ro/vo from the wrapped potential where not set on the wrapper.
        if ((not self._roSet) and phys_wrapped['roSet']):
            self.turn_physical_on(ro=phys_wrapped['ro'], vo=False)
        if ((not self._voSet) and phys_wrapped['voSet']):
            self.turn_physical_on(vo=phys_wrapped['vo'], ro=False)
    def __repr__(self):
        # Wrapper repr followed by the wrapped potential's repr, one line per
        # wrapped-repr line (the f-string embeds a literal newline).
        wrapped_repr = repr(self._pot)
        return ((Potential.__repr__(self) + ', wrapper of') + ''.join([f'''
{s}''' for s in wrapped_repr.split('\n')]))
    def __getattr__(self, attribute):
        # Route the standard evaluation hooks through self._wrap; any other
        # missing attribute falls through to super(), which raises
        # AttributeError as usual.
        if ((attribute == '_evaluate') or (attribute == '_Rforce') or (attribute == '_zforce') or (attribute == '_phitorque') or (attribute == '_R2deriv') or (attribute == '_z2deriv') or (attribute == '_Rzderiv') or (attribute == '_phi2deriv') or (attribute == '_Rphideriv') or (attribute == '_dens')):
            return (lambda R, Z, phi=0.0, t=0.0: self._wrap(attribute, R, Z, phi=phi, t=t))
        else:
            return super().__getattr__(attribute)
    def _wrap_pot_func(self, attribute):
        # Map a hook name onto the module-level evaluator to apply to the
        # wrapped potential; derivatives are evaluated in internal units.
        if (attribute == '_evaluate'):
            return (lambda p, R, Z, phi=0.0, t=0.0: _evaluatePotentials(p, R, Z, phi=phi, t=t))
        elif (attribute == '_dens'):
            return (lambda p, R, Z, phi=0.0, t=0.0: evaluateDensities(p, R, Z, phi=phi, t=t, use_physical=False))
        elif (attribute == '_Rforce'):
            return (lambda p, R, Z, phi=0.0, t=0.0: _evaluateRforces(p, R, Z, phi=phi, t=t))
        elif (attribute == '_zforce'):
            return (lambda p, R, Z, phi=0.0, t=0.0: _evaluatezforces(p, R, Z, phi=phi, t=t))
        elif (attribute == '_phitorque'):
            return (lambda p, R, Z, phi=0.0, t=0.0: _evaluatephitorques(p, R, Z, phi=phi, t=t))
        elif (attribute == '_R2deriv'):
            return (lambda p, R, Z, phi=0.0, t=0.0: evaluateR2derivs(p, R, Z, phi=phi, t=t, use_physical=False))
        elif (attribute == '_z2deriv'):
            return (lambda p, R, Z, phi=0.0, t=0.0: evaluatez2derivs(p, R, Z, phi=phi, t=t, use_physical=False))
        elif (attribute == '_Rzderiv'):
            return (lambda p, R, Z, phi=0.0, t=0.0: evaluateRzderivs(p, R, Z, phi=phi, t=t, use_physical=False))
        elif (attribute == '_phi2deriv'):
            return (lambda p, R, Z, phi=0.0, t=0.0: _evaluatePotentials(p, R, Z, phi=phi, t=t, dphi=2))
        elif (attribute == '_Rphideriv'):
            return (lambda p, R, Z, phi=0.0, t=0.0: _evaluatePotentials(p, R, Z, phi=phi, t=t, dR=1, dphi=1))
        else:
            raise AttributeError(('Attribute %s not found in for this WrapperPotential' % attribute))
def run_sequence(seq: Sequence, tracker: Tracker, debug=False, num_gpu=8):
    """Run ``tracker`` over one sequence and save its results.

    Skips the sequence when result files already exist (unless debugging).
    When executed in a multiprocessing pool, the worker pins itself to a GPU
    chosen round-robin from its worker index.
    """
    try:
        worker_name = multiprocessing.current_process().name
        worker_id = (int(worker_name[(worker_name.find('-') + 1):]) - 1)
        gpu_id = (worker_id % num_gpu)
        torch.cuda.set_device(gpu_id)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. GPU pinning stays best-effort (in the
        # main process the name carries no '-<idx>' suffix).
        pass

    def _results_exist():
        # Single-object datasets write one bbox file; multi-object datasets
        # write one file per object id.
        if (seq.object_ids is None):
            if (seq.dataset in ['trackingnet', 'got10k']):
                base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)
                bbox_file = '{}.txt'.format(base_results_path)
            else:
                bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)
            return os.path.isfile(bbox_file)
        else:
            bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]
            missing = [(not os.path.isfile(f)) for f in bbox_files]
            return (sum(missing) == 0)

    if (_results_exist() and (not debug)):
        print('FPS: {}'.format((- 1)))
        return
    print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))
    output = tracker.run_sequence(seq, debug=debug)
    sys.stdout.flush()
    # num_frames is the same in both cases; only exec_time differs.
    num_frames = len(output['time'])
    if isinstance(output['time'][0], (dict, OrderedDict)):
        # Per-frame timing breakdown: total is the sum of every entry.
        exec_time = sum([sum(times.values()) for times in output['time']])
    else:
        exec_time = sum(output['time'])
    print('FPS: {}'.format((num_frames / exec_time)))
    if (not debug):
        _save_tracker_output(seq, tracker, output)
def get_filepath(dataset, architecture, seed, step, layer, folder=False):
    """Path to a stored embedding.

    Returns the layer directory itself when ``folder`` is True, otherwise
    the 'rep.npy' file inside that directory.
    """
    base = os.path.join(EMBEDDING_PATH, dataset, architecture, str(seed), str(step), str(layer))
    return base if folder else os.path.join(base, 'rep.npy')
class NewAttention(nn.Module):
    """Attention over encoder states with optional penalisation of
    previously attended positions.

    Only attn_type='dot' is usable; 'general' builds its layers but is
    explicitly unfinished (constructor raises NotImplementedError).
    """
    def __init__(self, enc_dim: int, dec_dim: int, attn_type='dot'):
        super(NewAttention, self).__init__()
        self.enc_dim = enc_dim
        self.dec_dim = dec_dim
        self.attn_type = attn_type
        self._relu = nn.ReLU()
        if (self.attn_type == 'general'):
            self.W_h = nn.Linear(enc_dim, dec_dim, bias=True)
            self.W_s = nn.Linear(dec_dim, dec_dim, bias=True)
            self.v = nn.Linear(dec_dim, 1)
            raise NotImplementedError
        elif (self.attn_type == 'dot'):
            self.W_h = nn.Linear(enc_dim, dec_dim, bias=True)
        else:
            raise NotImplementedError
    def forward_one_step(self, enc_state, dec_state, enc_mask, prev_attn=None, penalty_val=10):
        """Compute one decoding step of attention.

        Args:
            enc_state: (batch, src_len, enc_dim) encoder outputs.
            dec_state: (batch, dec_dim) current decoder state.
            enc_mask: (batch, src_len), 1 for valid positions, 0 for padding.
            prev_attn: optional previous attention to penalise re-attending.
            penalty_val: magnitude of the penalty added for masked slots.

        Returns:
            (attention_distribution, penalised_scores)
        """
        (batch_size_, src_len, enc_dim) = enc_state.size()
        (batch_size, dec_dim) = dec_state.size()
        assert (batch_size == batch_size_)
        assert (enc_dim == self.enc_dim)
        assert (dec_dim == self.dec_dim)
        if (prev_attn is not None):
            # NOTE(review): self.gate_prev_attn is never created in __init__,
            # so this branch raises AttributeError — confirm whether the
            # gating layer definition was lost from the constructor.
            gating_prev_attn = self._relu(self.gate_prev_attn(dec_state))
            gated_prev_attn = (gating_prev_attn * prev_attn)
        if (self.attn_type == 'dot'):
            # score[b, i] = W_h(enc_state[b, i]) . dec_state[b]
            _middle = self.W_h(enc_state)
            unsqueezed_dec_state = dec_state.unsqueeze(2)
            score = torch.matmul(_middle, unsqueezed_dec_state)
            score = score.squeeze(2)
        elif (self.attn_type == 'general'):
            w_enc = self.W_h(enc_state)
            w_dec = self.W_s(dec_state).unsqueeze(1)
            _middle = torch.tanh((w_enc + w_dec))
            score = self.v(_middle)
            score = score.squeeze(2)
        else:
            raise NotImplementedError
        # Padded positions (mask 0) receive -penalty_val added to the score.
        if (prev_attn is not None):
            penaltied_score = ((score + ((enc_mask - 1) * penalty_val)) - gated_prev_attn)
        else:
            penaltied_score = (score + ((enc_mask - 1) * penalty_val))
        attention_distribution = self.masked_softmax(penaltied_score, enc_mask)
        return (attention_distribution, penaltied_score)
    def masked_softmax(self, score, mask):
        """Softmax over dim 1 with masking of padded slots.

        BUG FIX: ``self`` was missing from the signature, so every call via
        ``self.masked_softmax(...)`` raised TypeError.
        """
        if (mask is not None):
            # NOTE(review): filling with -0.0 does not actually suppress
            # masked positions under softmax; a large negative constant was
            # probably intended. Kept as-is to avoid changing numerics.
            score = score.masked_fill((mask == 0), (- .0))
        attn_dist = F.softmax(score, dim=1)
        return attn_dist
class CLIPVisionConfig(PretrainedConfig):
    """Configuration for the CLIP vision transformer encoder.

    Stores the ViT architecture hyper-parameters (width, depth, heads, patch
    and image sizes) together with regularisation, activation, and
    initialisation settings; unknown kwargs go to the base config.
    """
    model_type = 'clip_vision_model'
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        # Transformer geometry.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Vision-specific input geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        # Regularisation, activation, and initialisation.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
class BNILoss(_Loss):
    """Balanced-noise-injection loss with a learnable noise sigma.

    The bucket centers/weights are fixed (non-trainable) CUDA tensors; only
    ``noise_sigma`` is optimised.
    """
    def __init__(self, init_noise_sigma, bucket_centers, bucket_weights):
        super(BNILoss, self).__init__()
        # Learnable noise scale, created directly on the GPU.
        self.noise_sigma = torch.nn.Parameter(torch.tensor(init_noise_sigma, device='cuda'))
        self.bucket_centers = torch.tensor(bucket_centers).cuda()
        self.bucket_weights = torch.tensor(bucket_weights).cuda()
    def forward(self, pred, target):
        # The underlying loss consumes the noise variance (sigma squared).
        noise_var = self.noise_sigma ** 2
        return bni_loss(pred, target, noise_var, self.bucket_centers, self.bucket_weights)
class Discriminator_cifar32(nn.Module):
    """Sigmoid-output CNN discriminator for 32x32 CIFAR images.

    The weight-normalised 3->1 projection layer is excluded from the
    optimizer: only layer1..4 and the final linear head are trained.
    """
    def __init__(self, optimizer, optimizer_name, lr, betas):
        super().__init__()
        m_g = 4     # final feature-map side length
        ch = 512    # channel width of the last conv stage
        # 3->1 channel projection with weight norm, its gain pinned to 1.
        self.projection = nn.utils.weight_norm(nn.Conv2d(3, 1, kernel_size=4, stride=1, padding=2, bias=False), name='weight')
        self.projection.weight_g.data.fill_(1)
        self.layer1 = self.make_layer(1, ch // 8)
        self.layer2 = self.make_layer(ch // 8, ch // 4)
        self.layer3 = self.make_layer(ch // 4, ch // 2)
        self.layer4 = nn.Sequential(nn.Conv2d(ch // 2, ch, 3, 1, 1), nn.LeakyReLU(0.2))
        self.linear = nn.Linear(ch * m_g * m_g, 1, 1)
        # Everything except the frozen projection is trainable.
        trainable = (list(self.layer1.parameters())
                     + list(self.layer2.parameters())
                     + list(self.layer3.parameters())
                     + list(self.layer4.parameters())
                     + list(self.linear.parameters()))
        if optimizer_name == 'adam':
            self.optimizer = optimizer(trainable, lr=lr, betas=betas)
        elif optimizer_name == 'amsgrad':
            self.optimizer = optimizer(trainable, lr=lr, betas=betas, amsgrad=True)
        elif optimizer_name == 'rmsprop':
            self.optimizer = optimizer(trainable, lr=lr, alpha=betas[0])
    def make_layer(self, in_plane, out_plane):
        """Conv(3x3, same) + LeakyReLU + Conv(4x4, /2) + LeakyReLU."""
        return nn.Sequential(nn.Conv2d(in_plane, out_plane, 3, 1, 1), nn.LeakyReLU(0.2), nn.Conv2d(out_plane, out_plane, 4, 2, 1), nn.LeakyReLU(0.2))
    def forward(self, x):
        out = self.projection(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.linear(out.view(out.size(0), -1))
        return torch.sigmoid(out.squeeze())
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Write a training checkpoint for *epoch* and prune rotated-out ones.

    Saves model/optimizer/scaler state (plus EMA weights when provided) to
    ``<output_dir>/checkpoint-<epoch>.pth`` through ``save_on_master``; on
    the main process, deletes the checkpoint that has fallen outside the
    ``save_ckpt_num * save_ckpt_freq`` retention window.
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    checkpoint_paths = [(output_dir / ('checkpoint-%s.pth' % epoch_name))]
    for checkpoint_path in checkpoint_paths:
        # `model_without_ddp` is saved so keys are free of the 'module.' DDP prefix.
        to_save = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args}
        if (model_ema is not None):
            to_save['model_ema'] = get_state_dict(model_ema)
        save_on_master(to_save, checkpoint_path)
    print('saved at', checkpoint_paths)
    # Non-integer epochs (e.g. 'best') are never pruned.
    if (is_main_process() and isinstance(epoch, int)):
        to_del = (epoch - (args.save_ckpt_num * args.save_ckpt_freq))
        old_ckpt = (output_dir / ('checkpoint-%s.pth' % to_del))
        if os.path.exists(old_ckpt):
            os.remove(old_ckpt)
class SolveRestrictedGameIncreasingTimeToSolve(SolveRestrictedGame):
    """Restricted-game solver whose per-iteration training budget grows geometrically.

    The first ``dont_solve_first_n_nxdo_iters`` NXDO iterations get a
    zero-step (effectively no-op) solve; after that, each solve runs for a
    fixed number of episodes OR steps, and the budget is multiplied by
    ``increase_multiplier`` after every iteration.
    """

    def __init__(self, scenario: NXDOScenario, dont_solve_first_n_nxdo_iters: int, increase_multiplier: float, starting_episodes: int=None, starting_steps: int=None, required_fields: Union[(List[str], None)]=None):
        self.scenario = scenario
        # The budget is expressed in exactly one unit: episodes or steps.
        if ((starting_steps is not None) and (starting_episodes is not None)):
            raise ValueError('Can only provide one of [starting_episodes, starting_steps] to this StoppingCondition)')
        self._current_episodes_for_an_iter = (int(starting_episodes) if (starting_episodes is not None) else None)
        self._current_steps_for_an_iter = (int(starting_steps) if (starting_steps is not None) else None)
        self._increase_multiplier = increase_multiplier
        self._current_nxdo_iter = 0
        self._dont_solve_first_n_nxdo_iters = dont_solve_first_n_nxdo_iters
        if (required_fields is None):
            required_fields = []
        # Exploitability must be reported each iter unless it is only computed at the end.
        if (scenario.calculate_openspiel_metanash and (not scenario.calculate_openspiel_metanash_at_end) and ('avg_policy_exploitability' not in required_fields)):
            required_fields.append('avg_policy_exploitability')
        self.required_fields = required_fields

    def __call__(self, log_dir: str, br_spec_lists_for_each_player: Dict[(int, List[StrategySpec])], manager_metadata: dict=None) -> RestrictedGameSolveResult:
        """Solve the current restricted game, then grow the budget for next time."""
        if (self._current_nxdo_iter < self._dont_solve_first_n_nxdo_iters):
            # Early iterations: a zero-step stopping condition skips real training.
            stopping_condition = TwoPlayerBRFixedTrainingLengthStoppingCondition(fixed_steps=0, required_fields_in_last_train_iter=self.required_fields)
        else:
            stopping_condition = TwoPlayerBRFixedTrainingLengthStoppingCondition(fixed_episodes=self._current_episodes_for_an_iter, fixed_steps=self._current_steps_for_an_iter, required_fields_in_last_train_iter=self.required_fields)
        # NOTE(review): multiplying can turn the budget into a float — confirm the
        # stopping condition tolerates non-integer episode/step counts.
        if (self._current_episodes_for_an_iter is not None):
            self._current_episodes_for_an_iter *= self._increase_multiplier
        if (self._current_steps_for_an_iter is not None):
            self._current_steps_for_an_iter *= self._increase_multiplier
        self._current_nxdo_iter += 1
        return _solve_game(scenario=self.scenario, log_dir=log_dir, br_spec_lists_for_each_player=br_spec_lists_for_each_player, stopping_condition=stopping_condition, manager_metadata=manager_metadata)
def vgg11_bn(config, **kwargs):
    """Build a VGG-11 model with batch norm; optionally load pretrained weights."""
    pretrained = config.pretrained
    if pretrained:
        # Pretrained weights make the built-in random init redundant.
        kwargs['init_weights'] = False
    features = make_layers(cfg['A'], batch_norm=True, norm_type=config.norm_type)
    model = VGG(features, **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['vgg11_bn'])
        model.load_state_dict(state, strict=False)
    return model
class SupConCELoss(nn.Module):
    """Weighted combination of supervised-contrastive and cross-entropy losses.

    loss = alpha * SupCon(proj1, proj2, target)
         + (1 - alpha) * CE(cat(pred1, pred2), cat(target, target))
    """

    def __init__(self, weights, alpha=0.5, device='cuda:0', temperature=0.06):
        super().__init__()
        self.alpha = alpha
        self.supcon = SupConLoss(temperature=temperature, device=device)
        self.ce = nn.CrossEntropyLoss(weight=weights)

    def forward(self, projection1, projection2, prediction1, prediction2, target):
        contrastive = self.supcon(projection1, projection2, target)
        stacked_preds = torch.cat([prediction1, prediction2], dim=0)
        stacked_labels = torch.cat([target, target], dim=0)
        classification = self.ce(stacked_preds, stacked_labels)
        return self.alpha * contrastive + (1 - self.alpha) * classification
def set_homotopy_continuation_parameter(idx, val):
    """Set PHCpack Pade-continuation parameter *idx* to *val* (thin phcpy wrapper)."""
    # Imported lazily so the module loads without the compiled phcpy extension.
    from phcpy.phcpy2c3 import py2c_padcon_set_homotopy_continuation_parameter
    return py2c_padcon_set_homotopy_continuation_parameter(idx, val)
def cal_formal_charge(atomic_symbol, bonds) -> int:
    """Return the formal charge implied by *bonds* for *atomic_symbol*.

    Only tetravalent nitrogen (total bond order 4) is assigned a +1 charge;
    every other atom/bond combination yields 0. Each entry of *bonds* is a
    pair whose second element is the bond order.
    """
    if atomic_symbol != 'N':
        return 0
    total_order = sum(order for _, order in bonds)
    return 1 if total_order == 4 else 0
class RNNLMModelTrainer(tf.Module):
    """Training/evaluation driver for an RNNLMModel: SGD with global-norm gradient clipping."""

    def __init__(self, model: RNNLMModel, config):
        super().__init__()
        self.model = model
        # tf.Variable so the rate can be reassigned per-epoch without rebuilding the optimizer.
        self.learning_rate = tf.Variable(0.001, dtype=tf.float32, trainable=False)
        self.optimizer = tf.optimizers.SGD(learning_rate=self.learning_rate)
        self.max_grad_norm = config.max_grad_norm
        self.eval_mean_loss = tf.metrics.Mean()

    def train_one_epoch(self, data_producer, learning_rate, verbose=True):
        """Run one pass over *data_producer* at the given learning rate."""
        print('start epoch with learning rate {}'.format(learning_rate))
        self.learning_rate.assign(learning_rate)
        for (i, (inputs, labels)) in enumerate(data_producer.iterate()):
            loss = self._train_step(inputs, labels)
            # Logs ~10 times per epoch; assumes epoch_size >= 10 (integer division).
            if (verbose and ((i % (data_producer.epoch_size // 10)) == 1)):
                print('{}/{}: loss={}'.format(i, data_producer.epoch_size, loss))

    def evaluate(self, data_producer):
        """Return the mean loss over one full pass of *data_producer*."""
        self.eval_mean_loss.reset_states()
        for (i, (inputs, labels)) in enumerate(data_producer.iterate()):
            loss = self.model.get_loss(inputs, labels)
            self.eval_mean_loss.update_state(loss)
        return self.eval_mean_loss.result()

    def _train_step(self, inputs, labels):
        """Apply one clipped-gradient optimizer step and return the loss."""
        with tf.GradientTape() as tape:
            loss = self.model.get_loss(inputs, labels, is_training=True)
        tvars = self.model.trainable_variables
        grads = tape.gradient(loss, tvars)
        (clipped_grads, _) = tf.clip_by_global_norm(grads, self.max_grad_norm)
        self.optimizer.apply_gradients(zip(clipped_grads, tvars))
        return loss
# NOTE(review): this bare call was most likely a "@_module(name='Kaiming')"
# registration decorator whose '@' was lost in extraction — confirm upstream.
_module(name='Kaiming')
class KaimingInit(BaseInit):
    """Kaiming (He) weight initializer usable as an init hook.

    Applies ``kaiming_init`` either to the whole module or only to layers
    whose class (or base-class) name matches ``self.layer`` — both of
    which come from the BaseInit parent.
    """

    def __init__(self, a=0, mode='fan_out', nonlinearity='relu', distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.distribution = distribution

    def __call__(self, module):
        def init(m):
            # self.wholemodule / self.bias are set by BaseInit.
            if self.wholemodule:
                kaiming_init(m, self.a, self.mode, self.nonlinearity, self.bias, self.distribution)
            else:
                layername = m.__class__.__name__
                basesname = _get_bases_name(m)
                # Initialize only layers whose class or base class is listed.
                if len((set(self.layer) & set(([layername] + basesname)))):
                    kaiming_init(m, self.a, self.mode, self.nonlinearity, self.bias, self.distribution)
        module.apply(init)
def alltoall(inputs, per_rank_split_lengths):
    """Start an asynchronous all-to-all exchange of embedding outputs across ranks.

    Selects the native alltoall, scatter, or scatter-list implementation
    based on the module-level ``a2a_impl`` setting and returns the global
    ``myreq`` handle; its WaitFunction completes the exchange later.
    """
    global myreq
    (N, E) = inputs[0].size()
    a2ai = All2AllInfo()
    a2ai.lS = len(inputs)  # number of local sparse features
    a2ai.gSS = per_rank_split_lengths
    (a2ai.lN, a2ai.gNS) = get_split_lengths(N)
    a2ai.E = E  # embedding dimension
    a2ai.N = N  # local batch dimension
    # Total feature count across ranks; without explicit splits, assume uniform.
    a2ai.S = (sum(per_rank_split_lengths) if per_rank_split_lengths else (a2ai.lS * my_size))
    if (((a2a_impl == '') and alltoall_supported) or (a2a_impl == 'alltoall')):
        output = All2All_Req.apply(a2ai, *inputs)
        myreq.WaitFunction = All2All_Wait
    elif ((a2a_impl == '') or (a2a_impl == 'scatter')):
        # Fallback when native alltoall is unsupported by the backend.
        output = All2All_Scatter_Req.apply(a2ai, *inputs)
        myreq.WaitFunction = All2All_Scatter_Wait
    elif (a2a_impl == 'scatter_list'):
        output = All2All_ScatterList_Req.apply(a2ai, *inputs)
        myreq.WaitFunction = All2All_ScatterList_Wait
    else:
        print(('Unknown value set for DLRM_ALLTOALL_IMPL (%s), please use one of [alltoall, scatter, scatter_list]' % a2a_impl))
    return myreq
# NOTE(review): this bare tuple was most likely the argument list of a stripped
# multiple-dispatch registration decorator — confirm against the original source.
(inducing_variables.MultioutputInducingVariables, TensorLike, TensorLike)
def _linear_multioutput(Z: inducing_variables.MultioutputInducingVariables, u: TensorLike, f: TensorLike, *, L: TensorLike=None, diag: TensorLike=None, basis: AbstractBasis=None, multioutput_axis: int='default', **kwargs):
    """Build a multioutput pathwise-update sampler via linear (Matheron-style) updates.

    Solves for weights w with (K + diag·I) w = (u - f - eps) in whichever of
    the M-dimensional (kernel) or D-dimensional (feature) space is smaller,
    and wraps them in a MultioutputDenseSampler. ``L`` may pre-supply the
    Cholesky factor; ``diag`` defaults to the global jitter.
    """
    assert (tuple(u.shape) == tuple(f.shape))
    if (multioutput_axis == 'default'):
        multioutput_axis = (None if (basis is None) else 0)
    if (diag is None):
        diag = default_jitter()
    if isinstance(diag, float):
        diag = tf.convert_to_tensor(diag, dtype=f.dtype)
    diag = tf.expand_dims(diag, axis=(- 1))
    # Resolve the feature matrix from Z, optionally passing it through a basis.
    if (basis is None):
        if isinstance(Z, inducing_variables.InducingVariables):
            feat = inducing_to_tensor(Z)
        else:
            feat = Z
    elif isinstance(Z, inducing_variables.SharedIndependentInducingVariables):
        feat = basis(Z)
    else:
        feat = basis(Z, multioutput_axis=0)
    # Residual perturbed by noise (Matheron's rule).
    err = swap_axes((u - f), (- 3), (- 1))
    err -= (tf.sqrt(diag) * tf.random.normal(err.shape, dtype=err.dtype))
    (M, D) = feat.shape[(- 2):]
    if (L is None):
        if (D < M):
            # Feature-space factorization: chol(F^T diag^-1 F + I).
            feat_iDiag = (feat * tf.math.reciprocal(diag))
            S = tf.matmul(feat_iDiag, feat, transpose_a=True)
            L = tf.linalg.cholesky((S + tf.eye(S.shape[(- 1)], dtype=S.dtype)))
        else:
            # Kernel-space factorization: chol(F F^T + diag).
            K = tf.matmul(feat, feat, transpose_b=True)
            K = tf.linalg.set_diag(K, (tf.linalg.diag_part(K) + diag[(..., 0)]))
            L = tf.linalg.cholesky(K)
    else:
        assert (L.shape[(- 1)] == min(M, D))
    if (D < M):
        feat_iDiag = (feat * tf.math.reciprocal(diag))
        weights = tf.linalg.adjoint(tf.linalg.cholesky_solve(L, tf.matmul(feat_iDiag, err, transpose_a=True)))
    else:
        iK_err = tf.linalg.cholesky_solve(L, err)
        weights = tf.matmul(iK_err, feat, transpose_a=True)
    return MultioutputDenseSampler(basis=basis, weights=swap_axes(weights, (- 3), (- 2)), multioutput_axis=multioutput_axis, **kwargs)
def parse_args():
    """Parse command-line arguments for segmentor training.

    Also exports LOCAL_RANK for torch.distributed launchers and migrates the
    deprecated --options flag onto --cfg-options (rejecting the combination).
    """
    parser = argparse.ArgumentParser(description='Train a segmentor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--load-from', help='the checkpoint file to load weights from')
    parser.add_argument('--resume-from', help='the checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true', help='whether not to evaluate the checkpoint during training')
    # Mutually exclusive ways of selecting GPUs; --gpus and --gpu-ids are deprecated.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument('--gpus', type=int, help='(Deprecated, please use --gpu-id) number of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument('--gpu-id', type=int, default=0, help='id of gpu to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--diff_seed', action='store_true', help='Whether or not set different seeds for different ranks')
    parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='--options is deprecated in favor of --cfg_options\' and it will not be supported in version v0.22.0. Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--auto-resume', action='store_true', help='resume from the latest checkpoint automatically.')
    args = parser.parse_args()
    # torch.distributed launchers read LOCAL_RANK from the environment.
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if (args.options and args.cfg_options):
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
        args.cfg_options = args.options
    return args
def create_sub_dirs(opt, sub_dirs):
    """Create each sub-directory under ``opt.expr_dir`` and record its path on ``opt``.

    For every name in *sub_dirs*, the directory ``opt.expr_dir/<name>`` is
    created if missing and ``opt.<name>`` is set to its path so later code
    can reference the directory directly.
    """
    for sub_dir in sub_dirs:
        dir_path = os.path.join(opt.expr_dir, sub_dir)
        # exist_ok avoids the check-then-create race of the original
        # `os.path.exists()` guard when several processes share expr_dir.
        os.makedirs(dir_path, exist_ok=True)
        setattr(opt, sub_dir, dir_path)
class Downsample_module(nn.Module):
    """Downsampling branch of an MSPN/RSN-style pose backbone.

    Stacks ``num_units`` residual stages (stride-2 from the second stage on,
    doubling channels each time) and returns per-stage features in reverse
    (deepest first), optionally adding two skip inputs per stage.
    """

    def __init__(self, block, num_blocks, num_steps=4, num_units=4, has_skip=False, norm_cfg=dict(type='BN'), in_channels=64, expand_times=26):
        # Deep-copy so downstream mutation never leaks into the caller's config dict.
        norm_cfg = cp.deepcopy(norm_cfg)
        super().__init__()
        self.has_skip = has_skip
        self.in_channels = in_channels
        assert (len(num_blocks) == num_units)
        self.num_blocks = num_blocks
        self.num_units = num_units
        self.num_steps = num_steps
        self.norm_cfg = norm_cfg
        self.layer1 = self._make_layer(block, in_channels, num_blocks[0], expand_times=expand_times, res_top_channels=in_channels)
        for i in range(1, num_units):
            module_name = f'layer{(i + 1)}'
            # Channels double and spatial size halves at each further unit.
            self.add_module(module_name, self._make_layer(block, (in_channels * pow(2, i)), num_blocks[i], stride=2, expand_times=expand_times, res_top_channels=in_channels))

    def _make_layer(self, block, out_channels, blocks, stride=1, expand_times=26, res_top_channels=64):
        """Build one residual stage of *blocks* blocks; adds a 1x1 projection when shape changes."""
        downsample = None
        if ((stride != 1) or (self.in_channels != (out_channels * block.expansion))):
            downsample = ConvModule(self.in_channels, (out_channels * block.expansion), kernel_size=1, stride=stride, padding=0, norm_cfg=self.norm_cfg, act_cfg=None, inplace=True)
        units = list()
        units.append(block(self.in_channels, out_channels, num_steps=self.num_steps, stride=stride, downsample=downsample, norm_cfg=self.norm_cfg, expand_times=expand_times, res_top_channels=res_top_channels))
        # Track the running channel count for the next stage/block.
        self.in_channels = (out_channels * block.expansion)
        for _ in range(1, blocks):
            units.append(block(self.in_channels, out_channels, num_steps=self.num_steps, expand_times=expand_times, res_top_channels=res_top_channels))
        return nn.Sequential(*units)

    def forward(self, x, skip1, skip2):
        """Run all units; returns the per-unit features deepest-first as a tuple."""
        out = list()
        for i in range(self.num_units):
            module_name = f'layer{(i + 1)}'
            module_i = getattr(self, module_name)
            x = module_i(x)
            if self.has_skip:
                x = ((x + skip1[i]) + skip2[i])
            out.append(x)
        out.reverse()
        return tuple(out)
class CaptureLogger():
    """Context manager that captures everything emitted through *logger*.

    While active, a StreamHandler writing into an in-memory buffer is
    attached to the logger; on exit the handler is detached and the
    captured text becomes available as ``self.out``.
    """

    def __init__(self, logger):
        self.logger = logger
        self.out = ''
        self.io = StringIO()
        self.sh = logging.StreamHandler(self.io)

    def __enter__(self):
        self.logger.addHandler(self.sh)
        return self

    def __exit__(self, *exc):
        self.logger.removeHandler(self.sh)
        self.out = self.io.getvalue()

    def __repr__(self):
        return f'captured: {self.out}\n'
# NOTE(review): '_model' was most likely a stripped '@_model' registration
# decorator — confirm against the original source.
_model
def regnetx_080(pretrained=False, **kwargs):
    """RegNetX-8.0GF constructor (thin wrapper over _regnet)."""
    return _regnet('regnetx_080', pretrained, **kwargs)
def get_suffix(phone):
    """Return the 2-character positional suffix (e.g. '_B') of a phone label.

    Exits the program with status 1 when the label is too short to carry a
    position-dependent suffix.
    """
    if len(phone) >= 3:
        return phone[-2:]
    print('{}: invalid phone {} (please check if the phone is position-dependent)'.format(sys.argv[0], phone), file=sys.stderr)
    sys.exit(1)
class MyLightningModule(pl.LightningModule):
    """ResNet-18 fine-tuned for a 37-class classification task."""

    def __init__(self):
        super().__init__()
        backbone = resnet18(pretrained=True)
        # Swap the ImageNet head for a 37-way classifier.
        backbone.fc = torch.nn.Linear(backbone.fc.in_features, 37)
        self.model = backbone
        self.criterion = torch.nn.CrossEntropyLoss()

    def forward(self, x):
        return self.model(x)

    def training_step(self, batch, batch_idx):
        inputs, labels = batch
        loss = self.criterion(self.model(inputs), labels)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, batch, batch_idx):
        inputs, labels = batch
        logits = self.forward(inputs)
        loss = self.criterion(logits, labels)
        predicted = torch.argmax(logits, dim=1)
        acc = torch.sum(labels == predicted).item() / (len(labels) * 1.0)
        # NOTE(review): metric keys say 'test_*' although this is the validation
        # step — kept byte-identical for compatibility with existing logs.
        self.log_dict({'test_acc': acc, 'test_loss': loss})

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
class Priority(Enum):
    """Discrete priority levels; a lower numeric value means higher priority."""
    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    ABOVE_NORMAL = 40
    NORMAL = 50
    BELOW_NORMAL = 60
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100
class manifold_cluster_generator(N2D.UmapGMM):
    """Generic manifold-embedding + clustering combo built on N2D.UmapGMM.

    Wraps an arbitrary manifold-learner class and clusterer class. When the
    clusterer exposes a callable ``predict_proba``, the inherited UmapGMM
    code paths are used; otherwise the embedding and clustering steps are
    run directly.
    """

    def __init__(self, manifold_class, manifold_args, cluster_class, cluster_args):
        self.manifold_in_embedding = manifold_class(**manifold_args)
        self.cluster_manifold = cluster_class(**cluster_args)
        # True when the clusterer supports probability estimates.
        proba = getattr(self.cluster_manifold, 'predict_proba', None)
        self.proba = callable(proba)
        self.hle = None  # cached manifold embedding from fit_predict

    def fit(self, hl):
        super().fit(hl)

    def predict(self, hl):
        if self.proba:
            # Bug fix: the original discarded the parent's result (returned None).
            return super().predict(hl)
        manifold = self.manifold_in_embedding.transform(hl)
        y_pred = self.cluster_manifold.predict(manifold)
        return np.asarray(y_pred)

    def fit_predict(self, hl):
        if self.proba:
            # Bug fix: propagate the parent's predictions instead of returning None.
            return super().fit_predict(hl)
        self.hle = self.manifold_in_embedding.fit_transform(hl)
        y_pred = self.cluster_manifold.fit_predict(self.hle)
        return np.asarray(y_pred)

    def predict_proba(self, hl):
        if self.proba:
            # Bug fix: return the probabilities instead of discarding them.
            return super().predict_proba(hl)
        print('Your clusterer cannot predict probabilities')
def launch_training(c, desc, outdir, dry_run):
    """Resolve a run directory under *outdir*, dump options, and launch training.

    When ``c.restart_every > 0`` and a directory matching ``NNNNN-<desc>``
    already exists, the run resumes in it; otherwise a fresh directory with
    the next 5-digit id is created. *dry_run* prints the configuration and
    returns without starting any process.
    """
    dnnlib.util.Logger(should_flush=True)
    prev_run_dirs = []
    if os.path.isdir(outdir):
        prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
    matching_dirs = [re.fullmatch(('\\d{5}' + f'-{desc}'), x) for x in prev_run_dirs if (re.fullmatch(('\\d{5}' + f'-{desc}'), x) is not None)]
    if ((c.restart_every > 0) and (len(matching_dirs) > 0)):
        # Restart mode: reuse the (unique) existing run directory.
        assert (len(matching_dirs) == 1), f'Multiple directories found for resuming: {matching_dirs}'
        c.run_dir = os.path.join(outdir, matching_dirs[0].group())
    else:
        # Fresh run: pick the next unused 5-digit id.
        prev_run_ids = [re.match('^\\d+', x) for x in prev_run_dirs]
        prev_run_ids = [int(x.group()) for x in prev_run_ids if (x is not None)]
        cur_run_id = (max(prev_run_ids, default=(- 1)) + 1)
        c.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{desc}')
        assert (not os.path.exists(c.run_dir))
    print()
    print('Training options:')
    print(json.dumps(c, indent=2))
    print()
    print(f'Output directory: {c.run_dir}')
    print(f'Number of GPUs: {c.num_gpus}')
    print(f'Batch size: {c.batch_size} images')
    print(f'Training duration: {c.total_kimg} kimg')
    print(f'Dataset path: {c.training_set_kwargs.path}')
    print(f'Dataset size: {c.training_set_kwargs.max_size} images')
    print(f'Dataset resolution: {c.training_set_kwargs.resolution}')
    print(f'Dataset labels: {c.training_set_kwargs.use_labels}')
    print(f'Dataset x-flips: {c.training_set_kwargs.xflip}')
    print()
    if dry_run:
        print('Dry run; exiting.')
        return
    print('Creating output directory...')
    # exist_ok only in restart mode; a fresh run must not clobber anything.
    os.makedirs(c.run_dir, exist_ok=(c.restart_every > 0))
    with open(os.path.join(c.run_dir, 'training_options.json'), 'wt+') as f:
        json.dump(c, f, indent=2)
    print('Launching processes...')
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if (c.num_gpus == 1):
            subprocess_fn(rank=0, c=c, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(fn=subprocess_fn, args=(c, temp_dir), nprocs=c.num_gpus)
def get_overfitting_coefficient(eigenlearnabilities, n, mults):
    """Return n / (n - sum(mults * eigenlearnabilities**2)).

    The subtracted term is the effective number of captured modes, so the
    ratio blows up as the captured modes approach the sample count n.
    """
    captured = ((eigenlearnabilities ** 2) * mults).sum()
    return n / (n - captured)
def clean_jhu_interventions(data_dir=oj('..', '..', 'raw', 'jhu_interventions'), out_dir='.'):
    """Load the JHU interventions table, normalize columns, and write it as CSV."""
    column_map = {'FIPS': 'countyFIPS', 'AREA_NAME': 'County Name', 'STATE': 'State Name'}
    df = load_jhu_interventions(data_dir=data_dir).rename(columns=column_map)
    # Zero-pad FIPS codes to the canonical 5-digit county form.
    df['countyFIPS'] = df['countyFIPS'].astype(str).str.zfill(5)
    df.to_csv(oj(out_dir, 'jhu_interventions.csv'), header=True, index=False)
    return df
class Florence(Instance, ABC):
    """Dataset adapter for the MICC-Florence faces corpus.

    Exposes per-actor image paths (1000 random non-PTZ-Outdoor frames each),
    FLAME parameter files, and registration meshes from the source tree.
    """

    def __init__(self):
        super(Florence, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/FLORENCE/'  # output root
        self.src = '/scratch/NFC/MICC_Florence/'  # dataset root

    def get_min_det_score(self):
        # Minimum face-detection confidence accepted for this dataset.
        return 0.85

    def get_images(self):
        """Return {actor: [1000 sampled image paths]}, excluding PTZ-Outdoor shots."""
        images = {}
        for actor in sorted(glob((self.get_src() + 'images/*'))):
            imgs = sorted(list(filter((lambda f: ('PTZ-Outdoor' not in f)), glob(f'{actor}/*/*.jpg'))))
            # NOTE(review): assumes every actor has >= 1000 usable frames;
            # np.random.choice raises otherwise — confirm against the data.
            indecies = np.random.choice(len(imgs), 1000, replace=False)
            images[Path(actor).stem] = [imgs[i] for i in indecies]
        return images

    def get_flame_params(self):
        """Return {actor: [FLAME .npz parameter files]} from iteration 1."""
        params = {}
        for actor in sorted(glob((self.get_src() + 'FLAME_parameters/iter1/*'))):
            params[Path(actor).stem] = glob(f'{actor}/*.npz')
        return params

    def get_registrations(self):
        """Return {actor: [registered .obj meshes]} from iteration 1, skipping renderings."""
        registrations = {}
        for actor in sorted(glob((self.get_src() + 'registrations/iter1/*'))):
            if ('rendering' in actor):
                continue
            registrations[Path(actor).stem] = glob(f'{actor}/*.obj')
        return registrations
class Local(Optimizer):
    """Optimizer that applies externally-supplied per-layer update directions.

    Parameters are registered together with their names so ``local_step``
    can look up one layer's weight tensor by name and apply a given update
    to it alone.
    """

    def __init__(self, named_params, lr=required):
        (self.param_names, params) = zip(*named_params)
        if ((lr is not required) and (lr < 0.0)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        defaults = dict(lr=lr)
        super(Local, self).__init__(params, defaults)

    def local_step(self, d_p, layer_name, closure=None):
        """Apply ``lr * d_p`` to the '<layer_name>.weight' parameter only.

        NOTE(review): the update is *added* (p += lr * d_p), so d_p is
        expected to already point in the desired direction — confirm callers
        negate gradients when descending.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            layer_index = self.param_names.index((layer_name + '.weight'))
            p = group['params'][layer_index]
            p.data.add_((group['lr'] * d_p))
        # Mirror torch's internal step counter when present.
        try:
            self._step_count += 1
        except AttributeError:
            pass
        return loss
def test_yolov3_neck():
    """Unit tests for YOLOV3Neck: config validation and output shapes."""
    # Mismatched in/out channel list lengths must be rejected at construction.
    with pytest.raises(AssertionError):
        YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])
    # Feeding fewer feature maps than num_scales must be rejected in forward.
    with pytest.raises(AssertionError):
        neck = YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])
        feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))
        neck(feats)
    # Shape check with power-of-two channel counts.
    s = 32
    in_channels = [16, 8, 4]
    out_channels = [8, 4, 2]
    feat_sizes = [(s // (2 ** i)) for i in range((len(in_channels) - 1), (- 1), (- 1))]
    feats = [torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range((len(in_channels) - 1), (- 1), (- 1))]
    neck = YOLOV3Neck(num_scales=3, in_channels=in_channels, out_channels=out_channels)
    outs = neck(feats)
    assert (len(outs) == len(feats))
    for i in range(len(outs)):
        assert (outs[i].shape == (1, out_channels[i], feat_sizes[i], feat_sizes[i]))
    # Shape check with arbitrary (non-power-of-two) channel counts.
    s = 32
    in_channels = [32, 8, 16]
    out_channels = [19, 21, 5]
    feat_sizes = [(s // (2 ** i)) for i in range((len(in_channels) - 1), (- 1), (- 1))]
    feats = [torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i]) for i in range((len(in_channels) - 1), (- 1), (- 1))]
    neck = YOLOV3Neck(num_scales=3, in_channels=in_channels, out_channels=out_channels)
    outs = neck(feats)
    assert (len(outs) == len(feats))
    for i in range(len(outs)):
        assert (outs[i].shape == (1, out_channels[i], feat_sizes[i], feat_sizes[i]))
class Logger(keras.callbacks.Callback):
    """Keras callback that records loss/accuracy/LR per epoch and plots them.

    After every epoch the three-panel figure (loss, accuracy, learning
    rate) is re-rendered and saved to ``log_dir/loss_acc_.png``.
    """

    def __init__(self, log_dir=None, num_epochs=None):
        super(Logger, self).__init__()
        self.log_dir = log_dir
        self.num_epochs = num_epochs  # when set, fixes the x-axis limits

    def on_train_begin(self, logs={}):
        # Reset all history buffers at the start of training.
        self.i = 0
        self.t0 = time.time()
        self.x = []
        self.losses = []
        self.val_losses = []
        self.accuracies = []
        self.val_accuracies = []
        self.learning_rates = []
        self.dts = []  # elapsed minutes per epoch
        self.fig = plt.figure(figsize=(20, 7))

    def on_epoch_end(self, epoch, logs={}):
        """Append this epoch's metrics and refresh the saved plot."""
        self.x.append(self.i)
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.accuracies.append(logs.get('acc'))
        self.val_accuracies.append(logs.get('val_acc'))
        self.learning_rates.append(keras.backend.get_value(self.model.optimizer.lr))
        self.dts.append(((time.time() - self.t0) / 60))
        self.i += 1
        self.plot()

    def plot(self, showFig=False):
        """Render loss/accuracy/LR panels; show interactively or save to log_dir."""
        plt.clf()
        ax1 = plt.subplot(1, 3, 1)
        plt.plot(self.x, self.losses, label='loss')
        plt.plot(self.x, self.val_losses, label='val_loss')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.legend()
        if (self.num_epochs is not None):
            plt.xlim(0, self.num_epochs)
        ax1 = plt.subplot(1, 3, 2)
        plt.plot(self.x, self.accuracies, label='acc')
        plt.plot(self.x, self.val_accuracies, label='val_acc')
        plt.xlabel('Epochs')
        plt.ylabel('Accuracy')
        plt.legend()
        if (self.num_epochs is not None):
            plt.xlim(0, self.num_epochs)
            plt.ylim(0, 1)
        ax1 = plt.subplot(1, 3, 3)
        plt.plot(self.x, self.learning_rates)
        plt.xlabel('Epochs')
        plt.ylabel('Learning rate')
        if (self.num_epochs is not None):
            plt.xlim(0, self.num_epochs)
        plt.tight_layout()
        if showFig:
            plt.show()
        else:
            plt.savefig(os.path.join(self.log_dir, ('loss_acc_' + '.png')), dpi=300)
class Cnn10_kw(nn.Module):
    """PANNs-style CNN10 audio embedding network.

    Converts a waveform to a log-mel spectrogram, optionally applies
    SpecAugment during training, runs four ConvBlocks with avg-pooling and
    dropout, then pools over time (max + mean) into a 512-d embedding.
    """

    def __init__(self, config):
        super(Cnn10_kw, self).__init__()
        self.bn0 = nn.BatchNorm2d(64)
        sr = config.wav.sr
        window_size = config.wav.window_size
        hop_length = config.wav.hop_length
        mel_bins = config.wav.mel_bins
        fmin = config.wav.fmin
        fmax = config.wav.fmax
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_length, win_length=window_size, window='hann', center=True, pad_mode='reflect', freeze_parameters=True)
        self.logmel_extractor = LogmelFilterBank(sr=sr, n_fft=window_size, n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=1.0, amin=1e-10, top_db=None, freeze_parameters=True)
        self.is_spec_augment = config.training.spec_augmentation
        if self.is_spec_augment:
            self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, freq_drop_width=8, freq_stripes_num=2, mask_type='zero_value')
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.fc1 = nn.Linear(512, 512, bias=True)
        self.init_weights()

    def init_weights(self):
        init_bn(self.bn0)
        init_layer(self.fc1)

    def forward(self, input):
        """Return a 512-d embedding for a batch of waveforms."""
        x = self.spectrogram_extractor(input)
        x = self.logmel_extractor(x)
        # BatchNorm is applied over the mel-bin axis, hence the transposes.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        # SpecAugment only during training and only when enabled in config.
        if (self.training and self.is_spec_augment):
            x = self.spec_augmenter(x)
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        # Collapse frequency (mean), then pool time with max + mean.
        x = torch.mean(x, dim=3)
        (x1, _) = torch.max(x, dim=2)
        x2 = torch.mean(x, dim=2)
        x = (x1 + x2)
        x = F.relu_(self.fc1(x))
        x = F.dropout(x, p=0.2, training=self.training)
        return x
def waymo_data_prep(root_path, split, nsweeps=1):
    """Create Waymo info files and, for the train split, a GT-sampling database."""
    waymo_ds.create_waymo_infos(root_path, split=split, nsweeps=nsweeps)
    if split != 'train':
        return
    info_path = Path(root_path) / 'infos_train_{:02d}sweeps_filter_zero_gt.pkl'.format(nsweeps)
    create_groundtruth_database('WAYMO', root_path, info_path, used_classes=['VEHICLE', 'CYCLIST', 'PEDESTRIAN'], nsweeps=nsweeps)
def calc_flops(model, img_size=224):
    """Print per-module FLOP counts for *model* on one image and return total GFLOPs.

    Modules that are absent from the model (text_encoder/context_decoder,
    neck) are skipped best-effort.
    """
    # Bug fix: the original divided by `.0` (i.e. 0.0), which raises
    # ZeroDivisionError on every call; the '#### GFLOPs' label establishes
    # the intended divisor of 1e9.
    GIGA = 1e9
    with torch.no_grad():
        x = torch.randn(1, 3, img_size, img_size).cuda()
        fca1 = FlopCountAnalysis(model, x)
        print('backbone:', (fca1.total(module_name='backbone') / GIGA))
        try:
            print('text_encoder:', (fca1.total(module_name='text_encoder') / GIGA))
            print('context_decoder:', (fca1.total(module_name='context_decoder') / GIGA))
        except Exception:
            # Model variants without a text branch are fine to skip.
            pass
        try:
            print('neck:', (fca1.total(module_name='neck') / GIGA))
        except Exception:
            pass
        print('decode_head:', (fca1.total(module_name='decode_head') / GIGA))
        flops1 = fca1.total()
        print('#### GFLOPs: {:.1f}'.format(flops1 / GIGA))
    return (flops1 / GIGA)
def transform(df, val):
    """Fill missing values with *val* and cast the explanation flag column to int."""
    filled = df.fillna(value=val)
    filled['prior_question_had_explanation'] = (
        filled['prior_question_had_explanation'].astype(int))
    return filled
def train(args, model, train_loader, optimizer, epoch):
    """Run one training epoch of cross-entropy SGD steps on the GPU, logging periodically."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        loss = F.cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            seen = batch_idx * len(data)
            total = len(train_loader.dataset)
            pct = (100.0 * batch_idx) / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, seen, total, pct, loss.item()))
def process_single_fragment(cfg, color_files, depth_files, frag_id, n_frags, intrinsic_path, out_folder):
    """Fuse one fragment of RGB-D frames into a TSDF point cloud and save it.

    Frames [frag_id*frames_per_frag, ...) are integrated into a scalable
    TSDF volume in the coordinate frame of the fragment's first posed
    frame. Writes cloud_bin_<id>.ply, its base pose (.pose.npy), and the
    list of contributing frame prefixes (.frames.pkl) to *out_folder*.
    """
    import open3d as o3d
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
    n_frames = len(color_files)
    intrinsic = read_intrinsic(intrinsic_path, cfg.width, cfg.height)
    volume = o3d.geometry.integration.ScalableTSDFVolume(voxel_length=(cfg.tsdf_cubic_size / 512.0), sdf_trunc=0.04, color_type=o3d.geometry.integration.TSDFVolumeColorType.RGB8)
    sid = (frag_id * cfg.frames_per_frag)
    eid = min((sid + cfg.frames_per_frag), n_frames)
    pose_base2world = None
    pose_base2world_inv = None
    frag_frames = []
    for fid in range(sid, eid):
        color_path = color_files[fid]
        depth_path = depth_files[fid]
        # Pose file shares the color file's prefix (last 10 chars are the extension part).
        pose_path = (color_path[:(- 10)] + '.pose.txt')
        pose_cam2world = read_extrinsic(pose_path)
        if (pose_cam2world is None):
            # Skip frames with missing/invalid poses.
            continue
        if (fid == sid):
            # First posed frame defines the fragment's base coordinate frame.
            pose_base2world = pose_cam2world
            pose_base2world_inv = np.linalg.inv(pose_base2world)
        if (pose_base2world_inv is None):
            break
        # Express the camera pose relative to the fragment base.
        pose_cam2world = np.matmul(pose_base2world_inv, pose_cam2world)
        rgbd = read_rgbd_image(cfg, color_path, depth_path, False)
        volume.integrate(rgbd, intrinsic, np.linalg.inv(pose_cam2world))
        frag_frames.append(color_path[:(- 10)])
    if (pose_base2world_inv is None):
        # No valid first pose: nothing was integrated.
        return
    pcloud = volume.extract_point_cloud()
    o3d.geometry.PointCloud.estimate_normals(pcloud)
    o3d.io.write_point_cloud(osp.join(out_folder, 'cloud_bin_{}.ply'.format(frag_id)), pcloud)
    np.save(osp.join(out_folder, 'cloud_bin_{}.pose.npy'.format(frag_id)), pose_base2world)
    time.sleep(0.1)
    with open(osp.join(out_folder, 'cloud_bin_{}.frames.pkl'.format(frag_id)), 'wb') as fh:
        to_save = {'frames': frag_frames}
        pickle.dump(to_save, fh, protocol=pickle.HIGHEST_PROTOCOL)
def get_learning_rate():
    """Small LR when fine-tuning from a checkpoint, larger LR when training from scratch."""
    return 0.0001 if FLAGS.fine_tune_checkpoint else 0.045
def use_gpu(opt):
    """Return True when *opt* requests GPU execution.

    Either a non-empty ``gpu_ranks`` list (distributed) or a non-negative
    single ``gpu`` id counts as a GPU request.
    """
    if hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0:
        return True
    return hasattr(opt, 'gpu') and opt.gpu > -1
def _group_checkpoint_keys(keys: List[str]) -> Dict[(str, List[str])]:
groups = defaultdict(list)
for key in keys:
pos = key.rfind('.')
if (pos >= 0):
(head, tail) = (key[:pos], [key[(pos + 1):]])
else:
(head, tail) = (key, [])
groups[head].extend(tail)
return groups |
def get_box_proposal(fs_serv, img_path):
    """Fetch box proposals for an image from *fs_serv* and save a visualization.

    The rendering is written to the module-level ``disp_folder`` as
    'box_prop_<image basename>'.
    """
    from show_boxes import show_detsB_boxes
    q_dets_p = fs_serv.get_box_proposal(img_path)
    image_basename = os.path.basename(img_path)
    save_file_path = os.path.join(disp_folder, 'box_prop_{0}'.format(image_basename))
    # Load honoring EXIF orientation, then convert BGR -> RGB for display.
    img = cv2.cvtColor(cv2.imread(img_path, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)), cv2.COLOR_BGR2RGB)
    show_detsB_boxes(img, q_dets_p, save_file_path=save_file_path)
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action *skip* times and max-pool the last two raw frames.

    The pixel-wise max over consecutive frames removes Atari sprite
    flicker; rewards accumulated over the skipped frames are summed.
    """

    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        # Buffer holding the two most recent raw observations.
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        accumulated = 0.0
        done = None
        for step_idx in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Only the last two frames feed the max-pool below.
            if step_idx == self._skip - 2:
                self._obs_buffer[0] = obs
            elif step_idx == self._skip - 1:
                self._obs_buffer[1] = obs
            accumulated += reward
            if done:
                break
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, accumulated, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
def get_transform(opt):
    """Assemble the torchvision transform pipeline described by *opt*.

    The resize/crop stage follows opt.resize_or_crop; training adds a
    random horizontal flip unless disabled; the tail always converts to a
    tensor normalized to [-1, 1].
    """
    transform_list = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        osize = [opt.loadSizeX, opt.loadSizeY]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSizeX)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())
    transform_list.append(transforms.ToTensor())
    transform_list.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    return transforms.Compose(transform_list)
def l_resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101; optionally load pretrained weights via the L helper."""
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        L.load_pretrained_params(net, model_url=model_urls['resnet101'])
    return net
def _get_default_kv_variable_store():
    """Return the graph's KvVariableStore, creating and caching one when absent."""
    existing = ops.get_collection(_VARSTORE_KEY)
    if existing:
        return existing[0]
    new_store = _KvVariableStore()
    ops.add_to_collection(_VARSTORE_KEY, new_store)
    return new_store
def get_scores(ftrain, ftest, food, labelstrain, args):
    """Dispatch OOD scoring to the single- or multi-cluster scorer.

    With one cluster the simple scorer is used; otherwise cluster ids come
    from the training labels (SupCE mode) or from k-means-style clustering.
    """
    if args.clusters == 1:
        return get_scores_one_cluster(ftrain, ftest, food)
    if args.training_mode == 'SupCE':
        print('Using data labels as cluster since model is cross-entropy')
        cluster_ids = labelstrain
    else:
        cluster_ids = get_clusters(ftrain, args.clusters)
    return get_scores_multi_cluster(ftrain, ftest, food, cluster_ids)
def process_folder(q, static_frames, test_scenes, data_dir, output_dir, stride=1):
    """Drain folder names from queue ``q`` and dump 3-frame training sequences.

    For each KITTI-style folder, triplets of frames (s, s+stride, s+2*stride)
    are stacked vertically into a single image written under ``output_dir``,
    and a ``train.txt`` index lists every sample with its camera calibration
    file. Frames listed in ``static_frames`` and scenes in ``test_scenes``
    are skipped.
    """
    # NOTE(review): q.empty() followed by q.get() is racy when several
    # workers share the queue; a get_nowait()/queue.Empty pattern would be
    # safer — left unchanged here to avoid altering concurrency behaviour.
    while True:
        if q.empty():
            break
        folder = q.get()
        if folder in static_frames.keys():
            static_ids = static_frames[folder]
        else:
            static_ids = []
        scene = folder.split('/')[1]
        if scene[:(- 5)] in test_scenes:
            continue
        image_path = os.path.join(data_dir, folder, 'image_02/data')
        dump_image_path = os.path.join(output_dir, folder)
        if not os.path.isdir(dump_image_path):
            os.makedirs(dump_image_path)
        # Fix: the index file was previously opened and never closed (handle
        # leak, possibly unflushed writes). The context manager guarantees
        # flush + close even if an image read/write raises.
        with open(os.path.join(dump_image_path, 'train.txt'), 'w') as f:
            numbers = len(os.listdir(image_path))
            for n in range(numbers - (2 * stride)):
                s_idx = n
                m_idx = s_idx + stride
                e_idx = s_idx + (2 * stride)
                # Skip any triplet that touches a known-static frame.
                if (('%.10d' % s_idx) in static_ids) or (('%.10d' % e_idx) in static_ids) or (('%.10d' % m_idx) in static_ids):
                    continue
                curr_image = imageio.imread((os.path.join(image_path, ('%.10d' % s_idx)) + '.png'))
                midd_image = imageio.imread((os.path.join(image_path, ('%.10d' % m_idx)) + '.png'))
                next_image = imageio.imread((os.path.join(image_path, ('%.10d' % e_idx)) + '.png'))
                # Stack the three frames vertically into one training image.
                seq_images = np.concatenate([curr_image, midd_image, next_image], axis=0)
                imageio.imsave((os.path.join(dump_image_path, ('%.10d' % s_idx)) + '.png'), seq_images.astype('uint8'))
                date = folder.split('/')[0]
                f.write(('%s %s\n' % ((os.path.join(folder, ('%.10d' % s_idx)) + '.png'), os.path.join(date, 'calib_cam_to_cam.txt'))))
        print(folder)
class GlobalPool(nn.Module):
    """Configurable global pooling head.

    The pooling strategy is chosen at run time from ``cfg.global_pool``; the
    combined modes (AVG_MAX, AVG_MAX_LSE) concatenate their component pools
    along the channel dimension, and PCAM additionally consumes the logit map.
    """

    def __init__(self, cfg):
        super(GlobalPool, self).__init__()
        self.cfg = cfg
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.maxpool = nn.AdaptiveMaxPool2d((1, 1))
        self.exp_pool = ExpPool()
        self.pcampool = PcamPool()
        self.linear_pool = LinearPool()
        self.lse_pool = LogSumExpPool(cfg.lse_gamma)

    def cuda(self, device=None):
        # Move every tensor of the module to the given CUDA device.
        return self._apply(lambda t: t.cuda(device))

    def forward(self, feat_map, logit_map):
        mode = self.cfg.global_pool
        if mode == 'PCAM':
            # PCAM is the only mode that also uses the logit map.
            return self.pcampool(feat_map, logit_map)
        single_pools = {
            'AVG': self.avgpool,
            'MAX': self.maxpool,
            'EXP': self.exp_pool,
            'LINEAR': self.linear_pool,
            'LSE': self.lse_pool,
        }
        if mode in single_pools:
            return single_pools[mode](feat_map)
        if mode == 'AVG_MAX':
            parts = [self.avgpool(feat_map), self.maxpool(feat_map)]
            return torch.cat(parts, 1)
        if mode == 'AVG_MAX_LSE':
            parts = [self.avgpool(feat_map), self.maxpool(feat_map), self.lse_pool(feat_map)]
            return torch.cat(parts, 1)
        raise Exception('Unknown pooling type : {}'.format(mode))
def setup_args():
    """Create the codec-metrics argument parser and its required codec subparsers."""
    parser = argparse.ArgumentParser(description='Collect codec metrics.')
    subparsers = parser.add_subparsers(dest='codec', help='Select codec')
    # A codec must always be chosen on the command line.
    subparsers.required = True
    return (parser, subparsers)
def get_result_batch(exp_name_list, res, res_name):
    """Collect a [mean, std] pair of metric ``res_name`` for every experiment."""
    return [list(get_result(name, res, res_name)) for name in exp_name_list]
def get_start_time(line_iterable, year):
    """Return the datetime parsed from the first 'Solving' line, or None.

    Scans ``line_iterable`` until a stripped line containing 'Solving' is
    found and delegates parsing to ``extract_datetime_from_line``.
    """
    for raw_line in line_iterable:
        stripped = raw_line.strip()
        if 'Solving' in stripped:
            return extract_datetime_from_line(stripped, year)
    return None
_model
def bat_resnext26ts(pretrained=False, **kwargs):
    """Construct the 'bat_resnext26ts' variant via the byobnet factory."""
    return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
class AntNavPrimeEnv(AntEnv):
    """Ant locomotion task whose 2-D goal is resampled every ``num_goal_steps``.

    The observation is the base AntEnv observation with the current goal
    appended; reward is the negative distance to the goal, granted only on the
    step at which the goal period elapses.
    """

    def __init__(self, max_path_length, goal_range=15.0, num_goal_steps=50, **kwargs):
        # Episode length in steps; _get_done() compares against it.
        self.max_path_length = max_path_length
        # Half-width of the square region goals are drawn from.
        self.goal_range = goal_range
        # Steps between goal resamples (also the reward cadence).
        self.num_goal_steps = num_goal_steps
        self.cur_goal = np.random.uniform((- self.goal_range), self.goal_range, (2,))
        self.num_steps = 0
        super().__init__(**kwargs)
        utils.EzPickle.__init__(self, max_path_length=max_path_length, goal_range=goal_range, num_goal_steps=num_goal_steps, **kwargs)

    def _set_observation_space(self, observation):
        """Extend the base observation space with an unbounded 2-D goal box."""
        self.observation_space = convert_observation_to_space(observation)
        low = np.full((2,), (- float('inf')), dtype=np.float32)
        high = np.full((2,), float('inf'), dtype=np.float32)
        return akro.concat(self.observation_space, akro.Box(low=low, high=high, dtype=self.observation_space.dtype))

    def reset_model(self):
        """Resample the goal, zero the step counter, then reset the ant."""
        self.cur_goal = np.random.uniform((- self.goal_range), self.goal_range, (2,))
        self.num_steps = 0
        return super().reset_model()

    def _get_obs(self):
        # Append the active goal so the policy can condition on it.
        obs = super()._get_obs()
        obs = np.concatenate([obs, self.cur_goal])
        return obs

    def _get_done(self):
        # Episode terminates exactly when the step budget is exhausted.
        return (self.num_steps == self.max_path_length)

    def compute_reward(self, xposbefore, yposbefore, xposafter, yposafter):
        """Return -distance to goal on goal-period boundaries, else 0.

        Also advances the step counter and, on the same boundary, resamples
        the next goal in a box of half-width ``goal_range`` centred on the
        ant's current (x, y) position.
        """
        self.num_steps += 1
        delta = np.linalg.norm((self.cur_goal - np.array([xposafter, yposafter])))
        if ((self.num_steps % self.num_goal_steps) == 0):
            reward = (- delta)
        else:
            reward = 0.0
        # Resample AFTER the reward is computed so `delta` above refers to
        # the goal that was active during this period.
        if ((self.num_steps % self.num_goal_steps) == 0):
            self.cur_goal = np.array([np.random.uniform((xposafter - self.goal_range), (xposafter + self.goal_range)), np.random.uniform((yposafter - self.goal_range), (yposafter + self.goal_range))])
        return reward
def extract_acc_from_summary_path(summary_path):
    """Load a JSON summary and return {frame_idx (str): center L2 error}.

    Each summary entry must carry 'box1' and 'pred_box1' dicts with
    'center_x'/'center_y'/'center_z' fields plus a 'frame_idx'.
    """
    with open(summary_path, 'r') as fh:
        entries = json.load(fh)
    distances = {}
    for entry in entries:
        gt_box = entry['box1']
        pred_box = entry['pred_box1']
        gt_center = np.array([gt_box['center_x'], gt_box['center_y'], gt_box['center_z']])
        pred_center = np.array([pred_box['center_x'], pred_box['center_y'], pred_box['center_z']])
        distances[str(entry['frame_idx'])] = np.linalg.norm(gt_center - pred_center, ord=2)
    return distances
class DebertaV2Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for DeBERTa-v2 models.

    Thin wrapper around SPMTokenizer providing the standard
    PreTrainedTokenizer surface: special-token handling, token<->id
    conversion, mask/segment construction, and vocabulary persistence.
    """

    # Class-level maps consumed by PreTrainedTokenizer's from_pretrained logic.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, split_by_punct=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        """Load the SentencePiece model at ``vocab_file``.

        Raises:
            ValueError: if ``vocab_file`` does not point to an existing file.
        """
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        self._tokenizer = SPMTokenizer(vocab_file, self.all_special_tokens, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs)

    # NOTE(review): in upstream transformers, `vocab_size` and `vocab` are
    # decorated with @property; the decorators appear to be missing in this
    # copy (cf. `self.vocab_size` being read as a value in
    # _convert_id_to_token, and `self.vocab.copy()` in get_vocab). Confirm
    # against the original file before relying on these as plain methods.
    def vocab_size(self):
        """Size of the base (non-added) vocabulary."""
        return len(self.vocab)

    def vocab(self):
        """Underlying SentencePiece vocabulary mapping."""
        return self._tokenizer.vocab

    def get_vocab(self):
        """Return the full vocabulary including tokens added after loading."""
        vocab = self.vocab.copy()
        vocab.update(self.get_added_vocab())
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """Lower-case (if configured) and SentencePiece-tokenize ``text``."""
        if self.do_lower_case:
            text = text.lower()
        return self._tokenizer.tokenize(text)

    def _convert_token_to_id(self, token):
        """Map a token string to its SentencePiece id."""
        return self._tokenizer.spm.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Map an id back to its token, or the unk token when out of range."""
        return (self._tokenizer.spm.IdToPiece(index) if (index < self.vocab_size) else self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join a token sequence back into a plain string."""
        return self._tokenizer.decode(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap sequences as [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 over [CLS] A [SEP], 1 over B [SEP] when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prefix a space (for pre-split words or on request)."""
        add_prefix_space = kwargs.pop('add_prefix_space', False)
        if (is_split_into_words or add_prefix_space):
            text = (' ' + text)
        return (text, kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Persist the SentencePiece model files into ``save_directory``."""
        return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
def feed_dict(train):
    """Build a TF feed dict: a training batch with dropout, or the test set.

    Relies on the module-level placeholders ``x``, ``y_`` and ``keep_prob``.
    """
    global x, y_, keep_prob
    if not (train or FLAGS.fake_data):
        # Evaluation: full test set, no dropout.
        images, labels = mnist.test.images, mnist.test.labels
        keep = 1.0
    else:
        images, labels = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
        keep = FLAGS.dropout
    return {x: images, y_: labels, keep_prob: keep}
def mocked_simulator_binaries():
    """Context in which simulator binaries appear to exist on disk.

    Patches ``path.exists`` to return True and ``path.getsize`` to report a
    non-zero size (1000 bytes) for any path, then yields control to the
    caller; both patches are undone on exit.
    """
    # NOTE(review): `path` is presumably os.path imported elsewhere in the
    # file, and this generator is likely decorated as a fixture/context
    # manager — the decorator is not visible in this chunk; confirm.
    with patch.object(path, 'exists', return_value=True, autospec=True), patch.object(path, 'getsize', return_value=1000, autospec=True):
        (yield)
class TensorflowCriterions(object):
    """Holds a per-instance copy of the TensorFlow criterion table."""

    def __init__(self):
        # Start from a fresh copy of the module-level criterion mapping.
        self.criterions = dict(TENSORFLOW_CRITERIONS)
def makeplot_qual(eval_path, robot):
    """Plot qualitative trajectories for every planner CSV under eval_path.

    Depending on ``plt_cfg['ql']``, plots go to one figure per planner (1)
    or a shared 3-D figure per robot (2); ``plt_cfg['qt']`` controls whether
    bar charts are drawn per planner (1) or once over all experiments (2).
    Returns a dict of per-planner metrics (trajectory length, runtime,
    energy, smoothness) keyed by '<robot>_<planner>: <metric>'.
    """
    experiments = {}
    if (plt_cfg['ql'] == 2):
        # One shared 3-D axes for all planners of this robot.
        global ax
        plt.figure(robot)
        ax = plt.axes(projection='3d')
    for p in os.listdir(eval_path):
        # NOTE(review): assumes eval_path ends with a path separator, since
        # the file name is appended by plain string concatenation.
        filepath = (eval_path + p)
        planner = p.replace('.csv', '')
        title = ((robot + '_') + planner)
        if (planner in plt_cfg['planner']):
            if (plt_cfg['ql'] == 1):
                plt.figure(title)
            quali = qual(filepath, planner, True)
            evals = quali.plot_trj()
            dist = evals[('tr_len_' + planner)]
            dur = evals[('runtime_' + planner)]
            ev = evals[('eev_' + planner)]
            # NOTE(review): key is 'smoothness' + planner with no underscore,
            # unlike the other metric keys — looks intentional but confirm.
            smooth = evals[('smoothness' + planner)]
            experiments[((title + ': ') + 'tr_len')] = dist
            experiments[((title + ': ') + 'dur')] = dur
            experiments[((title + ': ') + 'eev')] = ev
            experiments[((title + ': ') + 'smooth')] = smooth
            if (plt_cfg['ql'] == 1):
                plt.legend()
                plt.savefig((('quali/' + title) + '.png'), bbox_inches='tight')
            elif (plt_cfg['ql'] == 2):
                plt.legend()
                plt.savefig((('quali/' + robot) + '.png'), bbox_inches='tight')
            if (plt_cfg['qt'] == 1):
                makeplot_bar(dur, planner, ('duration_' + title))
                makeplot_bar(dist, planner, ('path length_' + title))
    if (plt_cfg['qt'] == 2):
        makeplot_bar(experiments, '', robot)
    return experiments
class TestTransactionDB(unittest.TestCase):
    """Unit tests for TransactionDB construction and sizing."""

    def test_init(self):
        """A 4-row table yields the expected transactions and class labels."""
        rows = [[1, 1, 0, 0], [1, 1, 0, 1], [0, 0, 1, 1], [0, 1, 0, 1]]
        header = ['A', 'B', 'C', 'Y']
        db = TransactionDB(rows, header, unique_transactions=False)
        expected_first = Transaction([1, 1, 0], 'ABC', Item('Y', 0))
        expected_labels = [Item('Y', 0), Item('Y', 1), Item('Y', 1), Item('Y', 1)]
        assert (db.class_labels == expected_labels)
        assert (db.classes == ['0', '1', '1', '1'])
        assert (db.data[0] == expected_first)

    def test_len(self):
        """len() reports the number of rows."""
        rows = [[1, 1, 0, 0], [1, 1, 0, 1], [0, 0, 1, 1], [0, 1, 0, 1]]
        header = ['A', 'B', 'C', 'Y']
        db = TransactionDB(rows, header)
        assert (len(db) == 4)
class MultiCategory(ItemBase):
    """Item wrapping a multi-label target: tensor data, decoded objects, raw labels."""

    def __init__(self, data, obj, raw):
        self.data = data
        self.obj = obj
        self.raw = raw

    def __str__(self):
        # Render as a ';'-separated list of the decoded objects.
        return ';'.join(str(o) for o in self.obj)

    def __hash__(self):
        # Hash on the string form so equal-looking items collide.
        return hash(str(self))
def test_logreg_l1_sparse_data():
    """L1 logistic regression: miso and ista solvers agree, the appended
    noise/constant columns are zeroed out, and sparse vs dense input give
    the same ista solution."""
    rng = np.random.RandomState(42)
    n_samples = 50
    X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0)
    noise_cols = rng.normal(scale=0.1, size=(n_samples, 3))
    constant_cols = np.zeros(shape=(n_samples, 2))
    X = np.concatenate((X, noise_cols, constant_cols), axis=1)
    # Threshold small entries so the matrix is genuinely sparse.
    X[X < 1] = 0
    X = sparse.csr_matrix(X)
    clf_miso = LogisticRegression(penalty='l1', lambda_1=1.0, solver='miso', fit_intercept=False, multi_class='ovr', tol=1e-10)
    clf_miso.fit(X, y)
    clf_ista = LogisticRegression(penalty='l1', lambda_1=1.0, solver='ista', fit_intercept=False, multi_class='ovr', max_iter=1000, tol=1e-10)
    clf_ista.fit(X, y)
    assert_array_almost_equal(clf_ista.coef_, clf_miso.coef_)
    # The last five columns (the appended ones) must be driven to zero by L1.
    assert_array_almost_equal(clf_miso.coef_[-5:], np.zeros(5))
    assert_array_almost_equal(clf_ista.coef_[-5:], np.zeros(5))
    clf_ista_dense = LogisticRegression(penalty='l1', lambda_1=1.0, solver='ista', fit_intercept=False, multi_class='ovr', max_iter=1000, tol=1e-10)
    clf_ista_dense.fit(X.toarray(), y)
    assert_array_almost_equal(clf_ista.coef_, clf_ista_dense.coef_)
.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_nms_bev():
    """BEV NMS at IoU threshold 0.3 keeps boxes in score order [1, 0, 3]."""
    boxes_np = np.array([[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0], [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]], dtype=np.float32)
    scores_np = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32)
    expected = np.array([1, 0, 3])
    kept = nms_bev(torch.from_numpy(boxes_np).cuda(), torch.from_numpy(scores_np).cuda(), thresh=0.3)
    assert np.allclose(kept.cpu().numpy(), expected)
class FCN8(EvalOnlyModel):
    """Fully convolutional network with three additive skip connections.

    conv1 downsamples 4x and conv2/conv3 each 2x; conv4-conv8 preserve
    resolution. skip1/skip2/skip3 re-inject earlier activations after pairs
    of convolutions. The head emits 8 channels, optionally passed through a
    2-group GroupNorm when ``normalize_outputs`` is set.
    """

    def __init__(self, img_channels=3, normalize_outputs=False, **kwargs):
        super(FCN8, self).__init__(**kwargs)
        self.conv1 = _make_layer(img_channels, 64, kernel_size=8, stride=4, padding=2)
        self.conv2 = _make_layer(64, 128, kernel_size=3, stride=2, padding=1)
        self.conv3 = _make_layer(128, 256, kernel_size=3, stride=2, padding=1)
        self.conv4 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv5 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv6 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv7 = _make_layer(256, 128, kernel_size=3, stride=1, padding=1)
        self.conv8 = _make_layer(128, 8, kernel_size=3, stride=1, padding=1)
        # Skip paths: conv2 output -> after conv4, conv4 sum -> after conv6,
        # conv6 sum -> after conv8.
        self.skip1 = _make_layer(128, 256, kernel_size=3, stride=2, padding=1)
        self.skip2 = _make_layer(256, 256, kernel_size=3, stride=1, padding=1)
        self.skip3 = _make_layer(256, 8, kernel_size=3, stride=1, padding=1)
        self.normalize_outputs = normalize_outputs
        if self.normalize_outputs:
            self.groupnorm = nn.GroupNorm(2, 8)

    def forward(self, x, task_idx: int=(- 1), cache=None):
        """Forward pass.

        ``task_idx`` and ``cache`` are accepted for interface compatibility
        but unused in this model. Fix: ``cache`` previously defaulted to a
        mutable ``{}`` shared across all calls (classic Python pitfall);
        since it is never read here, a None default is backward-compatible.
        """
        x = self.conv1(x)
        x = self.conv2(x)
        x2 = x
        x = self.conv3(x)
        x = self.conv4(x)
        x = (x + self.skip1(x2))
        x4 = x
        x = self.conv5(x)
        x = self.conv6(x)
        x = (x + self.skip2(x4))
        x6 = x
        x = self.conv7(x)
        x = self.conv8(x)
        x = (x + self.skip3(x6))
        if self.normalize_outputs:
            x = self.groupnorm(x)
        return x
def test_save_and_load_dict():
    """Round-trip a WideDeep model through save(save_state_dict=True) and
    verify the wide-layer weights survive and training history was written."""
    wide = Wide(np.unique(X_wide).shape[0], 1)
    tabmlp = TabMlp(mlp_hidden_dims=[32, 16], column_idx={k: v for (v, k) in enumerate(colnames)}, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):])
    # Train on deep copies so the originals stay untouched for model2 below.
    model1 = WideDeep(wide=deepcopy(wide), deeptabular=deepcopy(tabmlp))
    trainer1 = Trainer(model1, objective='binary', verbose=0)
    trainer1.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, target=target, batch_size=16)
    wide_weights = model1.wide.wide_linear.weight.data
    trainer1.save(path='tests/test_model_functioning/model_dir/', save_state_dict=True)
    # Rebuild an untrained model and load the saved state dict into it.
    model2 = WideDeep(wide=wide, deeptabular=tabmlp)
    trainer2 = Trainer(model2, objective='binary', verbose=0)
    trainer2.model.load_state_dict(torch.load('tests/test_model_functioning/model_dir/wd_model.pt'))
    n_wide_weights = trainer2.model.wide.wide_linear.weight.data
    same_weights = torch.allclose(wide_weights, n_wide_weights)
    if os.path.isfile('tests/test_model_functioning/model_dir/history/train_eval_history.json'):
        history_saved = True
    else:
        history_saved = False
    # Clean up artefacts before asserting so a failure doesn't leak files.
    shutil.rmtree('tests/test_model_functioning/model_dir/')
    assert (same_weights and history_saved)
def _laplace(x, sigma: Union[(int, float)]=2):
return (np.exp(((- abs(x)) / sigma)) / (2.0 * sigma)) |
class Cell(nn.Module):
    """Search cell whose edges are weighted mixtures of candidate ops.

    Each of ``steps`` intermediate nodes receives a MixedOp edge from every
    earlier state (the two preprocessed cell inputs plus previous nodes);
    the cell output concatenates the last ``multiplier`` node outputs
    channel-wise.
    """

    def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(Cell, self).__init__()
        self.reduction = reduction
        # If the previous cell reduced spatial size, align s0 via a
        # factorized reduction; otherwise a 1x1 ReLU-Conv-BN suffices.
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
        self._steps = steps
        self._multiplier = multiplier
        self._ops = nn.ModuleList()
        self._bns = nn.ModuleList()
        # Node i has (2 + i) incoming edges, one from each earlier state.
        for i in range(self._steps):
            for j in range((2 + i)):
                # In a reduction cell, only edges from the two cell inputs
                # (j < 2) use stride 2.
                stride = (2 if (reduction and (j < 2)) else 1)
                op = MixedOp(C, stride)
                self._ops.append(op)

    def forward(self, s0, s1, weights):
        """Run the cell on inputs s0/s1 with per-edge mixing ``weights``.

        ``weights[offset + j]`` selects the mixture for the edge from state
        j to the current node; edges are stored flat in ``self._ops`` in
        creation order, so ``offset`` advances by len(states) per node.
        """
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        offset = 0
        for i in range(self._steps):
            s = sum((self._ops[(offset + j)](h, weights[(offset + j)]) for (j, h) in enumerate(states)))
            offset += len(states)
            states.append(s)
        return torch.cat(states[(- self._multiplier):], dim=1)
def cifar10():
    """Collect download configs for the CIFAR10 dataset."""
    def build_dataset():
        return datasets.CIFAR10(ROOT, download=True)
    return collect_download_configs(build_dataset, name='CIFAR10')
def get_all_supported_ops(ops_file_path: str):
    """Parse a raw-ops listing file into {op name: (callable, parameters)}.

    Each useful line carries the op name between its first pair of backticks;
    lines without enough backticks are skipped (and printed). The op is
    resolved by evaluating ``raw_ops.<Name>`` in this module's namespace, so
    only trusted listings should be fed to this function (eval on file
    content).
    """
    with open(ops_file_path, 'r') as fh:
        all_lines = fh.readlines()
    skip_in_kws = []
    name_2_op_params = {}
    for line in all_lines:
        pieces = line.split('`')
        if len(pieces) <= 2:
            print(f'skipped: {line}')
            continue
        last_name = pieces[1]
        if any(kw in last_name for kw in skip_in_kws):
            continue
        op_name = f'tf.raw_ops.{last_name}'
        try:
            # Resolve 'raw_ops.<Name>' relative to the module namespace.
            func = eval(op_name[len('tf.'):])
        except Exception as e:
            print(f'Error when eval {op_name}: {e}')
            continue
        if not callable(func):
            raise RuntimeError(f'{func} is not callable')
        name_2_op_params[op_name] = (func, list(func.__signature__.parameters.values()))
    return name_2_op_params
def vgg11_bn(**kwargs):
    """VGG-11 with batch normalisation (layer configuration 'A')."""
    return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
def sqrt(x, epsilon):
    """Approximate the square root of ``x`` with Newton's method.

    Iterates approx <- (approx + x/approx)/2 until approx**2 is within
    ``epsilon`` of ``x``.

    Args:
        x: non-negative number whose square root is sought.
        epsilon: positive convergence tolerance on |x - approx**2|.

    Returns:
        A value r such that |x - r*r| <= epsilon (0.0 for x == 0).

    Raises:
        ValueError: if x is negative (the iteration would never converge).
    """
    if x < 0:
        raise ValueError('cannot take the square root of a negative number: {}'.format(x))
    if x == 0:
        # Avoid division by zero in the Newton update.
        return 0.0
    approx = (x / 2)
    # Fix: the original test `abs(x - approx) > epsilon` compared x to the
    # root instead of its square, so e.g. sqrt(4, eps) looped forever
    # (approx settles at 2 while |4 - 2| stays > eps).
    while (abs(x - (approx * approx)) > epsilon):
        approx = (0.5 * (approx + (x / approx)))
    return approx
def abs_batch_size_fn(new, count):
    """Batch-size accounting for abstractive batching.

    Tracks the longest target seen so far in the module-level maxima
    (reset when ``count == 1``) and returns count * max target length,
    padded by 1000.0 once the batch holds more than 6 examples.
    """
    global max_n_sents, max_n_tokens, max_size
    source, target = new[0], new[1]
    if count == 1:
        # A new batch starts: reset the running maxima.
        max_size = 0
        max_n_sents = 0
        max_n_tokens = 0
    max_n_sents = max(max_n_sents, len(target))
    max_size = max(max_size, max_n_sents)
    src_elements = count * max_size
    return src_elements + 1000.0 if count > 6 else src_elements
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.