code stringlengths 281 23.7M |
|---|
def test_multiple_decorators():
    """Check that stacking ``_along_last_axis`` and ``_type_conversion``
    halves the last axis while preserving rank and dtype.

    BUG FIX: the two decorator lines had lost their ``@`` markers (they were
    bare name expressions with no effect); restored so ``half_vec`` is
    actually wrapped.
    """
    @_along_last_axis
    @_type_conversion
    def half_vec(x):
        # The decorators are expected to feed 1-D slices along the last axis.
        assert x.ndim == 1
        return x[:len(x) // 2]

    for shape in [(10,), (2, 10), (2, 2, 10)]:
        for dtype in [np.float32, np.float16, np.float64]:
            x = np.ones(shape, dtype=dtype)
            y = half_vec(x)
            xshape = x.shape
            yshape = y.shape
            # Rank preserved, last axis halved, dtype round-trips.
            assert len(xshape) == len(yshape)
            assert xshape[-1] // 2 == yshape[-1]
            assert x.dtype == y.dtype
def attach_object_to_vehicle(object_id: int, vehicle_id: int, offset_x: float, offset_y: float, offset_z: float, rotation_x: float, rotation_y: float, rotation_z: float) -> bool:
    """Attach an object to a vehicle at the given offset and rotation.

    Thin wrapper that forwards all arguments to the native
    ``AttachObjectToVehicle`` call and returns its result.
    """
    result = AttachObjectToVehicle(
        object_id,
        vehicle_id,
        offset_x,
        offset_y,
        offset_z,
        rotation_x,
        rotation_y,
        rotation_z,
    )
    return result
class WeylQuantizationTest(unittest.TestCase):
    """Tests for ``weyl_polynomial_quantization`` on QuadOperator inputs."""

    def test_weyl_empty(self):
        # An empty polynomial quantizes to the zero operator.
        result = weyl_polynomial_quantization('')
        self.assertTrue(result == QuadOperator.zero())

    def test_weyl_one_term(self):
        expected = QuadOperator('q0')
        result = weyl_polynomial_quantization('q0')
        self.assertTrue(result == expected)

    def test_weyl_one_term_multimode(self):
        expected = QuadOperator('q0 q1 p2 p3')
        result = weyl_polynomial_quantization('q0 q1 p2 p3')
        self.assertTrue(result == expected)

    def test_weyl_two_term_same(self):
        # Powers expand into repeated factors of the same mode.
        expected = QuadOperator('q0 q0')
        result = weyl_polynomial_quantization('q0^2')
        self.assertTrue(result == expected)

    def test_weyl_non_hermitian(self):
        # Mixed q/p products are symmetrized, producing a Hermitian operator.
        result = weyl_polynomial_quantization('q0 p0')
        expected = QuadOperator('q0 p0', 0.5) + QuadOperator('p0 q0', 0.5)
        self.assertTrue(result == expected)
        self.assertTrue(is_hermitian(result))
        result = weyl_polynomial_quantization('q0^2 p0')
        expected = QuadOperator('q0 q0 p0', 0.5) + QuadOperator('p0 q0 q0', 0.5)
        self.assertTrue(result == expected)
        self.assertTrue(is_hermitian(result))
def orth_reg(net, loss, cof=1):
    """Add an orthogonality penalty on every ``nn.Linear`` weight to ``loss``.

    For each linear layer the penalty is the mean absolute off-diagonal
    entry of W @ W.T, scaled by ``cof``; returns the augmented loss.
    """
    penalty = 0
    for module in net.modules():
        if not isinstance(module, nn.Linear):
            continue
        weight = module.weight
        dim = weight.size()[0]
        identity = Variable(torch.eye(dim), requires_grad=False).cuda()
        # Off-diagonal entries of the Gram matrix (identity is 0 there).
        gram_diff = torch.matmul(weight, weight.t()) - identity
        off_diagonal = torch.masked_select(gram_diff, mask=(identity == 0))
        penalty += cof * torch.mean(torch.abs(off_diagonal))
    return loss + penalty
# NOTE(review): the bare `()` below is extraction residue of a stripped
# decorator (presumably a periodic-task decorator such as `@app.task()`);
# confirm against the original module.
()
def daily_update_placements(day=None):
    """Aggregate one day of replica Offer rows into PlacementImpression rows.

    :param day: the day to aggregate, forwarded to ``get_day`` (``None``
        presumably selects a default day — confirm against get_day).
    """
    (start_date, end_date) = get_day(day)
    log.info('Updating PlacementImpressions for %s-%s', start_date, end_date)
    # Read from the replica database to keep reporting load off the primary.
    queryset = Offer.objects.using(settings.REPLICA_SLUG).filter(date__gte=start_date, date__lt=end_date)
    # Group offers by placement (publisher, ad, div id, ad-type), count each
    # outcome, keep only publishers that opted into placement recording, and
    # drop auto-generated div ids (rtd-XXXX / ad_XXXX prefixes).
    for values in queryset.values('publisher', 'advertisement', 'div_id', 'ad_type_slug').annotate(total_decisions=Count('div_id'), total_offers=Count('div_id', filter=Q(advertisement__isnull=False)), total_views=Count('div_id', filter=Q(viewed=True)), total_clicks=Count('div_id', filter=Q(clicked=True))).filter(total_decisions__gt=0).filter(publisher__record_placements=True).exclude(div_id__regex='(rtd-\\w{4}|ad_\\w{4}).*').order_by('-total_decisions').iterator():
        # get_or_create then update keeps the aggregation idempotent on rerun.
        (impression, _) = PlacementImpression.objects.using('default').get_or_create(publisher_id=values['publisher'], advertisement_id=values['advertisement'], div_id=values['div_id'], ad_type_slug=values['ad_type_slug'], date=start_date)
        PlacementImpression.objects.using('default').filter(pk=impression.pk).update(decisions=values['total_decisions'], offers=values['total_offers'], views=values['total_views'], clicks=values['total_clicks'])
class RCC_APB1LPENR(IntEnum):
    """Bit masks for the RCC APB1 peripheral clock-enable-in-low-power
    register (values written as hex; each mask is a single register bit)."""
    TIM2LPEN = 0x00000001    # bit 0
    TIM3LPEN = 0x00000002    # bit 1
    TIM4LPEN = 0x00000004    # bit 2
    TIM5LPEN = 0x00000008    # bit 3
    WWDGLPEN = 0x00000800    # bit 11
    SPI2LPEN = 0x00004000    # bit 14
    SPI3LPEN = 0x00008000    # bit 15
    USART2LPEN = 0x00020000  # bit 17
    I2C1LPEN = 0x00200000    # bit 21
    I2C2LPEN = 0x00400000    # bit 22
    I2C3LPEN = 0x00800000    # bit 23
    PWRLPEN = 0x10000000     # bit 28
def build_v2_index_specs():
    """Build the table of Docker Registry V2 API authorization test specs.

    Each ``IndexV2TestSpec`` pairs an endpoint/method/repository (and an
    optional manifest/digest/upload reference) with five expected HTTP status
    codes — presumably one per access level the harness exercises (anonymous
    through fully authorized); confirm against IndexV2TestSpec.request_status.
    """
    # Reading the expected codes: 401 = auth required/denied, 404 = authorized
    # but the fake resource is absent, 400/202 = request reached the endpoint.
    return [IndexV2TestSpec('v2.list_all_tags', 'GET', PUBLIC_REPO).request_status(200, 200, 200, 200, 200), IndexV2TestSpec('v2.list_all_tags', 'GET', PRIVATE_REPO).request_status(401, 401, 200, 401, 200), IndexV2TestSpec('v2.list_all_tags', 'GET', ORG_REPO).request_status(401, 401, 200, 401, 200), IndexV2TestSpec('v2.list_all_tags', 'GET', ANOTHER_ORG_REPO).request_status(401, 401, 401, 401, 200), IndexV2TestSpec('v2.fetch_manifest_by_tagname', 'GET', PUBLIC_REPO, manifest_ref=FAKE_MANIFEST).request_status(404, 404, 404, 404, 404), IndexV2TestSpec('v2.fetch_manifest_by_tagname', 'GET', PRIVATE_REPO, manifest_ref=FAKE_MANIFEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.fetch_manifest_by_tagname', 'GET', ORG_REPO, manifest_ref=FAKE_MANIFEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.fetch_manifest_by_tagname', 'GET', ANOTHER_ORG_REPO, manifest_ref=FAKE_MANIFEST).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.fetch_manifest_by_digest', 'GET', PUBLIC_REPO, manifest_ref=FAKE_DIGEST).request_status(404, 404, 404, 404, 404), IndexV2TestSpec('v2.fetch_manifest_by_digest', 'GET', PRIVATE_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.fetch_manifest_by_digest', 'GET', ORG_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.fetch_manifest_by_digest', 'GET', ANOTHER_ORG_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.write_manifest_by_tagname', 'PUT', PUBLIC_REPO, manifest_ref=FAKE_MANIFEST).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.write_manifest_by_tagname', 'PUT', PRIVATE_REPO, manifest_ref=FAKE_MANIFEST).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.write_manifest_by_tagname', 'PUT', ORG_REPO, manifest_ref=FAKE_MANIFEST).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.write_manifest_by_tagname', 'PUT', ANOTHER_ORG_REPO, manifest_ref=FAKE_MANIFEST).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.write_manifest_by_digest', 'PUT', PUBLIC_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.write_manifest_by_digest', 'PUT', PRIVATE_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.write_manifest_by_digest', 'PUT', ORG_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.write_manifest_by_digest', 'PUT', ANOTHER_ORG_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.delete_manifest_by_digest', 'DELETE', PUBLIC_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.delete_manifest_by_digest', 'DELETE', PRIVATE_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.delete_manifest_by_digest', 'DELETE', ORG_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.delete_manifest_by_digest', 'DELETE', ANOTHER_ORG_REPO, manifest_ref=FAKE_DIGEST).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.check_blob_exists', 'HEAD', PUBLIC_REPO, digest=FAKE_DIGEST).request_status(404, 404, 404, 404, 404), IndexV2TestSpec('v2.check_blob_exists', 'HEAD', PRIVATE_REPO, digest=FAKE_DIGEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.check_blob_exists', 'HEAD', ORG_REPO, digest=FAKE_DIGEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.check_blob_exists', 'HEAD', ANOTHER_ORG_REPO, digest=FAKE_DIGEST).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.download_blob', 'GET', PUBLIC_REPO, digest=FAKE_DIGEST).request_status(404, 404, 404, 404, 404), IndexV2TestSpec('v2.download_blob', 'GET', PRIVATE_REPO, digest=FAKE_DIGEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.download_blob', 'GET', ORG_REPO, digest=FAKE_DIGEST).request_status(401, 401, 404, 401, 404), IndexV2TestSpec('v2.download_blob', 'GET', ANOTHER_ORG_REPO, digest=FAKE_DIGEST).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.start_blob_upload', 'POST', PUBLIC_REPO).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.start_blob_upload', 'POST', PRIVATE_REPO).request_status(401, 401, 401, 401, 202), IndexV2TestSpec('v2.start_blob_upload', 'POST', ORG_REPO).request_status(401, 401, 401, 401, 202), IndexV2TestSpec('v2.start_blob_upload', 'POST', ANOTHER_ORG_REPO).request_status(401, 401, 401, 401, 202), IndexV2TestSpec('v2.fetch_existing_upload', 'GET', PUBLIC_REPO, 'push,pull', upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.fetch_existing_upload', 'GET', PRIVATE_REPO, 'push,pull', upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.fetch_existing_upload', 'GET', ORG_REPO, 'push,pull', upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.fetch_existing_upload', 'GET', ANOTHER_ORG_REPO, 'push,pull', upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.upload_chunk', 'PATCH', PUBLIC_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.upload_chunk', 'PATCH', PRIVATE_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.upload_chunk', 'PATCH', ORG_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.upload_chunk', 'PATCH', ANOTHER_ORG_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.monolithic_upload_or_last_chunk', 'PUT', PUBLIC_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.monolithic_upload_or_last_chunk', 'PUT', PRIVATE_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.monolithic_upload_or_last_chunk', 'PUT', ORG_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.monolithic_upload_or_last_chunk', 'PUT', ANOTHER_ORG_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 400), IndexV2TestSpec('v2.cancel_upload', 'DELETE', PUBLIC_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 401), IndexV2TestSpec('v2.cancel_upload', 'DELETE', PRIVATE_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.cancel_upload', 'DELETE', ORG_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404), IndexV2TestSpec('v2.cancel_upload', 'DELETE', ANOTHER_ORG_REPO, upload_uuid=FAKE_UPLOAD_ID).request_status(401, 401, 401, 401, 404)]
class ExitCommand(command.Command):
    """Command bound to an exit object; running it traverses the exit."""

    # The exit object this command is attached to (set by the command system).
    obj = None

    def func(self):
        """Attempt to traverse the exit, honoring its 'traverse' lock."""
        exit_obj = self.obj
        if exit_obj.access(self.caller, 'traverse'):
            # Lock passed: perform the actual move to the destination.
            exit_obj.at_traverse(self.caller, exit_obj.destination)
            return
        # Lock failed: show the custom error if one is set, else call the hook.
        error_message = exit_obj.db.err_traverse
        if error_message:
            self.caller.msg(error_message)
        else:
            exit_obj.at_failed_traverse(self.caller)

    def get_extra_info(self, caller, **kwargs):
        """Return a short suffix describing where this exit leads."""
        destination = self.obj.destination
        if destination:
            return ' (exit to %s)' % destination.get_display_name(caller)
        return ' (%s)' % self.obj.get_display_name(caller)
def load_data(args, dataset_name):
    """Load the partitioned FashionMNIST dataset described by ``args``.

    ``dataset_name`` is accepted for interface compatibility but not used
    here. Returns the ten loader outputs (counts, global/local loaders and
    class counts) as a list, in the loader's own order.
    """
    loader_fn = load_partition_data_FashionMNIST
    dataset = list(
        loader_fn(
            args.dataset,
            args.data_dir,
            args.partition_method,
            args.partition_alpha,
            args.client_number,
            args.batch_size,
        )
    )
    return dataset
def dicom_file_loader(accept_multiple_files: bool, stop_before_pixels: bool) -> Sequence['pydicom.Dataset']:
    """Streamlit widget flow: upload DICOM file(s), parse them, show details.

    :param accept_multiple_files: allow more than one upload at once.
    :param stop_before_pixels: skip pixel data when reading (faster/lighter).
    :return: the parsed pydicom datasets, one per uploaded file.
    """
    (left_column, right_column) = st.columns(2)
    # Pluralize the UI labels depending on the upload mode.
    if accept_multiple_files:
        file_string = 'files'
    else:
        file_string = 'file'
    with left_column:
        st.write(f'## Upload DICOM {file_string}')
        files: UploadedFiles = st.file_uploader(f'DICOM {file_string}', accept_multiple_files=accept_multiple_files)
        if (not files):
            # Nothing uploaded yet: halt this Streamlit script run here.
            st.stop()
    # Normalize to a sequence: indexing a single UploadedFile raises
    # TypeError, in which case we wrap it in a one-element list.
    assumed_sequence = cast(Files, files)
    try:
        assumed_sequence[0]
    except TypeError:
        a_single_file = cast(File, files)
        files = [a_single_file]
    files = cast(Files, files)
    datasets = []
    for a_file in files:
        try:
            a_file.seek(0)  # rewind in case the buffer was already read
            dataset = pydicom.dcmread(a_file, force=True, stop_before_pixels=stop_before_pixels)
        except Exception as e:
            st.warning(f'Failed reading the file "`{a_file.name}`". The error was the following:')
            st.error(e)
            st.stop()
            # st.stop() interrupts the script internally; this raise is a
            # safeguard so the loop cannot continue if stop() ever returns.
            raise
        datasets.append(dataset)
    # Group uploaded filenames and display names per patient id.
    patient_id_filenames_map = collections.defaultdict(set)
    patient_id_names_map = collections.defaultdict(set)
    for (dataset, a_file) in zip(datasets, files):
        patient_id = dataset.PatientID
        patient_name = _dcm_utilities.pretty_patient_name(dataset)
        patient_id_filenames_map[patient_id].add(a_file.name)
        patient_id_names_map[patient_id].add(patient_name)
    with right_column:
        st.write('## Details')
        for (patient_id, filenames) in patient_id_filenames_map.items():
            patient_names = patient_id_names_map[patient_id]
            st.write(f'''
* {_optionally_write_with_plural('Filename', filenames)}
* Patient ID: `{patient_id}`
* {_optionally_write_with_plural('Patient Name', patient_names)}
''')
    return datasets
class TrueWind(BaseWind):
    """True-wind values derived from apparent wind and boat speed.

    IMPROVEMENT: the two compute helpers take no ``self`` and are invoked
    via the class (``TrueWind.compute_...``); marked ``@staticmethod`` so
    instance access also works and the intent is explicit.
    """

    def __init__(self, client, boatimu):
        super(TrueWind, self).__init__(client, 'truewind', boatimu)

    @staticmethod
    def compute_true_wind_direction(water_speed, wind_speed, wind_direction):
        """Return the true wind direction in degrees.

        ``wind_direction`` is the apparent wind angle in degrees and
        ``water_speed`` the vessel speed; subtracting the boat's motion from
        the apparent-wind vector yields the true-wind vector.
        """
        rd = math.radians(wind_direction)
        windv = (wind_speed * math.sin(rd), wind_speed * math.cos(rd) - water_speed)
        return math.degrees(math.atan2(*windv))

    @staticmethod
    def compute_true_wind_speed(water_speed, wind_speed, wind_direction):
        """Return the true wind speed (magnitude of the true-wind vector)."""
        rd = math.radians(wind_direction)
        windv = (wind_speed * math.sin(rd), wind_speed * math.cos(rd) - water_speed)
        return math.hypot(*windv)

    def update_from_apparent(self, boat_speed, wind_speed, wind_direction):
        """Refresh the stored true-wind direction from apparent-wind inputs.

        Only updates when the configured source combines a speed reference
        with wind ('water+wind' or 'gps+wind').
        """
        if (self.source.value == 'water+wind') or (self.source.value == 'gps+wind'):
            self.direction.set(TrueWind.compute_true_wind_direction(boat_speed, wind_speed, wind_direction))
            self.wdirection = self.direction.value
            self.wfactor = 0.05
            self.lastupdate = time.monotonic()
def test_filewritejson_filewritejson_not_iterable_raises():
    """filewritejson raises ContextError when fileWriteJson is not iterable."""
    context = Context({'k1': 'v1', 'fileWriteJson': 1})
    expected_message = (
        "context['fileWriteJson'] must exist, be iterable and contain "
        "'path' for pypyr.steps.filewritejson. argument of type 'int' "
        "is not iterable"
    )
    with pytest.raises(ContextError) as err_info:
        filewrite.run_step(context)
    assert str(err_info.value) == expected_message
def highwaynet(inputs, scope, depth):
    """Highway-network layer: gated mix of a transform path and the input.

    Output is H*T + inputs*(1-T), where H is a ReLU dense transform and T a
    sigmoid gate of width ``depth``.
    """
    with tf.variable_scope(scope):
        transform = tf.layers.dense(inputs, units=depth, activation=tf.nn.relu, name='H')
        # Gate bias starts at -1.0 so training begins close to the carry path.
        gate = tf.layers.dense(
            inputs,
            units=depth,
            activation=tf.nn.sigmoid,
            name='T',
            bias_initializer=tf.constant_initializer(-1.0),
        )
        return transform * gate + inputs * (1.0 - gate)
def test_clean_comment():
    """clean_comment strips leading hash markers and surrounding whitespace."""
    from frigate.gen import clean_comment

    cases = [
        ('# hello world', 'hello world'),
        ('hello world', 'hello world'),
        ('## # ## ## hello world', 'hello world'),
        (' # hello world ', 'hello world'),
    ]
    for raw, expected in cases:
        assert clean_comment(raw) == expected
def test_lambert_conformat_conic_1sp_operation():
    """LambertConformalConic1SPConversion exposes expected name and params."""
    conversion = LambertConformalConic1SPConversion(
        latitude_natural_origin=1,
        longitude_natural_origin=2,
        false_easting=3,
        false_northing=4,
        scale_factor_natural_origin=0.5,
    )
    assert conversion.name == 'unknown'
    assert conversion.method_name == 'Lambert Conic Conformal (1SP)'
    expected_params = {
        'Latitude of natural origin': 1.0,
        'Longitude of natural origin': 2.0,
        'False easting': 3.0,
        'False northing': 4.0,
        'Scale factor at natural origin': 0.5,
    }
    assert _to_dict(conversion) == expected_params
class TornadoRoleTest(ProvyTestCase):
    """Unit tests for TornadoRole provisioning."""

    def setUp(self):
        super(TornadoRoleTest, self).setUp()
        # Role under test; prov/context are not exercised by these assertions.
        self.role = TornadoRole(prov=None, context={})

    # NOTE(review): no `test_` prefix — presumably a runner decorator was
    # stripped during extraction; confirm against the original module.
    def installs_necessary_packages_to_provision(self):
        # provision() should pull in pycurl via aptitude and tornado via pip.
        with self.using_stub(AptitudeRole) as aptitude, self.using_stub(PipRole) as pip:
            self.role.provision()
            aptitude.ensure_up_to_date.assert_called_once_with()
            aptitude.ensure_package_installed.assert_called_once_with('python-pycurl')
            pip.ensure_package_installed.assert_called_once_with('tornado')
def conv2d_transpose(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', use_xavier=True, stddev=0.001, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
    """2-D transposed convolution (deconvolution) with optional batch norm.

    Args:
        inputs: 4-D tensor indexed as [batch, height, width, channels].
        num_output_channels: number of output feature channels.
        kernel_size: (kernel_h, kernel_w).
        scope: tf variable-scope name for the layer's variables.
        stride: (stride_h, stride_w). NOTE(review): mutable default list,
            kept unchanged for interface compatibility.
        padding: 'SAME' or 'VALID'.
        use_xavier, stddev, weight_decay: forwarded to the weight factory.
        activation_fn: applied last when not None.
        bn, bn_decay, is_training: batch-normalization controls.

    Returns:
        The 4-D output tensor.
    """
    with tf.variable_scope(scope) as sc:
        (kernel_h, kernel_w) = kernel_size
        num_in_channels = inputs.get_shape()[(- 1)].value
        # Transposed-conv kernels are laid out [h, w, out_channels, in_channels].
        kernel_shape = [kernel_h, kernel_w, num_output_channels, num_in_channels]
        kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
        (stride_h, stride_w) = stride

        def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
            # Inverse of the conv output-size formula: upsample by the stride,
            # then for VALID padding add back the kernel/stride overlap.
            dim_size *= stride_size
            if ((padding == 'VALID') and (dim_size is not None)):
                dim_size += max((kernel_size - stride_size), 0)
            return dim_size

        batch_size = inputs.get_shape()[0].value
        height = inputs.get_shape()[1].value
        width = inputs.get_shape()[2].value
        out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
        out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
        # tf.nn.conv2d_transpose requires the output shape to be given explicitly.
        output_shape = [batch_size, out_height, out_width, num_output_channels]
        outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape, [1, stride_h, stride_w, 1], padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_conv2d(outputs, is_training, bn_decay=bn_decay, scope='bn')
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return outputs
# NOTE(review): the three bare parenthesized lines below are extraction
# residue of stripped CLI decorators (presumably a click command plus
# `@click.argument('flavour')` and `@click.option('--logs', ...)`); the
# third line is not even valid Python as written — confirm against the
# original module before treating this as runnable.
('build')
('flavour')
('--logs', '-l', is_flag=True, help='Show build logs')
def build(flavour: str, logs: bool) -> None:
    """Build a custom freshenv flavour image via the local Docker socket.

    :param flavour: name of the flavour section in the configuration.
    :param logs: when True, echo each Docker build log event.
    """
    if (not run_checks(flavour)):
        return
    flavour_config = get_key_values_from_config(flavour)
    flavour_dockerfile = create_dockerfile(flavour_config['base'], flavour_config['install'], flavour_config['cmd'])
    try:
        client = APIClient(base_url='unix://var/run/docker.sock')
        with console.status('Building custom flavour...', spinner='point'):
            # Stream decoded build events; abort on the first reported error.
            for line in client.build(fileobj=BytesIO(flavour_dockerfile.encode('utf-8')), tag=f'raiyanyahya/freshenv-flavours/{flavour}', rm=True, pull=True, decode=True):
                if ('errorDetail' in line):
                    raise Exception(line['errorDetail']['message'])
                if logs:
                    print(line)
        print(f':party_popper: Successfully built custom flavour {flavour}. You can provision it by running [bold]freshenv provision -f {flavour}[/bold].')
    except (errors.APIError, exceptions.HTTPError):
        print(':x: Custom flavour could not be built. Try again after cleaning up with [bold]fr clean --force [/bold]')
    except Exception as e:
        print(f':x: Custom flavour could not be built due to the error: {e}.')
class Cabinet(models.Model):
    """A rack/cabinet located inside an IDC (data center)."""

    # Owning data center; deleting the IDC cascades to its cabinets.
    idc = models.ForeignKey('IDC', related_name='cabinet', on_delete=models.CASCADE)
    # NOTE(review): the verbose_name values below are empty strings —
    # presumably non-ASCII labels were lost during extraction; confirm and
    # restore from the original source.
    cabinet_name = models.CharField(max_length=64, unique=True, verbose_name='')
    cabinet_memo = models.CharField(max_length=100, blank=True, null=True, verbose_name='')

    class Meta():
        db_table = 'ops_cabinet'
        verbose_name = ''
        verbose_name_plural = ''
def test_remove_world_from_session(server_app):
    """remove_world_from_session drops the world's id from session state."""
    session_data = {'worlds': [1234]}
    # The session context manager yields our dict so mutations are visible.
    server_app.session = MagicMock()
    server_app.session.return_value.__enter__.return_value = session_data
    fake_world = MagicMock()
    fake_world.id = 1234
    server_app.remove_world_from_session(fake_world)
    assert session_data == {'worlds': []}
def test_build_overviews_new_file(tmpdir, path_rgb_byte_tif):
    """Overviews can be built on a freshly written dataset."""
    dst_file = str(tmpdir.join('test.tif'))
    with rasterio.open(path_rgb_byte_tif) as source:
        with rasterio.open(dst_file, 'w', **source.profile) as destination:
            destination.write(source.read())
            # Build 2x and 4x nearest-neighbour overviews before closing.
            destination.build_overviews([2, 4], resampling=OverviewResampling.nearest)
    with rasterio.open(dst_file, overview_level=1) as overview_src:
        assert overview_src.read().any()
def main():
    """Train the FCRN depth-estimation model on the NYU Depth v2 dataset.

    Relies on module-level helpers/globals defined elsewhere in the file
    (load_split, NyuDepthLoader, FCRN, load_weights, weights_file, dtype,
    loss_huber). Saves periodic checkpoints and, on the last epoch, sample
    RGB/ground-truth/prediction images.
    """
    batch_size = 16
    data_path = './data/nyu_depth_v2_labeled.mat'
    learning_rate = 0.0001
    momentum = 0.9  # fixed "monentum" typo; unused with Adam, kept for reference
    weight_decay = 0.0005
    num_epochs = 100
    (train_lists, val_lists, test_lists) = load_split()
    print('Loading data...')
    train_loader = torch.utils.data.DataLoader(NyuDepthLoader(data_path, train_lists), batch_size=batch_size, shuffle=False, drop_last=True)
    val_loader = torch.utils.data.DataLoader(NyuDepthLoader(data_path, val_lists), batch_size=batch_size, shuffle=True, drop_last=True)
    test_loader = torch.utils.data.DataLoader(NyuDepthLoader(data_path, test_lists), batch_size=batch_size, shuffle=True, drop_last=True)
    print(train_loader)
    print('Loading model...')
    model = FCRN(batch_size)
    model.load_state_dict(load_weights(model, weights_file, dtype))
    # BUG FIX: initialize start_epoch *before* the resume block. The original
    # set `start_epoch = 0` after loading the checkpoint, clobbering the
    # resumed epoch counter.
    start_epoch = 0
    resume_from_file = False
    resume_file = './model/model_300.pth'
    if resume_from_file:
        if os.path.isfile(resume_file):
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("loaded checkpoint '{}' (epoch {})".format(resume_file, checkpoint['epoch']))
        else:
            print('can not find!')
    model = model.cuda()
    loss_fn = loss_huber()
    print('loss_fn set...')
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    print('optimizer set...')
    best_val_err = 0.0001
    for epoch in range(num_epochs):
        print('Starting train epoch %d / %d' % (start_epoch + epoch + 1, num_epochs + start_epoch))
        model.train()
        running_loss = 0
        count = 0
        epoch_loss = 0
        for (input, depth) in train_loader:
            input_var = Variable(input.type(dtype))
            depth_var = Variable(depth.type(dtype))
            output = model(input_var)
            loss = loss_fn(output, depth_var)
            print('loss: %f' % loss.data.cpu().item())
            count += 1
            running_loss += loss.data.cpu().numpy()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        epoch_loss = running_loss / count
        print('epoch loss:', epoch_loss)
        # Validation pass (no gradients).
        model.eval()
        (num_correct, num_samples) = (0, 0)
        loss_local = 0
        with torch.no_grad():
            for (input, depth) in val_loader:
                input_var = Variable(input.type(dtype))
                depth_var = Variable(depth.type(dtype))
                output = model(input_var)
                if num_epochs == (epoch + 1):
                    # Final epoch: dump RGB input, ground truth and prediction.
                    input_rgb_image = input[0].data.permute(1, 2, 0)
                    input_gt_depth_image = depth_var[0][0].data.cpu().numpy().astype(np.float32)
                    pred_depth_image = output[0].data.squeeze().cpu().numpy().astype(np.float32)
                    input_gt_depth_image /= np.max(input_gt_depth_image)
                    pred_depth_image /= np.max(pred_depth_image)
                    plot.imsave('./result/input_rgb_epoch_{}.png'.format(start_epoch + epoch + 1), input_rgb_image)
                    plot.imsave('./result/gt_depth_epoch_{}.png'.format(start_epoch + epoch + 1), input_gt_depth_image, cmap='viridis')
                    plot.imsave('./result/pred_depth_epoch_{}.png'.format(start_epoch + epoch + 1), pred_depth_image, cmap='viridis')
                loss_local += loss_fn(output, depth_var)
                num_samples += 1
        err = float(loss_local) / num_samples
        print('val_error: %f' % err)
        # Checkpoint on improvement, and always on the final epoch.
        if (err < best_val_err) or (epoch == num_epochs - 1):
            best_val_err = err
            torch.save({'epoch': start_epoch + epoch + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, './model/model_' + str(start_epoch + epoch + 1) + '.pth')
        if (epoch % 10) == 0:
            learning_rate = learning_rate * 0.8
            # BUG FIX: actually apply the decayed learning rate; the original
            # only rebound the local variable while Adam kept the initial lr.
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
def dump_dataclass(obj: Any):
    """Recursively convert a dataclass instance into a plain dict.

    The result carries a ``_target_`` key naming the dataclass type (via
    ``_convert_target_to_string``); nested dataclasses, including those
    inside lists/tuples, are converted the same way.

    :raises AssertionError: if ``obj`` is not a dataclass *instance*.
    """
    assert dataclasses.is_dataclass(obj) and not isinstance(obj, type), (
        'dump_dataclass() requires an instance of a dataclass.'
    )
    result = {'_target_': _convert_target_to_string(type(obj))}
    for field in dataclasses.fields(obj):
        value = getattr(obj, field.name)
        if dataclasses.is_dataclass(value):
            value = dump_dataclass(value)
        if isinstance(value, (list, tuple)):
            value = [
                dump_dataclass(item) if dataclasses.is_dataclass(item) else item
                for item in value
            ]
        result[field.name] = value
    return result
class _ConvNdMtl(Module):
    """Base N-dimensional convolution with meta-transfer-learning (MTL)
    scaling parameters.

    The base convolution ``weight``/``bias`` are frozen
    (``requires_grad = False``); the learnable parts are the per-channel
    multiplicative ``mtl_weight`` (initialized to 1) and the additive
    ``mtl_bias`` (initialized to 0).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias):
        super(_ConvNdMtl, self).__init__()
        if ((in_channels % groups) != 0):
            raise ValueError('in_channels must be divisible by groups')
        if ((out_channels % groups) != 0):
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.transposed = transposed
        self.output_padding = output_padding
        self.groups = groups
        # Transposed convolutions swap the in/out axes of the kernel tensor.
        if transposed:
            self.weight = Parameter(torch.Tensor(in_channels, (out_channels // groups), *kernel_size))
            self.mtl_weight = Parameter(torch.ones(in_channels, (out_channels // groups), 1, 1))
        else:
            self.weight = Parameter(torch.Tensor(out_channels, (in_channels // groups), *kernel_size))
            self.mtl_weight = Parameter(torch.ones(out_channels, (in_channels // groups), 1, 1))
        # Freeze the base kernel: only the MTL parameters are trained.
        self.weight.requires_grad = False
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
            self.bias.requires_grad = False
            self.mtl_bias = Parameter(torch.zeros(out_channels))
        else:
            # Register as None so attribute access and state_dict stay uniform.
            self.register_parameter('bias', None)
            self.register_parameter('mtl_bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize: base weight/bias uniform in ±1/sqrt(fan_in);
        mtl_weight back to 1 and mtl_bias back to 0."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = (1.0 / math.sqrt(n))
        self.weight.data.uniform_((- stdv), stdv)
        self.mtl_weight.data.uniform_(1, 1)
        if (self.bias is not None):
            self.bias.data.uniform_((- stdv), stdv)
            self.mtl_bias.data.uniform_(0, 0)

    def extra_repr(self):
        """Build the repr suffix, omitting arguments left at their defaults."""
        s = '{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
        if (self.padding != ((0,) * len(self.padding))):
            s += ', padding={padding}'
        if (self.dilation != ((1,) * len(self.dilation))):
            s += ', dilation={dilation}'
        if (self.output_padding != ((0,) * len(self.output_padding))):
            s += ', output_padding={output_padding}'
        if (self.groups != 1):
            s += ', groups={groups}'
        if (self.bias is None):
            s += ', bias=False'
        return s.format(**self.__dict__)
def test_save_action_additional_extensions(default_file):
    """Saved config keeps defaults plus persistable extensions only.

    MyExtension3 is created with persist=False and must not be recorded.
    """
    existing_config(default_file)
    base_opts = dict(author='author', email='email', license='MPL-2.0', my_extension1_opt=5)
    extensions = [
        make_extension('MyExtension1'),
        make_extension('MyExtension2'),
        make_extension('MyExtension3', persist=False),
    ]
    config.save({}, {**base_opts, 'save_config': default_file, 'extensions': extensions})
    parsed = info.read_setupcfg(default_file).to_dict()['pyscaffold']
    print(default_file.read_text())
    expected = {'namespace', 'tox', 'cirrus', 'my_extension1', 'my_extension2'}
    assert templates.parse_extensions(parsed['extensions']) == expected
    assert parsed['namespace'] == 'my_namespace.my_sub_namespace'
    assert int(parsed['my_extension1_opt']) == 5
class GeometryOptimizer(lib.StreamObject):
    """Driver that optimizes the geometry of ``method``'s cell/molecule.

    BUG FIX: ``cell`` was defined twice as plain methods — the
    ``@property``/``@cell.setter`` decorators had been stripped, so the
    second definition silently shadowed the first and ``self.cell = x``
    inside :meth:`kernel` would have replaced the method with a plain
    attribute. Restored as a property pair.
    """

    def __init__(self, method):
        self.method = method
        self.callback = None   # optional per-step callback for kernel()
        self.params = {}       # extra keyword arguments forwarded to kernel()
        self.converged = False
        self.max_cycle = 100

    @property
    def cell(self):
        """The cell object of the underlying method."""
        return self.method.cell

    @cell.setter
    def cell(self, x):
        # Keep both attributes of the wrapped method in sync.
        self.method.cell = x
        self.method.mol = x

    def kernel(self, params=None):
        """Run the optimization and return the optimized cell.

        :param params: optional dict merged into ``self.params`` before the
            module-level ``kernel`` driver is invoked.
        """
        if params is not None:
            self.params.update(params)
        (self.converged, self.cell) = kernel(self.method, callback=self.callback, maxsteps=self.max_cycle, **self.params)
        return self.cell

    optimize = kernel  # alias for API compatibility
class DistributedTest(TestCase):
    """Distributed data-loading tests (fullsync, DataLoader/DataLoader2)."""

    # NOTE(review): defined without `self` — invoked as a plain function via
    # DistributedTest._test_fullsync inside each spawned worker process.
    def _test_fullsync(rank, world_size, backend, q):
        dist.init_process_group(backend, rank=rank, world_size=world_size)
        data_length = 23
        dp = IterableWrapper(list(range(data_length))).sharding_filter()
        torch.utils.data.graph_settings.apply_sharding(dp, world_size, rank)
        # fullsync keeps all ranks in lock-step even with uneven shards.
        dp1 = dp.fullsync()
        for _ in range(2):
            res = _dist_iterate_one_epoch(dp1)
            assert (res == list(range(rank, ((data_length // world_size) * world_size), world_size)))
        # A tiny timeout should trip PrefetchTimeoutError during iteration.
        dp2 = dp.fullsync(timeout=0.01)
        try:
            for _ in range(2):
                _ = list(dp2)
        except Exception as e:
            assert isinstance(e, PrefetchTimeoutError)
        # pause() mid-epoch followed by a fresh iterator must not deadlock.
        dp3 = dp.fullsync()
        it = iter(dp3)
        next(it)
        dp3.pause()
        it2 = iter(dp3)
        next(it2)
        dp4 = dp.prefetch(2)
        it = iter(dp4)
        next(it)
        dp4.pause()
        it2 = iter(dp4)
        next(it2)
        _finalize_distributed_queue(rank, q)

    # NOTE(review): the two bare names below are extraction residue of
    # stripped parametrization decorators (presumably world-size and backend
    # parametrizers); confirm against the original module.
    _size_parametrize
    _parametrize
    def test_fullsync(self, world_size, backend) -> None:
        # nccl requires one process per visible GPU.
        world_size = (world_size if (backend != 'nccl') else torch.cuda.device_count())
        launch_distributed_training(backend, world_size, fn=DistributedTest._test_fullsync)

    def _get_dataloader(data_length: int, dl2: bool, shuffle: bool, rs=None):
        # Build either a DataLoader2 (with a distributed reading service) or
        # a classic DataLoader over a sharded (optionally shuffled) pipe.
        data_source = IterableWrapper(list(range(data_length)))
        dp = data_source.sharding_filter()
        if shuffle:
            dp = dp.shuffle()
        if dl2:
            if (rs is None):
                rs = DistributedReadingService()
            dl = DataLoader2(dp, reading_service=rs)
        else:
            dp = dp.fullsync()
            dl = DataLoader(dp)
        return dl

    def _test_distributed_training(dl2, rank, world_size, backend, q):
        dist.init_process_group(backend, rank=rank, world_size=world_size)
        data_length = 23
        # Without shuffle each rank sees its deterministic shard.
        dl = DistributedTest._get_dataloader(data_length, dl2=dl2, shuffle=False)
        res = _dist_iterate_one_epoch(dl)
        assert (sorted(res) == list(range(rank, ((data_length // world_size) * world_size), world_size)))
        # With shuffle: same seed reproduces order, different seed changes it.
        dl = DistributedTest._get_dataloader(data_length, dl2=dl2, shuffle=True)
        results = []
        for _ in range(2):
            res = _dist_iterate_one_epoch(dl, seed=123)
            results.append(res)
        assert (results[0] == results[1])
        res = _dist_iterate_one_epoch(dl, seed=321)
        results.append(res)
        assert (len(results[0]) == len(results[2]))
        assert (results[0] != results[2])
        _finalize_distributed_queue(rank, q)
        if dl2:
            dl.shutdown()

    _parametrize
    def test_distributed_dl2(self, backend) -> None:
        world_size = (DEFAULT_WORLD_SIZE if (backend != 'nccl') else torch.cuda.device_count())
        launch_distributed_training(backend, world_size, fn=partial(DistributedTest._test_distributed_training, True))

    _parametrize
    def test_elastic_training_dl2(self, backend) -> None:
        world_size = (DEFAULT_WORLD_SIZE if (backend != 'nccl') else torch.cuda.device_count())
        nnodes = 1
        from torch.distributed import run
        run.main(['--run_path', f'--nnodes={nnodes}', f'--nproc_per_node={world_size}', abs_path('bin/elastic_training.py'), ('--' + backend), '--dl2'])

    _parametrize
    def test_distributed_dl1(self, backend) -> None:
        world_size = (DEFAULT_WORLD_SIZE if (backend != 'nccl') else torch.cuda.device_count())
        launch_distributed_training(backend, world_size, fn=partial(DistributedTest._test_distributed_training, False))

    # NOTE(review): the bare tuple below is residue of a stripped skip
    # decorator (presumably `@unittest.skipIf(...)`); confirm upstream.
    ((sys.version_info < (3, 8)), 'Torch Elastic requires Python >= 3.8')
    _parametrize
    def test_elastic_training_dl1(self, backend) -> None:
        world_size = (DEFAULT_WORLD_SIZE if (backend != 'nccl') else torch.cuda.device_count())
        nnodes = 1
        from torch.distributed import run
        run.main(['--run_path', f'--nnodes={nnodes}', f'--nproc_per_node={world_size}', abs_path('bin/elastic_training.py'), ('--' + backend), '--dl1'])
class Migration(migrations.Migration):
    """Add a nullable ``publisher`` FK to the adimpression/click/view models
    and extend adimpression uniqueness to (publisher, advertisement, date)."""

    dependencies = [('adserver', '0003_publisher-advertiser-adtype')]
    # PROTECT prevents deleting a Publisher that still has recorded stats.
    operations = [migrations.AddField(model_name='adimpression', name='publisher', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='adserver.Publisher')), migrations.AddField(model_name='click', name='publisher', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='adserver.Publisher')), migrations.AddField(model_name='view', name='publisher', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='adserver.Publisher')), migrations.AlterUniqueTogether(name='adimpression', unique_together=set([('publisher', 'advertisement', 'date')]))]
def test_geoid_model_name():
    """A compound CRS (projected + vertical) round-trips its geoid model and
    vertical datum through the CF attribute conversion."""
    # WKT2 for "NAD83 / Pennsylvania South + NAVD88 height" with GEOIDMODEL["GEOID12B"].
    wkt = 'COMPOUNDCRS["NAD83 / Pennsylvania South + NAVD88 height",\n  PROJCRS["NAD83 / Pennsylvania South",\n    BASEGEOGCRS["NAD83",\n      DATUM["North American Datum 1983",\n        ELLIPSOID["GRS 1980",6378137,298.,\n          LENGTHUNIT["metre",1]]],\n      PRIMEM["Greenwich",0,\n        ANGLEUNIT["degree",0.]]],\n    CONVERSION["SPCS83 Pennsylvania South zone (meters)",\n      METHOD["Lambert Conic Conformal (2SP)",\n        ID["EPSG",9802]],\n      PARAMETER["Latitude of false origin",39.,\n        ANGLEUNIT["degree",0.],\n        ID["EPSG",8821]],\n      PARAMETER["Longitude of false origin",-77.75,\n        ANGLEUNIT["degree",0.],\n        ID["EPSG",8822]],\n      PARAMETER["Latitude of 1st standard parallel",40.,\n        ANGLEUNIT["degree",0.],\n        ID["EPSG",8823]],\n      PARAMETER["Latitude of 2nd standard parallel",39.,\n        ANGLEUNIT["degree",0.],\n        ID["EPSG",8824]],\n      PARAMETER["Easting at false origin",600000,\n        LENGTHUNIT["metre",1],\n        ID["EPSG",8826]],\n      PARAMETER["Northing at false origin",0,\n        LENGTHUNIT["metre",1],\n        ID["EPSG",8827]]],\n    CS[Cartesian,2],\n    AXIS["easting (X)",east,\n      ORDER[1],\n      LENGTHUNIT["metre",1]],\n    AXIS["northing (Y)",north,\n      ORDER[2],\n      LENGTHUNIT["metre",1]]],\n  VERTCRS["NAVD88 height",\n    VDATUM["North American Vertical Datum 1988"],\n    CS[vertical,1],\n    AXIS["gravity-related height (H)",up,\n      LENGTHUNIT["metre",1]],\n    GEOIDMODEL["GEOID12B"]]]'
    crs = CRS(wkt)
    # Projection parameters of the horizontal (first) sub-CRS, keyed by name.
    param_dict = _to_dict(crs.sub_crs_list[0].coordinate_operation)
    expected_cf = {'semi_major_axis': 6378137.0, 'semi_minor_axis': crs.ellipsoid.semi_minor_metre, 'inverse_flattening': crs.ellipsoid.inverse_flattening, 'reference_ellipsoid_name': 'GRS 1980', 'longitude_of_prime_meridian': 0.0, 'prime_meridian_name': 'Greenwich', 'geographic_crs_name': 'NAD83', 'horizontal_datum_name': 'North American Datum 1983', 'projected_crs_name': 'NAD83 / Pennsylvania South', 'grid_mapping_name': 'lambert_conformal_conic', 'standard_parallel': (param_dict['Latitude of 1st standard parallel'], param_dict['Latitude of 2nd standard parallel']), 'latitude_of_projection_origin': param_dict['Latitude of false origin'], 'longitude_of_central_meridian': (- 77.75), 'false_easting': 600000.0, 'false_northing': 0.0, 'geoid_name': 'GEOID12B', 'geopotential_datum_name': 'North American Vertical Datum 1988'}
    cf_dict = crs.to_cf()
    # crs_wkt is environment-dependent beyond its prefix, so pop it before comparing.
    assert cf_dict.pop('crs_wkt').startswith('COMPOUNDCRS[')
    assert (cf_dict == expected_cf)
    _test_roundtrip(expected_cf, 'COMPOUNDCRS[')
    # Axis metadata: easting, northing, then the gravity-related height axis.
    assert (crs.cs_to_cf() == [{'axis': 'X', 'long_name': 'Easting', 'standard_name': 'projection_x_coordinate', 'units': 'metre'}, {'axis': 'Y', 'long_name': 'Northing', 'standard_name': 'projection_y_coordinate', 'units': 'metre'}, {'standard_name': 'height_above_reference_ellipsoid', 'long_name': 'Gravity-related height', 'units': 'metre', 'positive': 'up', 'axis': 'Z'}])
def test_state_wait_secretrequest_valid_amount_and_fee():
    """A secret request for (lock amount - fee) is accepted and reveals the
    secret; a later request for a smaller amount is rejected as invalid."""
    fee_amount = 5
    setup = setup_initiator_tests(allocated_fee=fee_amount)
    # Valid request: the receiver asks for exactly the transferred amount net of fees.
    state_change = ReceiveSecretRequest(payment_identifier=UNIT_TRANSFER_IDENTIFIER, amount=(setup.lock.amount - fee_amount), expiration=setup.lock.expiration, secrethash=setup.lock.secrethash, sender=UNIT_TRANSFER_TARGET)
    iteration = initiator_manager.state_transition(payment_state=setup.current_state, state_change=state_change, channelidentifiers_to_channels=setup.channel_map, addresses_to_channel=setup.channels.addresses_to_channel(), pseudo_random_generator=setup.prng, block_number=setup.block_number)
    assert (search_for_item(iteration.events, SendSecretReveal, {}) is not None)
    initiator_state = get_transfer_at_index(iteration.new_state, 0)
    assert (initiator_state.received_secret_request is True)
    # Reset the flag so the second (invalid) request is processed rather than ignored.
    initiator_state.received_secret_request = False
    # Invalid request: asks for 2 less than the net amount.
    state_change_2 = ReceiveSecretRequest(payment_identifier=UNIT_TRANSFER_IDENTIFIER, amount=((setup.lock.amount - fee_amount) - 2), expiration=setup.lock.expiration, secrethash=setup.lock.secrethash, sender=UNIT_TRANSFER_TARGET)
    iteration2 = initiator_manager.state_transition(payment_state=iteration.new_state, state_change=state_change_2, channelidentifiers_to_channels=setup.channel_map, addresses_to_channel=setup.channels.addresses_to_channel(), pseudo_random_generator=setup.prng, block_number=setup.block_number)
    # The only resulting event must be the invalid-secret-request notification.
    assert (len(iteration2.events) == 1)
    assert (search_for_item(iteration2.events, EventInvalidSecretRequest, {}) is not None)
def test_struct_comparison2():
    """Structs implementing gen:equal+hash compare equal via their custom
    equal-proc (two ``lead`` instances with identical fields are equal?)."""
    # The embedded Racket program defines a struct with custom equality/hash
    # procedures, then compares two structurally-identical instances.
    m = run_mod('\n #lang pycket\n (require racket/private/generic-interfaces)\n\n (struct lead (width height)\n #:methods\n gen:equal+hash\n [(define (equal-proc a b equal?-recur)\n ; compare a and b\n (and (equal?-recur (lead-width a) (lead-width b))\n (equal?-recur (lead-height a) (lead-height b))))\n (define (hash-proc a hash-recur)\n ; compute primary hash code of a\n (+ (hash-recur (lead-width a))\n (* 3 (hash-recur (lead-height a)))))\n (define (hash2-proc a hash2-recur)\n ; compute secondary hash code of a\n (+ (hash2-recur (lead-width a))\n (hash2-recur (lead-height a))))])\n\n (define result (equal? (lead 1 2) (lead 1 2)))\n ')
    assert (m.defs[W_Symbol.make('result')] == w_true)
def test_Join_view():
    """Join with ``view`` set is not supported by the numba backend and must
    raise NotImplementedError during compilation/comparison."""
    vals = (set_test_value(pt.matrix(), rng.normal(size=(2, 2)).astype(config.floatX)), set_test_value(pt.matrix(), rng.normal(size=(2, 2)).astype(config.floatX)))
    # view=1 requests an aliasing (non-copying) join on the second input.
    g = ptb.Join(view=1)(1, *vals)
    g_fg = FunctionGraph(outputs=[g])
    with pytest.raises(NotImplementedError):
        compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))])
def CheckForBadCharacters(filename, lines, error):
    """Logs an error for each line containing bad characters.

    Two kinds of bad characters are flagged:

    1. The Unicode replacement character (U+FFFD): its presence means the file
       contained invalid UTF-8 (undecodable bytes were substituted during read).
    2. NUL bytes: these usually indicate the file is binary or corrupted.

    :param filename: name of the file being checked (for error reporting).
    :param lines: iterable of the file's lines as decoded strings.
    :param error: callback ``error(filename, linenum, category, confidence, message)``.
    """
    for (linenum, line) in enumerate(lines):
        # BUG FIX: the check previously tested ``u'' in line`` (the replacement
        # character literal had been lost), which is vacuously true for every
        # line; it must test for U+FFFD specifically.
        if (u'\ufffd' in line):
            error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).')
        if ('\x00' in line):
            error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
class Describe_ChunkParser():
    """Spec-style unit tests for ``docx.image.png._ChunkParser``.

    NOTE(review): the helper methods at the bottom (``chunk_``, ``stream_`` etc.)
    read like pytest fixtures but carry no ``@pytest.fixture`` decorators --
    the decorators appear to have been stripped from this copy; confirm
    against version control.
    """

    def it_can_construct_from_a_stream(self, stream_, StreamReader_, stream_rdr_, _ChunkParser__init_):
        chunk_parser = _ChunkParser.from_stream(stream_)
        # PNG is big-endian; the stream reader must be constructed accordingly.
        StreamReader_.assert_called_once_with(stream_, BIG_ENDIAN)
        _ChunkParser__init_.assert_called_once_with(ANY, stream_rdr_)
        assert isinstance(chunk_parser, _ChunkParser)

    def it_can_iterate_over_the_chunks_in_its_png_stream(self, _iter_chunk_offsets_, _ChunkFactory_, stream_rdr_, chunk_, chunk_2_):
        offsets = [2, 4, 6]
        chunk_lst = [chunk_, chunk_2_]
        chunk_parser = _ChunkParser(stream_rdr_)
        chunks = list(chunk_parser.iter_chunks())
        _iter_chunk_offsets_.assert_called_once_with(chunk_parser)
        # The factory is invoked once per (type, offset) pair yielded by
        # _iter_chunk_offsets_ (only two pairs are stubbed, so offsets[2] is unused).
        assert (_ChunkFactory_.call_args_list == [call(PNG_CHUNK_TYPE.IHDR, stream_rdr_, offsets[0]), call(PNG_CHUNK_TYPE.pHYs, stream_rdr_, offsets[1])])
        assert (chunks == chunk_lst)

    def it_iterates_over_the_chunk_offsets_to_help_parse(self, iter_offsets_fixture):
        (chunk_parser, expected_chunk_offsets) = iter_offsets_fixture
        chunk_offsets = list(chunk_parser._iter_chunk_offsets())
        assert (chunk_offsets == expected_chunk_offsets)

    # ---- fixture helpers -------------------------------------------------

    def chunk_(self, request):
        return instance_mock(request, _Chunk)

    def chunk_2_(self, request):
        return instance_mock(request, _Chunk)

    def _ChunkFactory_(self, request, chunk_lst_):
        return function_mock(request, 'docx.image.png._ChunkFactory', side_effect=chunk_lst_)

    def chunk_lst_(self, chunk_, chunk_2_):
        return [chunk_, chunk_2_]

    def _ChunkParser__init_(self, request):
        return initializer_mock(request, _ChunkParser)

    def _iter_chunk_offsets_(self, request):
        chunk_offsets = ((PNG_CHUNK_TYPE.IHDR, 2), (PNG_CHUNK_TYPE.pHYs, 4))
        return method_mock(request, _ChunkParser, '_iter_chunk_offsets', return_value=iter(chunk_offsets))

    def iter_offsets_fixture(self):
        # 8 filler bytes, then an IHDR chunk header and an IEND chunk header;
        # the data offset of each chunk is 8 bytes past its length field.
        bytes_ = b'-filler-\x00\x00\x00\x00IHDRxxxx\x00\x00\x00\x00IEND'
        stream_rdr = StreamReader(io.BytesIO(bytes_), BIG_ENDIAN)
        chunk_parser = _ChunkParser(stream_rdr)
        expected_chunk_offsets = [(PNG_CHUNK_TYPE.IHDR, 16), (PNG_CHUNK_TYPE.IEND, 28)]
        return (chunk_parser, expected_chunk_offsets)

    def StreamReader_(self, request, stream_rdr_):
        return class_mock(request, 'docx.image.png.StreamReader', return_value=stream_rdr_)

    def stream_(self, request):
        return instance_mock(request, io.BytesIO)

    def stream_rdr_(self, request):
        return instance_mock(request, StreamReader)
def nooper(cls):
    """Return a concrete subclass of *cls* whose abstract methods are all no-ops.

    Raises ``NoopIsANoopException`` when *cls* declares no abstract methods,
    since the wrapper would then serve no purpose.
    """
    def _noop(*args, **kwargs):
        pass

    stubs = {name: _noop for name in cls.__abstractmethods__}
    if not stubs:
        raise NoopIsANoopException('nooper implemented no abstract methods on %s' % cls)
    # Dynamically build a subclass (same name) that overrides every abstract method.
    return type(cls.__name__, (cls,), stubs)
def system_command_call(command, shell=True):
    """Run *command* as a subprocess and return its captured stdout (bytes).

    :param command: command string, or argument list (joined via
        ``subprocess.list2cmdline`` when ``shell`` is True).
    :param shell: run through the system shell (default True).
    :return: stdout bytes on success, or None on non-zero exit / OS error
        (diagnostics are written to stderr).
    """
    if (shell and isinstance(command, list)):
        command = subprocess.list2cmdline(command)
    try:
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)
        (stdout, stderr) = process.communicate()
        if (process.returncode != 0):
            # BUG FIX: this used Python-2 ``print >> sys.stderr`` syntax, which
            # in Python 3 evaluates as a TypeError-raising expression.
            print(stderr, file=sys.stderr)
            print('Running {} failed with exit code {}'.format(command, process.returncode), file=sys.stderr)
            return None
        return stdout
    except OSError as e:
        msg = e.strerror
        errcodes = 'error {}'.format(e.errno)
        if (is_windows_host() and isinstance(e, WindowsError)):
            errcodes += ', win-error {}'.format(e.winerror)
            try:
                import ctypes
                # Python 3: ctypes.FormatError already returns str, no
                # Py2-style unicode()/encode dance needed.
                msg = ctypes.FormatError(e.winerror)
            except ImportError:
                pass
        print('System command call {} failed [{}]: {}'.format(command, errcodes, msg), file=sys.stderr)
        return None
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold):
    """Convert raw model logits into final answer-text predictions and write
    them as JSON to ``output_prediction_file`` / ``output_nbest_file``.

    NOTE(review): reads the module-level ``args`` (``args.answer_type``), and
    ``output_null_log_odds_file``, ``null_score_diff_threshold`` and
    ``scores_diff_json`` are accepted/created but never used -- presumably
    retained for SQuAD-v2 API compatibility; confirm against callers.
    """
    logger.info(('Writing predictions to: %s' % output_prediction_file))
    logger.info(('Writing nbest to: %s' % output_nbest_file))
    # Long documents are split into several overlapping features; group them
    # back by the example they came from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'answer_type_index', 'start_logit', 'end_logit', 'start_cls_logit', 'end_cls_logit', 'answer_type_logit'])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()  # NOTE(review): never populated
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            answer_type_indexs = _get_best_indexes(result.answer_type_logits, n_best_size)
            # NOTE(review): zip pairs the i-th best start index with the i-th
            # best answer type (not a cross product) -- confirm intentional.
            for (start_index, answer_type_index) in zip(start_indexes, answer_type_indexs):
                for end_index in end_indexes:
                    # Discard spans that fall outside the feature, map to no
                    # original token, lack max context, are inverted, or
                    # exceed max_answer_length.
                    if (start_index >= len(feature.tokens)):
                        continue
                    if (end_index >= len(feature.tokens)):
                        continue
                    if (start_index not in feature.token_to_orig_map):
                        continue
                    if (end_index not in feature.token_to_orig_map):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, answer_type_index=answer_type_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index], start_cls_logit=result.start_logits[0], end_cls_logit=result.end_logits[0], answer_type_logit=result.answer_type_logits[answer_type_index]))
        # Rank candidates by the answer-type classifier confidence.
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: x.answer_type_logit), reverse=True)
        _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'answer_type_index'])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            if ((pred.start_index > 0) and (pred.answer_type_index == args.answer_type['long-answer'])):
                # Recover the answer text: map wordpiece indices back to the
                # original document tokens, then reconcile the two via
                # get_final_text.
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = ' '.join(tok_tokens)
                # De-tokenize wordpieces ("play ##ing" -> "playing") and
                # normalize whitespace.
                tok_text = tok_text.replace(' ##', '')
                tok_text = tok_text.replace('##', '')
                tok_text = tok_text.strip()
                tok_text = ' '.join(tok_text.split())
                orig_text = ' '.join(orig_tokens)
                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if (final_text in seen_predictions):
                    continue
                seen_predictions[final_text] = True
            elif (pred.answer_type_index == args.answer_type['no-answer']):
                # "no answer" predictions contribute an empty text.
                final_text = ''
                seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, answer_type_index=pred.answer_type_index))
        # Guarantee at least one entry so downstream indexing never fails.
        if (not nbest):
            nbest.append(_NbestPrediction(text='', answer_type_index=args.answer_type['no-answer']))
        assert (len(nbest) >= 1)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['answer_type'] = entry.answer_type_index
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)
        if (not version_2_with_negative):
            all_predictions[example.qas_id] = nbest_json[0]['text']
        else:
            all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, 'w') as writer:
        writer.write((json.dumps(all_predictions, indent=4, ensure_ascii=False) + '\n'))
    with open(output_nbest_file, 'w', encoding='utf-8') as writer:
        writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
def usersMap(theRequest):
    """Render the community users map with a count and one random user.

    NOTE(review): ``render_to_response(..., context_instance=...)`` is a
    legacy Django (<1.10) API -- modern Django uses ``render(request, ...)``.
    """
    users = []
    myUserCount = QgisUser.objects.all().count()
    myRandomUser = None
    # Pick one random user that has an image set (order_by('?') randomizes).
    myRandomUsers = QgisUser.objects.exclude(image='').order_by('?')[:1]
    if (myRandomUsers.count() > 0):
        myRandomUser = myRandomUsers[0]
    # Each map marker pairs the user's geometry with a rendered info balloon.
    for user in QgisUser.objects.all():
        users.append([user.geometry, render_to_string('user_balloon.html', {'user': user})])
    myMap = InfoMap(users)
    return render_to_response('view_users.html', {'myMap': myMap, 'myUserCount': myUserCount, 'myRandomUser': myRandomUser}, context_instance=RequestContext(theRequest))
class Effect7097(BaseEffect):
    """Passive skill effect: boosts the damage multiplier of all modules in
    the 'Precursor Weapon' group, scaled by the skill level."""

    type = 'passive'

    def handler(fit, skill, context, projectionRange, **kwargs):
        bonus = skill.getModifiedItemAttr('damageMultiplierBonus') * skill.level
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.group.name == 'Precursor Weapon',
            'damageMultiplier',
            bonus,
            **kwargs,
        )
class TestBotDescriptionWithoutRequest(TestBotDescriptionBase):
    """Offline (no network request) unit tests for ``BotDescription``."""

    def test_slot_behaviour(self, bot_description):
        # Every declared slot must be settable/gettable, and no slot may be
        # duplicated anywhere in the MRO.
        for attr in bot_description.__slots__:
            assert (getattr(bot_description, attr, 'err') != 'err'), f"got extra slot '{attr}'"
        assert (len(mro_slots(bot_description)) == len(set(mro_slots(bot_description)))), 'duplicate slot'

    def test_to_dict(self, bot_description):
        bot_description_dict = bot_description.to_dict()
        assert isinstance(bot_description_dict, dict)
        assert (bot_description_dict['description'] == self.description)

    def test_equality(self):
        # Equality and hashing are value-based on the description text,
        # not identity-based.
        a = BotDescription(self.description)
        b = BotDescription(self.description)
        c = BotDescription('text.com')
        assert (a == b)
        assert (hash(a) == hash(b))
        assert (a is not b)
        assert (a != c)
        assert (hash(a) != hash(c))
class HumanoidStandupEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Humanoid stand-up task: the robot starts lying down and is rewarded for
    raising its torso, with penalties for control effort and contact impact.

    NOTE(review): uses the legacy mujoco-py/gym API (``self.model.data``,
    ``_step``) -- newer gym versions use ``self.sim.data`` and ``step``.
    """

    def __init__(self):
        # frame_skip=5: each env step advances the simulation by 5 physics steps.
        mujoco_env.MujocoEnv.__init__(self, 'humanoidstandup.xml', 5)
        utils.EzPickle.__init__(self)

    def _get_obs(self):
        """Observation: joint positions (minus root x/y), velocities, body
        inertias, body velocities, actuator forces and external contact forces."""
        data = self.model.data
        return np.concatenate([data.qpos.flat[2:], data.qvel.flat, data.cinert.flat, data.cvel.flat, data.qfrc_actuator.flat, data.cfrc_ext.flat])

    def _step(self, a):
        self.do_simulation(a, self.frame_skip)
        # Torso height after the step; reward is its rate of gain per unit time.
        pos_after = self.model.data.qpos[2][0]
        data = self.model.data
        uph_cost = ((pos_after - 0) / self.model.opt.timestep)
        quad_ctrl_cost = (0.1 * np.square(data.ctrl).sum())
        # Contact-force penalty, capped at 10 to avoid spikes dominating.
        quad_impact_cost = (5e-07 * np.square(data.cfrc_ext).sum())
        quad_impact_cost = min(quad_impact_cost, 10)
        reward = (((uph_cost - quad_ctrl_cost) - quad_impact_cost) + 1)
        # Episode never terminates early; done is always False.
        done = bool(False)
        return (self._get_obs(), reward, done, dict(reward_linup=uph_cost, reward_quadctrl=(- quad_ctrl_cost), reward_impact=(- quad_impact_cost)))

    def reset_model(self):
        # Small uniform noise around the initial pose for exploration variety.
        c = 0.01
        self.set_state((self.init_qpos + self.np_random.uniform(low=(- c), high=c, size=self.model.nq)), (self.init_qvel + self.np_random.uniform(low=(- c), high=c, size=self.model.nv)))
        return self._get_obs()

    def viewer_setup(self):
        # Camera tracks the torso from a slight distance, looking down.
        self.viewer.cam.trackbodyid = 1
        self.viewer.cam.distance = (self.model.stat.extent * 1.0)
        self.viewer.cam.lookat[2] += 0.8
        self.viewer.cam.elevation = (- 20)
def test_view_node(caller, **kwargs):
    """EvMenu node demonstrating MXP links and auto-numbered options.

    When re-entered from the dynamic node (flag in ``kwargs``), only echoes
    the text and returns None (ending this branch); otherwise returns the
    (text, options) pair for normal menu display.
    """
    text = ('\n Your name is |g%s|n!\n\n click |lclook|lthere|le to trigger a look command under MXP.\n This node\'s option has no explicit key (nor the "_default" key\n set), and so gets assigned a number automatically. You can infact\n -always- use numbers (1...N) to refer to listed options also if you\n don\'t see a string option key (try it!).\n ' % caller.key)
    if kwargs.get('executed_from_dynamic_node', False):
        # Re-entry from the dynamic node: just message the caller, no menu node.
        caller.msg('|gCalled from dynamic node:|n \n {}'.format(text))
        return
    else:
        # Single option without a key -- EvMenu assigns it the number 1.
        options = {'desc': 'back to main', 'goto': 'test_start_node'}
        return (text, options)
class ResNet(nn.Module):
    """Standard ResNet backbone (torchvision-style) that additionally returns
    the pooled features alongside the classification logits.

    ``forward`` returns ``(logits, features)`` where ``features`` is the
    flattened global-average-pooled activation of the last stage.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, norm_layer=None):
        """
        :param block: residual block class (BasicBlock or Bottleneck).
        :param layers: list of 4 ints -- number of blocks per stage.
        :param num_classes: size of the final classification layer.
        :param zero_init_residual: zero-init the last BN of each block so the
            block starts as an identity mapping (improves training, per
            "Bag of Tricks" / He et al.).
        :param groups / width_per_group: grouped-convolution (ResNeXt) config.
        :param norm_layer: normalization layer class; defaults to BatchNorm2d.
        """
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self.inplanes = 64
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.feature_num = (512 * block.expansion)  # dimensionality of the feature vector
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Kaiming init for convs, unit-gain for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last norm layer of each residual branch.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):
        """Build one residual stage of ``blocks`` blocks; the first block may
        downsample (stride/channel change) via a 1x1 projection shortcut."""
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return ``(logits, features)`` for input batch ``x``."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten pooled activations to (batch, feature_num).
        features = x.view(x.size(0), (- 1))
        x = self.fc(features)
        return (x, features)
def uiGetFilePath(initial_path=None):
    """Show the native macOS file picker (via osascript) and return the chosen
    file's POSIX path as a str.

    :param initial_path: optional directory the dialog opens in.
    :return: selected path string, or None if the user cancelled / the
        osascript call failed (the error output is printed).
    """
    try:
        if initial_path:
            output = subprocess.check_output('osascript -e \'set strPath to POSIX file "{}"\' -e \'set theDocument to choose file with prompt "Please select a document to process:" default location strPath\' -e \'set theDocument to (the POSIX path of theDocument)\''.format(initial_path), shell=True)
        else:
            output = subprocess.check_output('osascript -e \'set theDocument to choose file with prompt "Please select a document to process:"\' -e \'set theDocument to (the POSIX path of theDocument)\'', shell=True)
        # BUG FIX: check_output returns bytes on Python 3; decode before
        # stripping the trailing newline (bytes.replace with a str argument
        # raises TypeError).
        return output.decode('utf-8').replace('\n', '')
    except subprocess.CalledProcessError as e:
        # Cancelling the dialog makes osascript exit non-zero; report and
        # implicitly return None.
        print(e.output)
class Login(LoginView):
    """Login view with "remember me" support and automatic reactivation of
    accounts queued for termination."""

    form_class = LoginForm
    template_name = 'dictionary/registration/login.html'

    def form_valid(self, form):
        # Session lives 30 days with "remember me", otherwise 1 day.
        remember_me = form.cleaned_data.get('remember_me', False)
        session_timeout = ((86400 * 30) if remember_me else 86400)
        self.request.session.set_expiry(session_timeout)
        # Logging in cancels any pending account termination; if no queue
        # entry exists, DoesNotExist is silently suppressed.
        with suppress(AccountTerminationQueue.DoesNotExist):
            AccountTerminationQueue.objects.get(author=form.get_user()).delete()
            notifications.info(self.request, _('welcome back. your account has been reactivated.'), extra_tags='persistent')
        notifications.info(self.request, _('successfully logged in, dear'))
        return super().form_valid(form)
def load_openai_model(name: str, precision: Optional[str]=None, device: Optional[Union[(str, torch.device)]]=None, jit: bool=True, cache_dir: Optional[str]=None):
    """Load an OpenAI CLIP model either as a TorchScript archive or by
    rebuilding it from a state dict.

    :param name: model name listed by ``list_openai_models()``, or a path to a
        checkpoint file.
    :param precision: 'fp32'/'fp16'/'bf16'/'amp*'; defaults to fp32 on CPU,
        fp16 on CUDA.
    :param device: target device; defaults to CUDA when available.
    :param jit: keep the TorchScript graph (True) or rebuild an eager model (False).
    :param cache_dir: override the download cache directory.
    :return: the loaded model (eager or JIT, depending on ``jit`` and the file).
    """
    if (device is None):
        device = ('cuda' if torch.cuda.is_available() else 'cpu')
    if (precision is None):
        precision = ('fp32' if (device == 'cpu') else 'fp16')
    # Resolve the checkpoint: known pretrained name -> download; else local path.
    if get_pretrained_url(name, 'openai'):
        model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f'Model {name} not found; available models = {list_openai_models()}')
    try:
        # Try loading as a TorchScript archive first.
        model = torch.jit.load(model_path, map_location=(device if jit else 'cpu')).eval()
        state_dict = None
    except RuntimeError:
        # Not a JIT archive -- fall back to a plain state dict.
        if jit:
            warnings.warn(f'File {model_path} is not a JIT archive. Loading as a state dict instead')
            jit = False
        state_dict = torch.load(model_path, map_location='cpu')
    if (not jit):
        # Eager path: rebuild the model from the state dict (stripping a
        # possible 'module.' DataParallel prefix) and apply the precision policy.
        cast_dtype = get_cast_dtype(precision)
        try:
            model = build_model_from_openai_state_dict((state_dict or model.state_dict()), cast_dtype=cast_dtype)
        except KeyError:
            sd = {k[7:]: v for (k, v) in state_dict['state_dict'].items()}
            model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
        model = model.to(device)
        # OpenAI checkpoints are fp16; upcast for fp32/amp, convert for bf16.
        if (precision.startswith('amp') or (precision == 'fp32')):
            model.float()
        elif (precision == 'bf16'):
            convert_weights_to_lp(model, dtype=torch.bfloat16)
        return model
    # JIT path: the scripted graph hard-codes device (and dtype) constants,
    # so patch every prim::Constant device node to the requested device.
    device_holder = torch.jit.trace((lambda : torch.ones([]).to(torch.device(device))), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes('prim::Constant') if ('Device' in repr(n))][(- 1)]

    def patch_device(module):
        # module.graph can raise RuntimeError on modules without a graph.
        try:
            graphs = ([module.graph] if hasattr(module, 'graph') else [])
        except RuntimeError:
            graphs = []
        if hasattr(module, 'forward1'):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes('prim::Constant'):
                if (('value' in node.attributeNames()) and str(node['value']).startswith('cuda')):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    if (precision == 'fp32'):
        # Likewise patch aten::to dtype constants (5 == torch.float16 enum
        # value in the JIT graph) so the graph runs in float32.
        float_holder = torch.jit.trace((lambda : torch.ones([]).float()), example_inputs=[])
        float_input = list(float_holder.graph.findNode('aten::to').inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            try:
                graphs = ([module.graph] if hasattr(module, 'graph') else [])
            except RuntimeError:
                graphs = []
            if hasattr(module, 'forward1'):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes('aten::to'):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the 2nd or 3rd argument
                        if (inputs[i].node()['value'] == 5):
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    # Expose the visual input resolution under the common attribute name.
    model.visual.image_size = model.input_resolution.item()
    return model
class TestVersion(unittest.TestCase):
    """Sanity checks on pyppeteer's version metadata."""

    def test_version(self):
        # The version string is dotted semver-like: exactly three components.
        ver = pyppeteer.version
        self.assertIsInstance(ver, str)
        self.assertEqual(ver.count('.'), 2)

    def test_version_info(self):
        # version_info mirrors the string as a 3-tuple of ints.
        info = pyppeteer.version_info
        self.assertEqual(len(info), 3)
        for part in info:
            self.assertIsInstance(part, int)
def async_wraps(cls: type[object], wrapped_cls: type[object], attr_name: str) -> t.Callable[([CallT], CallT)]:
    """Build a decorator that renames *func* after ``attr_name`` and documents
    it as the async counterpart of ``wrapped_cls.attr_name``."""
    def decorator(func: CallT) -> CallT:
        # Rebrand the wrapper so introspection points at the async API surface.
        func.__name__ = attr_name
        func.__qualname__ = f'{cls.__qualname__}.{attr_name}'
        func.__doc__ = 'Like :meth:`~{}.{}.{}`, but async.\n\n '.format(wrapped_cls.__module__, wrapped_cls.__qualname__, attr_name)
        return func

    return decorator
def get_solcast_historic(latitude, longitude, start, api_key, end=None, duration=None, map_variables=True, **kwargs):
    """Fetch historical radiation/weather data from the Solcast API.

    Returns a ``(data, metadata)`` tuple where metadata echoes the requested
    latitude/longitude. Extra keyword arguments are forwarded as query params.
    """
    query = dict(
        latitude=latitude,
        longitude=longitude,
        start=start,
        end=end,
        duration=duration,
        api_key=api_key,
        format='json',
        **kwargs,
    )
    frame = _get_solcast(
        endpoint='historic/radiation_and_weather',
        params=query,
        api_key=api_key,
        map_variables=map_variables,
    )
    meta = {'latitude': latitude, 'longitude': longitude}
    return (frame, meta)
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
    """Checks for additional blank line issues related to sections: a
    public:/protected:/private: label in a sufficiently large class should be
    preceded by a blank line.

    :param filename: name of the file being checked.
    :param clean_lines: CleansedLines-style object with the file's lines.
    :param class_info: info object for the enclosing class (start/end lines).
    :param linenum: current line number.
    :param error: error-reporting callback.
    """
    # Skip tiny classes (<= 24 lines) and the class's own opening lines.
    if (((class_info.last_line - class_info.starting_linenum) <= 24) or (linenum <= class_info.starting_linenum)):
        return
    matched = Match('\\s*(public|protected|private):', clean_lines.lines[linenum])
    if matched:
        prev_line = clean_lines.lines[(linenum - 1)]
        # Allow the label to follow the class/struct header or a line
        # continuation; otherwise it must follow a blank line.
        if ((not IsBlankLine(prev_line)) and (not Search('\\b(class|struct)\\b', prev_line)) and (not Search('\\\\$', prev_line))):
            # Only flag labels past the end of the class head (the line with
            # the opening brace), to avoid multi-line class declarations.
            end_class_head = class_info.starting_linenum
            for i in range(class_info.starting_linenum, linenum):
                if Search('\\{\\s*$', clean_lines.lines[i]):
                    end_class_head = i
                    break
            if (end_class_head < (linenum - 1)):
                error(filename, linenum, 'whitespace/blank_line', 3, ('"%s:" should be preceded by a blank line' % matched.group(1)))
class IDBH(tc.nn.Module):
    """Image augmentation pipeline (IDBH) with dataset-specific presets.

    The three presets differ in erasing probability and crop/color settings;
    an unrecognized ``version`` string raises immediately.
    """

    def __init__(self, version):
        super().__init__()
        if (version == 'cifar10-weak'):
            # Weak variant: random erasing applied only half the time.
            layers = [T.RandomHorizontalFlip(), CropShift(0, 11), ColorShape('color'), T.ToTensor(), T.RandomErasing(p=0.5)]
        elif (version == 'cifar10-strong'):
            # Strong variant: random erasing always applied.
            layers = [T.RandomHorizontalFlip(), CropShift(0, 11), ColorShape('color'), T.ToTensor(), T.RandomErasing(p=1)]
        elif (version == 'svhn'):
            # SVHN: shape-based color transform and a wider erasing scale range.
            layers = [T.RandomHorizontalFlip(), CropShift(0, 9), ColorShape('shape'), T.ToTensor(), T.RandomErasing(p=1, scale=(0.02, 0.5))]
        else:
            raise Exception('IDBH: invalid version string')
        self.layers = T.Compose(layers)

    def forward(self, img):
        # Apply the composed augmentation pipeline to a single image.
        return self.layers(img)
def build_dataset(is_train, args, infer_no_resize=False):
    """Instantiate the dataset named by ``args.data_set``.

    :param is_train: select the training split (else validation/test).
    :param args: namespace providing ``data_set``, ``data_path`` and, for the
        iNaturalist datasets, ``inat_category``.
    :param infer_no_resize: forwarded to ``build_transform``.
    :return: ``(dataset, nb_classes)`` tuple.
    :raises ValueError: for an unrecognized ``args.data_set``.
    """
    transform = build_transform(is_train, args, infer_no_resize)
    if (args.data_set == 'CIFAR100'):
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform, download=True)
        nb_classes = 100
    elif (args.data_set == 'CIFAR10'):
        dataset = datasets.CIFAR10(args.data_path, train=is_train, transform=transform, download=True)
        nb_classes = 10
    elif (args.data_set == 'CARS'):
        dataset = CarsDataset(args.data_path, train=is_train, transform=transform)
        nb_classes = 196
    elif (args.data_set == 'FLOWERS'):
        root = os.path.join(args.data_path, ('train' if is_train else 'test'))
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 102
    elif (args.data_set == 'IMNET'):
        root = os.path.join(args.data_path, ('train' if is_train else 'val'))
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 1000
    elif (args.data_set == 'INAT'):
        dataset = INatDataset(args.data_path, train=is_train, year=2018, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif (args.data_set == 'INAT19'):
        dataset = INatDataset(args.data_path, train=is_train, year=2019, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    else:
        # BUG FIX: an unknown dataset name previously fell through to the
        # return statement and crashed with an opaque UnboundLocalError;
        # fail fast with a clear message instead.
        raise ValueError('Unknown dataset: {}'.format(args.data_set))
    return (dataset, nb_classes)
def test_should_show_fixtures_used_by_test(pytester: Pytester) -> None:
    """``--fixtures-per-test`` reports each fixture a test uses, with its
    source location and docstring; a test-module fixture shadows the
    same-named conftest fixture.

    NOTE(review): the ``arg1``/``arg2`` definitions in the generated files
    carry no ``@pytest.fixture`` decorators -- they appear stripped from this
    copy; confirm against version control.
    """
    pytester.makeconftest('\n import pytest\n \n def arg1():\n """arg1 from conftest"""\n \n def arg2():\n """arg2 from conftest"""\n ')
    p = pytester.makepyfile('\n import pytest\n \n def arg1():\n """arg1 from testmodule"""\n def test_args(arg1, arg2):\n pass\n ')
    result = pytester.runpytest('--fixtures-per-test', p)
    assert (result.ret == 0)
    # arg1 must resolve to the test module's definition, arg2 to conftest's.
    result.stdout.fnmatch_lines(['*fixtures used by test_args*', '*(test_should_show_fixtures_used_by_test.py:6)*', 'arg1 -- test_should_show_fixtures_used_by_test.py:3', '    arg1 from testmodule', 'arg2 -- conftest.py:6', '    arg2 from conftest'])
def test_recompute_equilibrium(verbose=True, warnings=True, plot=True, *args, **kwargs):
    """Rescaling an equilibrium spectrum with Kirchhoff's law must match
    re-solving the radiative transfer equation from emisscoeff + abscoeff."""
    if plot:
        import matplotlib.pyplot as plt
        plt.ion()  # keep figures non-blocking during the test run
    s1 = load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.01.spec'))
    s1.rescale_path_length(100)
    assert s1.is_at_equilibrium()
    s1.update('emisscoeff')
    # s2 is the same spectrum but flagged non-equilibrium, so its update()
    # must go through the RTE instead of Kirchhoff's law.
    s2 = s1.copy()
    s2.conditions['thermal_equilibrium'] = False
    s2.update()
    # Emissivity is only defined at thermal equilibrium.
    assert ('emissivity_noslit' not in s2.get_vars())
    s1.update()
    assert ('emissivity_noslit' in s1.get_vars())
    s1.name = 'scaled with Kirchoff law'
    s2.name = 'scaled from emisscoeff + abscoeff with RTE'
    if verbose:
        print(('Checked that scaling at equilibrium with Kirchoff law yields the ' + 'same radiance as by solving the RTE from emisscoeff and abscoeff'))
    # The two scaling paths must produce the same radiance.
    assert s1.compare_with(s2, spectra_only='radiance_noslit', plot=plot)
class ImageToWordModel(OnnxInferenceModel):
    """ONNX inference wrapper that turns a word image into decoded text."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def predict(self, image: np.ndarray):
        """Resize, batch and run the image through the model, then CTC-decode."""
        # Model input_shape is (H, W, ...); cv2.resize wants (W, H).
        target_size = self.input_shape[:2][::(- 1)]
        resized = cv2.resize(image, target_size)
        # Add the batch dimension and cast to the model's expected float32.
        batch = np.expand_dims(resized, axis=0).astype(np.float32)
        logits = self.model.run(None, {self.input_name: batch})[0]
        return ctc_decoder(logits, self.vocab)[0]
class SubQueryLineageHolder(ColumnLineageMixin):
def __init__(self) -> None:
self.graph = nx.DiGraph()
def __or__(self, other):
self.graph = nx.compose(self.graph, other.graph)
return self
def _property_getter(self, prop) -> Set[Union[(SubQuery, Table)]]:
return {t for (t, attr) in self.graph.nodes(data=True) if (attr.get(prop) is True)}
def _property_setter(self, value, prop) -> None:
self.graph.add_node(value, **{prop: True})
def read(self) -> Set[Union[(SubQuery, Table)]]:
return self._property_getter(NodeTag.READ)
def add_read(self, value) -> None:
self._property_setter(value, NodeTag.READ)
if hasattr(value, 'alias'):
self.graph.add_edge(value, value.alias, type=EdgeType.HAS_ALIAS)
def write(self) -> Set[Union[(SubQuery, Table)]]:
return self._property_getter(NodeTag.WRITE)
def add_write(self, value) -> None:
self._property_setter(value, NodeTag.WRITE)
def cte(self) -> Set[SubQuery]:
return self._property_getter(NodeTag.CTE)
def add_cte(self, value) -> None:
self._property_setter(value, NodeTag.CTE)
def write_columns(self) -> List[Column]:
tgt_cols = []
if (tgt_tbl := self._get_target_table()):
tgt_col_with_idx: List[Tuple[(Column, int)]] = sorted([(col, attr.get(EdgeTag.INDEX, 0)) for (tbl, col, attr) in self.graph.out_edges(tgt_tbl, data=True) if (attr['type'] == EdgeType.HAS_COLUMN)], key=(lambda x: x[1]))
tgt_cols = [x[0] for x in tgt_col_with_idx]
return tgt_cols
def add_write_column(self, *tgt_cols: Column) -> None:
if self.write:
tgt_tbl = list(self.write)[0]
for (idx, tgt_col) in enumerate(tgt_cols):
tgt_col.parent = tgt_tbl
if (tgt_col in self.write_columns):
break
self.graph.add_edge(tgt_tbl, tgt_col, type=EdgeType.HAS_COLUMN, **{EdgeTag.INDEX: idx})
def add_column_lineage(self, src: Column, tgt: Column) -> None:
self.graph.add_edge(src, tgt, type=EdgeType.LINEAGE)
self.graph.add_edge(tgt.parent, tgt, type=EdgeType.HAS_COLUMN)
if (src.parent is not None):
self.graph.add_edge(src.parent, src, type=EdgeType.HAS_COLUMN)
def get_table_columns(self, table: Union[(Table, SubQuery)]) -> List[Column]:
return [tgt for (src, tgt, edge_type) in self.graph.out_edges(nbunch=table, data='type') if ((edge_type == EdgeType.HAS_COLUMN) and isinstance(tgt, Column) and (tgt.raw_name != '*'))]
def expand_wildcard(self, metadata_provider: MetaDataProvider) -> None:
if (tgt_table := self._get_target_table()):
for column in self.write_columns:
if (column.raw_name == '*'):
tgt_wildcard = column
for src_wildcard in self.get_source_columns(tgt_wildcard):
if (source_table := src_wildcard.parent):
src_table_columns = []
if isinstance(source_table, SubQuery):
src_table_columns = self.get_table_columns(source_table)
elif (isinstance(source_table, Table) and metadata_provider):
src_table_columns = metadata_provider.get_table_columns(source_table)
if src_table_columns:
self._replace_wildcard(tgt_table, src_table_columns, tgt_wildcard, src_wildcard)
def _get_target_table(self) -> Optional[Union[(SubQuery, Table)]]:
table = None
if (write_only := self.write.difference(self.read)):
table = next(iter(write_only))
return table
def get_source_columns(self, node: Column) -> List[Column]:
return [src for (src, tgt, edge_type) in self.graph.in_edges(nbunch=node, data='type') if ((edge_type == EdgeType.LINEAGE) and isinstance(src, Column))]
    def _replace_wildcard(self, tgt_table: Union[(Table, SubQuery)], src_table_columns: List[Column], tgt_wildcard: Column, src_wildcard: Column) -> None:
        """Materialize one column per *src_table_columns* entry on *tgt_table*,
        wire up lineage, then drop the wildcard placeholder nodes."""
        target_columns = self.get_table_columns(tgt_table)
        for src_col in src_table_columns:
            new_column = Column(src_col.raw_name)
            new_column.parent = tgt_table
            # Skip columns already present and nested wildcards.
            if ((new_column in target_columns) or (src_col.raw_name == '*')):
                continue
            self.graph.add_edge(tgt_table, new_column, type=EdgeType.HAS_COLUMN)
            self.graph.add_edge(src_col.parent, src_col, type=EdgeType.HAS_COLUMN)
            self.graph.add_edge(src_col, new_column, type=EdgeType.LINEAGE)
        # Both wildcard nodes are now redundant; removing a node also removes its edges.
        if self.graph.has_node(tgt_wildcard):
            self.graph.remove_node(tgt_wildcard)
        if self.graph.has_node(src_wildcard):
            self.graph.remove_node(src_wildcard)
def compute_checksum(filename, hashtype):
    """Compute a checksum of *filename* using *hashtype*.

    Supported hash types are zlib's 'adler32'/'crc32' and anything in
    ``hashlib.algorithms_available``. Returns the hex digest as a string,
    or None when the file does not exist or the hash type is unknown.
    """
    file = os.fsdecode(filename)
    if (not exists(file)):
        return None
    buf = fsbsize(filename)
    if (hashtype in ('adler32', 'crc32')):
        hf = getattr(zlib, hashtype)
        last = 0
        with open(file, mode='rb') as fp:
            # Bug fix: the file is opened in binary mode, so the iter() sentinel
            # must be b'' — the previous '' sentinel never matched the b'' returned
            # at EOF, so the loop never terminated.
            for chunk in iter((lambda: fp.read(buf)), b''):
                last = hf(chunk, last)
        return '{:x}'.format(last)
    elif (hashtype in hashlib.algorithms_available):
        h = hashlib.new(hashtype)
        with open(file, mode='rb') as fp:
            # Same sentinel fix as above.
            for chunk in iter((lambda: fp.read((buf * h.block_size))), b''):
                h.update(chunk)
        return h.hexdigest()
    else:
        return None
def test_context():
    """Smoke test: sampling with an explicit 'spawn' multiprocessing context works."""
    spawn_ctx = multiprocessing.get_context('spawn')
    with pm.Model():
        pm.Normal('x')
        with warnings.catch_warnings():
            # Tiny tune/draws trigger a "number of samples" warning; silence it.
            warnings.filterwarnings('ignore', '.*number of samples.*', UserWarning)
            pm.sample(tune=2, draws=2, chains=2, cores=2, mp_ctx=spawn_ctx)
def test_componentanimation():
    """Exercise _ComponentAnimation construction, equality, and XML round-tripping
    for vehicle components and user-defined components."""
    door_fl = OSC.utils._VehicleComponent(OSC.VehicleComponentType.doorFrontLeft)
    door_rr = OSC.utils._VehicleComponent(OSC.VehicleComponentType.doorRearRight)
    custom = OSC.UserDefinedComponent('my_component')
    custom_other = OSC.UserDefinedComponent('my_component2')
    custom_door_name = OSC.UserDefinedComponent('doorFrontLeft')
    anim = OSC.utils._ComponentAnimation(door_fl)
    prettyprint(anim.get_element())
    anim_rr = OSC.utils._ComponentAnimation(door_rr)
    assert anim != anim_rr
    anim_custom = OSC.utils._ComponentAnimation(custom)
    prettyprint(anim_custom.get_element())
    # A user-defined component that merely shares a vehicle-component name
    # must still compare unequal to the real vehicle component animation.
    anim_door_name = OSC.utils._ComponentAnimation(custom_door_name)
    prettyprint(anim_door_name.get_element())
    assert anim != anim_custom
    assert anim != anim_door_name
    anim_custom_other = OSC.utils._ComponentAnimation(custom_other)
    assert anim_custom != anim_custom_other
    # parse() should round-trip every variant to an equal object.
    reparsed = OSC.utils._ComponentAnimation.parse(anim.get_element())
    prettyprint(reparsed.get_element())
    assert reparsed == anim
    reparsed_rr = OSC.utils._ComponentAnimation.parse(anim_rr.get_element())
    assert reparsed_rr == anim_rr
    reparsed_door_name = OSC.utils._ComponentAnimation.parse(anim_door_name.get_element())
    assert reparsed_door_name == anim_door_name
    assert reparsed_door_name != anim
def prime_factors(obj):
visited = set((obj,))
ef = getattr(obj, '_e_factors', None)
if (not ef):
return
fn = ef[0]
e = getattr(obj, fn, None)
if (e in visited):
raise RecursiveFactor(obj, e)
visited.add(e)
(yield (fn, e))
while (e is not None):
ef = getattr(obj, '_e_factors', None)
fn = ef[0]
e = getattr(e, fn, None)
if (e in visited):
raise RecursiveFactor(obj, e)
visited.add(e)
(yield (fn, e)) |
class GroupPointTest(tf.test.TestCase):
    """Numerical gradient check for the custom `group_point` TF op (TF1-style)."""
    def test(self):
        pass
    def test_grad(self):
        # Build random point features and two point sets, group them, then
        # verify the gradient of group_point numerically on GPU.
        with tf.device('/gpu:0'):
            # (batch=1, n_points=128, channels=16) per-point features
            points = tf.constant(np.random.random((1, 128, 16)).astype('float32'))
            print(points)
            # source / query coordinate sets
            xyz1 = tf.constant(np.random.random((1, 128, 3)).astype('float32'))
            xyz2 = tf.constant(np.random.random((1, 8, 3)).astype('float32'))
            radius = 0.3
            nsample = 32
            # idx: indices of up to nsample neighbors within radius of each query point
            (idx, pts_cnt) = query_ball_point(radius, nsample, xyz1, xyz2)
            grouped_points = group_point(points, idx)
            print(grouped_points)
        with self.test_session():
            print('---- Going to compute gradient error')
            # Compare analytic vs numeric gradients; shapes: input (1,128,16) -> output (1,8,32,16)
            err = tf.test.compute_gradient_error(points, (1, 128, 16), grouped_points, (1, 8, 32, 16))
            print(err)
            self.assertLess(err, 0.0001)
def rand_reach():
    """Sample a random 'reach' task configuration: viewpoint, goal position,
    arm/background colors (kept visually distinct), and 5 distractor geoms.

    NOTE: the order of np.random calls is preserved so seeded runs match.
    """
    viewpoint = np.random.uniform(low=0, high=360)
    goal = np.concatenate([
        np.random.uniform(low=(- 1.1), high=(- 0.5), size=1),
        np.random.uniform(low=0.5, high=1.1, size=1),
    ]).tolist()
    arm_rgb = getcolor()
    bg_rgb = getcolor()
    # Resample the background until it is far enough from the arm color.
    while np.linalg.norm((bg_rgb - arm_rgb)) < 0.5:
        bg_rgb = np.random.uniform(low=0, high=1, size=3)
    arm_rgba = arm_rgb.tolist() + [1.0]
    bg_rgba = bg_rgb.tolist() + [1.0]
    geoms = []
    for _ in range(5):
        geom_x = np.random.uniform(low=(- 0.9), high=0.9)
        geom_y = np.random.uniform(low=0, high=1.0)
        geom_rgba = getcolor().tolist()
        invisible_flag = 1 if np.random.random() > 0.5 else 0
        geoms.append([(geom_rgba + [invisible_flag]), geom_x, geom_y])
    return dict(vp=viewpoint, bgcolor=bg_rgba, armcolor=arm_rgba, goal=goal, imsize=(48, 48), geoms=geoms, name='reach', modelname='model/reachvpdistract130723', meanfile='model/reach_inception.npz', modeldata='model/reachdata_train.npy', experttheano='experttheano_reach.pkl')
class LR_Scheduler(object):
    """Learning-rate scheduler with 'cos', 'poly' and 'step' modes, plus an
    optional linear warmup over the first `warmup_epochs` epochs.

    Args:
        mode: one of 'cos', 'poly', 'step'.
        base_lr: base learning rate.
        num_epochs: total training epochs.
        iters_per_epoch: iterations per epoch (defines the global step count).
        lr_step: epoch period for 'step' decay (required when mode == 'step').
        warmup_epochs: epochs of linear warmup from 0 up to the scheduled lr.
    """
    def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0, lr_step=0, warmup_epochs=0):
        self.mode = mode
        print('Using {} LR Scheduler!'.format(self.mode))
        self.lr = base_lr
        if (mode == 'step'):
            assert lr_step
        self.lr_step = lr_step
        self.iters_per_epoch = iters_per_epoch
        # Total number of iterations across the whole run.
        self.N = (num_epochs * iters_per_epoch)
        self.epoch = (- 1)
        self.warmup_iters = (warmup_epochs * iters_per_epoch)
    def __call__(self, optimizer, i, epoch, best_pred):
        """Set the learning rate on *optimizer* for iteration *i* of *epoch*."""
        T = ((epoch * self.iters_per_epoch) + i)  # global step
        if (self.mode == 'cos'):
            lr = ((0.5 * self.lr) * (1 + math.cos((((1.0 * T) / self.N) * math.pi))))
        elif (self.mode == 'poly'):
            lr = (self.lr * pow((1 - ((1.0 * T) / self.N)), 0.9))
        elif (self.mode == 'step'):
            lr = (self.lr * (0.1 ** (epoch // self.lr_step)))
        else:
            # Bug fix: `raise NotImplemented` raises a TypeError at runtime because
            # NotImplemented is not an exception class; use NotImplementedError.
            raise NotImplementedError('Unknown LR scheduler mode: {}'.format(self.mode))
        if ((self.warmup_iters > 0) and (T < self.warmup_iters)):
            # Linear warmup from 0 toward the scheduled lr.
            lr = (((lr * 1.0) * T) / self.warmup_iters)
        if (epoch > self.epoch):
            print(('\n=>Epoches %i, learning rate = %.4f, previous best = %.4f' % (epoch, lr, best_pred)))
            self.epoch = epoch
        assert (lr >= 0)
        self._adjust_learning_rate(optimizer, lr)
    def _adjust_learning_rate(self, optimizer, lr):
        # First param group gets the base lr; any additional groups
        # (e.g. a task-specific head) get 10x the base lr.
        if (len(optimizer.param_groups) == 1):
            optimizer.param_groups[0]['lr'] = lr
        else:
            optimizer.param_groups[0]['lr'] = lr
            for i in range(1, len(optimizer.param_groups)):
                optimizer.param_groups[i]['lr'] = (lr * 10)
class _AttributeCollector():
    """Collects BuiltinName descriptors for attributes of a builtin *type*."""
    def __init__(self, type):
        self.attributes = {}
        self.type = type
    def __call__(self, name, returned=None, function=None, argnames=None, check_existence=True, parent=None):
        """Register attribute *name* as a BuiltinFunction wrapper.

        If *check_existence* is true, a missing attribute on the underlying
        type re-raises AttributeError; otherwise the builtin is recorded as None.
        """
        # Bug fix: avoid a shared mutable default argument; ['self'] stays the
        # effective default.
        if argnames is None:
            argnames = ['self']
        try:
            builtin = getattr(self.type, name)
        except AttributeError:
            if check_existence:
                raise
            builtin = None
        self.attributes[name] = BuiltinName(BuiltinFunction(returned=returned, function=function, argnames=argnames, builtin=builtin, parent=parent))
    def __setitem__(self, name, value):
        self.attributes[name] = value
class VSA_Module(nn.Module):
    """Visual self-attention module: fuses a low-level and a high-level feature
    map, computes a spatial attention map over the fusion, and uses the pooled
    result to gate the `solo_feature` embedding."""
    def __init__(self, opt={}):
        super(VSA_Module, self).__init__()
        # Channel configuration comes from the options dict.
        in_ch = opt['multiscale']['multiscale_input_channel']
        out_ch = opt['multiscale']['multiscale_output_channel']
        embed_dim = opt['embed']['embed_dim']
        # Project both feature maps to a common channel count; the low-level
        # branch is also spatially downsampled (stride 4).
        self.LF_conv = nn.Conv2d(in_channels=192, out_channels=in_ch, kernel_size=3, stride=4)
        self.HF_conv = nn.Conv2d(in_channels=768, out_channels=in_ch, kernel_size=1, stride=1)
        self.conv1x1_1 = nn.Conv2d(in_channels=(in_ch * 2), out_channels=out_ch, kernel_size=1)
        self.conv1x1_2 = nn.Conv2d(in_channels=(in_ch * 2), out_channels=out_ch, kernel_size=1)
        self.solo_attention = nn.Linear(in_features=256, out_features=embed_dim)
    def forward(self, lower_feature, higher_feature, solo_feature):
        low = self.LF_conv(lower_feature)
        high = self.HF_conv(higher_feature)
        fused = torch.cat([low, high], dim=1)
        # Add the channel-mean of the high-level branch to every fused channel.
        fused = high.mean(dim=1, keepdim=True).expand_as(fused) + fused
        main_feature = self.conv1x1_1(fused)
        batch = fused.shape[0]
        attn_logits = self.conv1x1_2(fused).view(batch, 1, (- 1))
        attn_map = torch.sigmoid(attn_logits).view(batch, 1, main_feature.shape[2], main_feature.shape[3])
        pooled = (main_feature * attn_map).squeeze(dim=1).view(attn_map.shape[0], (- 1))
        # Gate the solo embedding with a sigmoid attention vector.
        gate = torch.sigmoid(self.solo_attention(pooled))
        return solo_feature * gate
def register_mot_instances(name, metadata, json_file, image_root):
    """Register an MOT video dataset and its metadata with detectron2 catalogs."""
    assert isinstance(name, str), name
    assert isinstance(json_file, (str, os.PathLike)), json_file
    assert isinstance(image_root, (str, os.PathLike)), image_root

    def _loader():
        # Deferred so the JSON is only parsed when the dataset is actually used.
        return load_video_json(json_file, image_root, name, extra_annotation_keys=['instance_id'], map_inst_id=True)

    DatasetCatalog.register(name, _loader)
    MetadataCatalog.get(name).set(json_file=json_file, image_root=image_root, evaluator_type='mot', **metadata)
def set_project(apps, schema_editor):
    """Data migration: copy each Value's project from its snapshot, detach the
    snapshot reference, then delete every Snapshot row."""
    value_model = apps.get_model('projects', 'Value')
    snapshot_model = apps.get_model('projects', 'Snapshot')
    # Re-point each value at its snapshot's project and drop the snapshot link.
    for val in value_model.objects.all():
        val.project = val.snapshot.project
        val.snapshot = None
        val.save()
    # All snapshots are now redundant.
    for snap in snapshot_model.objects.all():
        snap.delete()
class CmdCancel(SubCommand):
    """`torchx cancel` subcommand: stops a running application by handle."""
    def add_arguments(self, subparser: argparse.ArgumentParser) -> None:
        subparser.add_argument('app_handle', type=str, help='torchx app handle (e.g. local://session-name/app-id)')
    def run(self, args: argparse.Namespace) -> None:
        # Delegate the actual cancellation to the configured runner.
        get_runner().cancel(args.app_handle)
class Attention(nn.Module):
    """Multi-head self-attention with optional spatial reduction of keys/values
    (PVT-style) and an optional learned re-weighting of attention maps.

    Args:
        dim: embedding dimension.
        num_heads: number of attention heads.
        qkv_bias: add bias to the q and kv projections.
        qk_scale: override for the default head_dim ** -0.5 scaling.
        attn_drop, proj_drop: dropout on attention weights / output projection.
        sr_ratio: spatial-reduction ratio for k/v (>1 enables a depthwise
            strided conv shrinking the key/value token grid).
        apply_transform: mix attention maps across heads with a 1x1 conv
            (only active when num_heads > 1).
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, sr_ratio=1, apply_transform=False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = (dim // num_heads)
        self.scale = (qk_scale or (head_dim ** (- 0.5)))
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, (dim * 2), bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.sr_ratio = sr_ratio
        if (sr_ratio > 1):
            # Depthwise strided conv reduces the spatial size of the k/v tokens.
            self.sr = nn.Conv2d(dim, dim, kernel_size=(sr_ratio + 1), stride=sr_ratio, padding=(sr_ratio // 2), groups=dim)
            self.sr_norm = nn.LayerNorm(dim)
        self.apply_transform = (apply_transform and (num_heads > 1))
        if self.apply_transform:
            self.transform_conv = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1, stride=1)
            self.transform_norm = nn.InstanceNorm2d(self.num_heads)
    def forward(self, x, H, W):
        """x: (B, N, C) token sequence with N == H * W. Returns (B, N, C)."""
        (B, N, C) = x.shape
        q = self.q(x).reshape(B, N, self.num_heads, (C // self.num_heads)).permute(0, 2, 1, 3)
        if (self.sr_ratio > 1):
            # Reshape tokens back to a feature map, spatially reduce, re-flatten.
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, (- 1)).permute(0, 2, 1)
            x_ = self.sr_norm(x_)
            kv = self.kv(x_).reshape(B, (- 1), 2, self.num_heads, (C // self.num_heads)).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, N, 2, self.num_heads, (C // self.num_heads)).permute(2, 0, 3, 1, 4)
        (k, v) = (kv[0], kv[1])
        # Bug fix: the matmul operators were missing ("q k.transpose(...)" and
        # "attn v" are not valid Python); restore scaled dot-product attention.
        attn = ((q @ k.transpose((- 2), (- 1))) * self.scale)
        if self.apply_transform:
            attn = self.transform_conv(attn)
            attn = attn.softmax(dim=(- 1))
            attn = self.transform_norm(attn)
        else:
            attn = attn.softmax(dim=(- 1))
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
@pytest.mark.parametrize(('prefer_metroids', 'prefer_stronger_metroids', 'prefer_bosses', 'expected_max_slider'), [(True, False, False, 25), (False, True, False, 14), (False, False, True, 4), (True, True, False, 39), (True, False, True, 29), (False, True, True, 18), (True, True, True, 39)])
def test_preferred_dna(skip_qtbot, msr_game_description, preset_manager, prefer_metroids: bool, prefer_stronger_metroids: bool, prefer_bosses: bool, expected_max_slider: int):
    """Check that toggling the DNA location-preference checkboxes updates the
    slider maximum and the resulting MSRConfiguration consistently.

    NOTE(review): the decorator line was truncated to bare ``.parametrize`` —
    restored as ``@pytest.mark.parametrize`` (invalid syntax otherwise).
    """
    game = msr_game_description.game
    base = preset_manager.default_preset_for_game(game).get_preset()
    preset = dataclasses.replace(base, uuid=uuid.UUID('b41fde84-1f57-4b79-8cd6-3e5a78077fa6'))
    base_configuration = preset.configuration
    options = MagicMock()
    assert isinstance(base_configuration, MSRConfiguration)
    tab = PresetMSRGoal((editor := PresetEditor(preset, options)), msr_game_description, MagicMock())
    skip_qtbot.addWidget(tab)
    tab.on_preset_changed(preset)
    assert tab.dna_slider.isEnabled()
    assert (tab.dna_slider.value() > 0)
    initial_slider_value = tab.dna_slider.value()
    tab.prefer_metroids_check.setChecked(prefer_metroids)
    slider_value_after_first_set = tab.dna_slider.value()
    tab.prefer_stronger_metroids_check.setChecked(prefer_stronger_metroids)
    tab.prefer_bosses_check.setChecked(prefer_bosses)
    tab._update_slider_max()
    if ((not prefer_metroids) and (not prefer_stronger_metroids) and (not prefer_bosses)):
        assert (slider_value_after_first_set == 0)
    assert (slider_value_after_first_set >= tab.dna_slider.value())
    assert (tab.num_preferred_locations == expected_max_slider)
    assert (tab.dna_slider.maximum() == expected_max_slider)
    assert (tab.dna_slider.isEnabled() == (expected_max_slider > 0))
    configuration = editor.configuration
    assert isinstance(configuration, MSRConfiguration)
    assert (configuration.artifacts.prefer_metroids == prefer_metroids)
    assert (configuration.artifacts.prefer_stronger_metroids == prefer_stronger_metroids)
    assert (configuration.artifacts.prefer_bosses == prefer_bosses)
    expected_artifacts = expected_max_slider
    if (initial_slider_value < expected_max_slider):
        expected_artifacts = initial_slider_value
    if (slider_value_after_first_set == 0):
        expected_artifacts = 0
    assert (configuration.artifacts.required_artifacts == expected_artifacts)
    assert (tab.dna_slider.value() == expected_artifacts)
def load_tf_sess_variables_to_keras_single_gpu(path: 'str', compressed_ops: List['str']) -> tf.compat.v1.keras.Model:
    """Load a saved model from *path* as a Keras model, excluding variables whose
    (renamed) op names appear in *compressed_ops*.

    Returns a Model whose `variables_list` excludes the compressed ops' variables.
    """
    # Bug fix: materialize into a set. The previous lazy `map` iterator was
    # exhausted after the first `in` membership test, so every later variable
    # passed the filter unchecked.
    to_ignore = set(map(change_name_of_compressed_op, compressed_ops))

    class Model(tf.compat.v1.keras.Model):
        def __init__(self):
            super(Model, self).__init__()
            self.imported = tf.compat.v1.saved_model.load_v2(path)
            self.variables_list = [v for v in self.imported.variables if (v.name not in to_ignore)]

        def call(self, inputs, training=None):
            if training:
                return self.imported.signatures['train'](inputs)
            # Bug fix: previously passed the builtin `input` instead of `inputs`.
            return self.imported.signatures['serving_default'](inputs)

    return Model()
def test_edit_file_with_spaces(base_app, request, monkeypatch):
    """The edit command must handle quoted filenames containing spaces."""
    base_app.editor = 'fooedit'
    popen_mock = mock.MagicMock(name='Popen')
    # Intercept the editor launch so no real process is spawned.
    monkeypatch.setattr('subprocess.Popen', popen_mock)
    target = os.path.join(os.path.dirname(request.module.__file__), 'my commands.txt')
    run_cmd(base_app, 'edit "{}"'.format(target))
    popen_mock.assert_called_once()
def write_data(flag, image, text_only):
    """Build the paired (text, image, label, event) dataset for one split.

    Args:
        flag: which split to load — 'train', 'validate' or 'test'.
        image: mapping from image id to preloaded image data.
        text_only: when True, skip image pairing and keep all texts.

    Returns a dict of aligned arrays (post_text, original_post, image, label,
    event_label, post_id, image_id).

    NOTE(review): this is Python-2-era code — `unicode(...)` and `np.int` below
    do not exist on Python 3 / modern NumPy; left untouched here.
    """
    def read_post(flag):
        # Parse the raw weibo tweet files: records come in groups of 3 lines
        # (id line, original line, text line). Files alternate nonrumor/rumor.
        stop_words = stopwordslist()
        pre_path = '../Data/weibo/tweets/'
        file_list = [(pre_path + 'test_nonrumor.txt'), (pre_path + 'test_rumor.txt'), (pre_path + 'train_nonrumor.txt'), (pre_path + 'train_rumor.txt')]
        # Split membership: maps post id -> event id for the requested split.
        if (flag == 'train'):
            id = pickle.load(open('../Data/weibo/train_id.pickle', 'rb'))
        elif (flag == 'validate'):
            id = pickle.load(open('../Data/weibo/validate_id.pickle', 'rb'))
        elif (flag == 'test'):
            id = pickle.load(open('../Data/weibo/test_id.pickle', 'rb'))
        post_content = []
        labels = []
        image_ids = []
        twitter_ids = []
        data = []
        column = ['post_id', 'image_id', 'original_post', 'post_text', 'label', 'event_label']
        key = (- 1)
        map_id = {}
        top_data = []
        for (k, f) in enumerate(file_list):
            f = open(f, 'rb')
            # Even-indexed files (0, 2) are nonrumor -> label 0; odd are rumor -> 1.
            if (((k + 1) % 2) == 1):
                label = 0
            else:
                label = 1
            twitter_id = 0
            line_data = []
            top_line_data = []
            for (i, l) in enumerate(f.readlines()):
                # Line 1 of each 3-line record: "<post_id>|..." header.
                if (((i + 1) % 3) == 1):
                    line_data = []
                    twitter_id = l.split('|')[0]
                    line_data.append(twitter_id)
                # Line 2: original (image) line, lower-cased.
                if (((i + 1) % 3) == 2):
                    line_data.append(l.lower())
                # Line 3: the post text — clean, segment with jieba, drop stop words.
                if (((i + 1) % 3) == 0):
                    l = clean_str_sst(unicode(l, 'utf-8'))
                    seg_list = jieba.cut_for_search(l)
                    new_seg_list = []
                    for word in seg_list:
                        if (word not in stop_words):
                            new_seg_list.append(word)
                    clean_l = ' '.join(new_seg_list)
                    # Keep only sufficiently long posts that belong to this split.
                    if ((len(clean_l) > 10) and (line_data[0] in id)):
                        post_content.append(l)
                        line_data.append(l)
                        line_data.append(clean_l)
                        line_data.append(label)
                        # Remap raw event ids to a dense 0..K-1 range.
                        event = int(id[line_data[0]])
                        if (event not in map_id):
                            map_id[event] = len(map_id)
                            event = map_id[event]
                        else:
                            event = map_id[event]
                        line_data.append(event)
                        data.append(line_data)
            f.close()
        data_df = pd.DataFrame(np.array(data), columns=column)
        write_txt(top_data)
        return (post_content, data_df)
    (post_content, post) = read_post(flag)
    print(('Original post length is ' + str(len(post_content))))
    print(('Original data frame is ' + str(post.shape)))
    def find_most(db):
        # Return the key(s) whose value list is longest.
        maxcount = max((len(v) for v in db.values()))
        return [k for (k, v) in db.items() if (len(v) == maxcount)]
    def select(train, selec_indices):
        # Column-select each row of *train* by *selec_indices*.
        temp = []
        for i in range(len(train)):
            ele = list(train[i])
            temp.append([ele[i] for i in selec_indices])
        return temp
    def paired(text_only=False):
        # Align each post with one of its images (first image id found in *image*).
        ordered_image = []
        ordered_text = []
        ordered_post = []
        ordered_event = []
        label = []
        post_id = []
        image_id_list = []
        image_id = ''
        for (i, id) in enumerate(post['post_id']):
            for image_id in post.iloc[i]['image_id'].split('|'):
                # image ids are URL-like: take basename without extension.
                image_id = image_id.split('/')[(- 1)].split('.')[0]
                if (image_id in image):
                    break
            if (text_only or (image_id in image)):
                if (not text_only):
                    image_name = image_id
                    image_id_list.append(image_name)
                    ordered_image.append(image[image_name])
                ordered_text.append(post.iloc[i]['original_post'])
                ordered_post.append(post.iloc[i]['post_text'])
                ordered_event.append(post.iloc[i]['event_label'])
                post_id.append(id)
                label.append(post.iloc[i]['label'])
        label = np.array(label, dtype=np.int)
        ordered_event = np.array(ordered_event, dtype=np.int)
        print(('Label number is ' + str(len(label))))
        print(('Rummor number is ' + str(sum(label))))
        print(('Non rummor is ' + str((len(label) - sum(label)))))
        if (flag == 'test'):
            y = np.zeros(len(ordered_post))
        else:
            y = []
        data = {'post_text': np.array(ordered_post), 'original_post': np.array(ordered_text), 'image': ordered_image, 'social_feature': [], 'label': np.array(label), 'event_label': ordered_event, 'post_id': np.array(post_id), 'image_id': image_id_list}
        print(('data size is ' + str(len(data['post_text']))))
        return data
    paired_data = paired(text_only)
    print(('paired post length is ' + str(len(paired_data['post_text']))))
    print((('paried data has ' + str(len(paired_data))) + ' dimension'))
    return paired_data
@pytest.mark.parametrize('value, kwargs, result', (('5,6,7', {}, [5, 6, 7]), ('5.6.7', {'separator': '.'}, [5, 6, 7]), ('5,6,7', {'cast': str}, ['5', '6', '7']), ('X,Y,Z', {}, ['X', 'Y', 'Z']), ('X,Y,Z', {'cast': str}, ['X', 'Y', 'Z']), ('X.Y.Z', {'separator': '.'}, ['X', 'Y', 'Z']), ('0,5,7.1', {'cast': bool}, [False, True, True])))
def test_adapter_values(value, kwargs, result):
    """values() splits and casts its input; the call itself is deprecated and
    must emit a FutureWarning.

    NOTE(review): the decorator line was truncated to bare ``.parametrize`` —
    restored as ``@pytest.mark.parametrize`` (invalid syntax otherwise).
    """
    a = FakeAdapter()
    with pytest.warns(FutureWarning):
        assert (a.values(value, **kwargs) == result)
class Table(QtWidgets.QTableView):
    """Sortable table view over live experiment results, with periodic refresh,
    a context menu (copy / refresh / export), and export to several formats."""
    # Maps the file-dialog filter string to the pandas `to_<mode>` suffix.
    supported_formats = {'CSV file (*.csv)': 'csv', 'Excel file (*.xlsx)': 'excel', 'HTML file (*.html *.htm)': 'html', 'JSON file (*.json)': 'json', 'LaTeX file (*.tex)': 'latex', 'Markdown file (*.md)': 'markdown', 'XML file (*.xml)': 'xml'}
    def __init__(self, refresh_time=0.2, check_status=True, force_reload=False, layout_class=PandasModelByColumn, column_index=None, float_digits=6, parent=None):
        # refresh_time: seconds between automatic data refreshes (None disables).
        # check_status: only refresh rows whose procedure is still RUNNING.
        # float_digits: display precision handed to the model.
        super().__init__(parent)
        self.force_reload = force_reload
        self.float_digits = float_digits
        model = layout_class(column_index=column_index)
        self.setModel(model)
        self.horizontalHeader().setStyleSheet('font: bold;')
        # Column -1 clears any initial sort indicator.
        self.sortByColumn((- 1), QtCore.Qt.SortOrder.AscendingOrder)
        self.setSortingEnabled(True)
        self.horizontalHeader().setSectionsMovable(True)
        self.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeMode.ResizeToContents)
        self.setup_context_menu()
        self.refresh_time = refresh_time
        self.check_status = check_status
        if (self.refresh_time is not None):
            # Periodic refresh of the underlying result data.
            self.timer = QtCore.QTimer(self)
            self.timer.timeout.connect(self.update_tables)
            self.timer.start(int((self.refresh_time * 1000.0)))
    def setModel(self, model):
        # Wrap the data model in a sort proxy when sorting support is enabled.
        model.float_digits = self.float_digits
        if SORTING_ENABLED:
            proxyModel = QtCore.QSortFilterProxyModel(self)
            proxyModel.setSourceModel(model)
            model = proxyModel
            model.setSortRole(SORT_ROLE)
        super().setModel(model)
    def source_model(self):
        # Return the underlying data model, unwrapping the sort proxy if present.
        model = self.model()
        if SORTING_ENABLED:
            model = model.sourceModel()
        return model
    def export_action(self):
        # Ask for a destination file and export via the matching pandas writer.
        df = self.source_model().export_df()
        if (df is not None):
            formats = ';;'.join(self.supported_formats.keys())
            filename_and_ext = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', '', formats)
            filename = filename_and_ext[0]
            ext = filename_and_ext[1]
            if filename:
                mode = self.supported_formats[ext]
                # LaTeX export goes through the Styler API.
                prefix = (df.style if (mode == 'latex') else df)
                getattr(prefix, ('to_' + mode))(filename)
    def refresh_action(self):
        self.update_tables()
    def copy_action(self):
        # Copy the whole table to the system clipboard.
        df = self.source_model().export_df()
        if (df is not None):
            df.to_clipboard()
    def setup_context_menu(self):
        # Install the right-click menu with copy / refresh / export actions.
        self.setContextMenuPolicy(QtCore.Qt.ContextMenuPolicy.CustomContextMenu)
        self.customContextMenuRequested.connect(self.context_menu)
        self.copy = QtGui.QAction('Copy table data', self)
        self.copy.triggered.connect(self.copy_action)
        self.refresh = QtGui.QAction('Refresh table data', self)
        self.refresh.triggered.connect(self.refresh_action)
        self.export = QtGui.QAction('Export table data', self)
        self.export.triggered.connect(self.export_action)
    def context_menu(self, point):
        menu = QtWidgets.QMenu(self)
        menu.addAction(self.copy)
        menu.addAction(self.refresh)
        menu.addAction(self.export)
        menu.exec(self.mapToGlobal(point))
    def update_tables(self, force=False):
        # Refresh result items; unless forced, only those still running.
        model = self.source_model()
        for item in model.results_list:
            if ((not self.check_status) or force):
                item.update_data()
            elif (item.results.procedure.status == Procedure.RUNNING):
                item.update_data()
    def set_color(self, table, color):
        table.set_color(color)
    def add_table(self, table):
        model = self.source_model()
        model.add_results(table)
    def remove_table(self, table):
        model = self.source_model()
        model.remove_results(table)
        table.stop()
        if (model.rowCount() == 0):
            # Reset the sort indicator once the table is empty.
            self.setSortingEnabled(False)
            self.sortByColumn((- 1), QtCore.Qt.SortOrder.AscendingOrder)
            self.setSortingEnabled(True)
    def clear(self):
        model = self.source_model()
        model.clear()
        self.setSortingEnabled(False)
        self.sortByColumn((- 1), QtCore.Qt.SortOrder.AscendingOrder)
        self.setSortingEnabled(True)
    def set_index(self, index):
        model = self.source_model()
        model.set_index(index)
    def set_model(self, model_class):
        # Rebuild the table with a different layout model class.
        model = self.source_model()
        new_model = model.copy_model(model_class)
        self.setModel(new_model)
def prepare_batch_inputs_audio(batched_model_inputs, device, non_blocking=False):
    """Move batched (feature, mask) pairs for text/video/audio onto *device* and
    build the optional target dict (span / saliency labels).

    Returns (model_inputs, targets) where targets is None when no labels exist.
    """
    def to_dev(t):
        return t.to(device, non_blocking=non_blocking)

    model_inputs = dict(
        src_txt=to_dev(batched_model_inputs['query_feat'][0]),
        src_txt_mask=to_dev(batched_model_inputs['query_feat'][1]),
        src_vid=to_dev(batched_model_inputs['video_feat'][0]),
        src_vid_mask=to_dev(batched_model_inputs['video_feat'][1]),
        src_aud=to_dev(batched_model_inputs['audio_feat'][0]),
        src_aud_mask=to_dev(batched_model_inputs['audio_feat'][1]),
    )
    targets = {}
    if ('span_labels' in batched_model_inputs):
        targets['span_labels'] = [dict(spans=to_dev(e['spans'])) for e in batched_model_inputs['span_labels']]
    if ('saliency_pos_labels' in batched_model_inputs):
        # Positive and negative saliency labels always travel together.
        for name in ['saliency_pos_labels', 'saliency_neg_labels']:
            targets[name] = to_dev(batched_model_inputs[name])
    if ('saliency_all_labels' in batched_model_inputs):
        targets['saliency_all_labels'] = to_dev(batched_model_inputs['saliency_all_labels'])
    return (model_inputs, targets if targets else None)
class RetrievalRecall(Metric[torch.Tensor]):
    """Streaming retrieval-recall@k metric supporting multiple queries.

    Keeps, per query, only the running top-k scores and their targets so the
    state stays bounded as batches arrive.

    NOTE(review): the bare ``_mode()`` lines between methods look like garbled
    decorators (e.g. ``@torch.inference_mode()``) — confirm against upstream.
    """
    def __init__(self: TRetrievalRecall, *, empty_target_action: Union[(Literal['neg'], Literal['pos'], Literal['skip'], Literal['err'])]='neg', k: Optional[int]=None, limit_k_to_size: bool=False, num_queries: int=1, avg: Optional[Union[(Literal['macro'], Literal['none'])]]=None, device: Optional[torch.device]=None) -> None:
        # empty_target_action controls scoring for queries with no positive target:
        # 'pos' -> 1.0, 'neg' -> 0.0, 'skip' -> NaN, 'err' -> raise.
        _retrieval_recall_param_check(k, limit_k_to_size)
        super().__init__(device=device)
        self.empty_target_action = empty_target_action
        self.num_queries = num_queries
        self.k = k
        self.limit_k_to_size = limit_k_to_size
        self.avg = avg
        # One running top-k buffer (scores + matching targets) per query.
        self._add_state('topk', [torch.empty(0) for _ in range(num_queries)])
        self._add_state('target', [torch.empty(0) for _ in range(num_queries)])
    _mode()
    def update(self: TRetrievalRecall, input: torch.Tensor, target: torch.Tensor, indexes: Optional[torch.Tensor]=None) -> TRetrievalRecall:
        """Accumulate a batch of (score, target) pairs, routed by *indexes*
        when tracking more than one query."""
        _retrieval_recall_update_input_check(input, target, num_queries=self.num_queries, indexes=indexes)
        if (self.num_queries == 1):
            self.update_single_query(0, input, target)
            return self
        if (indexes is None):
            raise ValueError('`indexes` must be passed during update() when num_queries > 1.')
        for i in range(self.num_queries):
            if (i in indexes):
                self.update_single_query(i, input[(indexes == i)], target[(indexes == i)])
        return self
    def update_single_query(self, i: int, input: torch.Tensor, target: torch.Tensor) -> None:
        # Merge the new batch into query i's buffer, then re-truncate to top-k
        # (targets are gathered so they stay aligned with the kept scores).
        batch_preds = torch.cat([self.topk[i], input])
        batch_targets = torch.cat([self.target[i], target])
        preds_topk = get_topk(batch_preds, self.k)
        self.topk[i] = preds_topk[0]
        self.target[i] = batch_targets.gather(dim=(- 1), index=preds_topk[1])
    _mode()
    def compute(self: TRetrievalRecall) -> torch.Tensor:
        """Return the per-query recall (or macro average when avg='macro')."""
        rp = []
        for i in range(self.num_queries):
            if (not len(self.target[i])):
                # Query never updated: undefined recall.
                rp.append(torch.tensor([torch.nan]))
            elif (1 not in self.target[i]):
                # No positive targets for this query — apply the configured policy.
                if (self.empty_target_action == 'pos'):
                    rp.append(torch.tensor([1.0]))
                elif (self.empty_target_action == 'neg'):
                    rp.append(torch.tensor([0.0]))
                elif (self.empty_target_action == 'skip'):
                    rp.append(torch.tensor([torch.nan]))
                elif (self.empty_target_action == 'err'):
                    raise ValueError(f'no positive value found in target={self.target[i]}.')
            else:
                rp.append(retrieval_recall(self.topk[i], self.target[i], self.k, self.limit_k_to_size).reshape((- 1)))
        rp = torch.cat(rp).to(self.device)
        if (self.avg == 'macro'):
            return rp.nanmean()
        else:
            return rp
    _mode()
    def merge_state(self: TRetrievalRecall, metrics: Iterable[TRetrievalRecall]) -> TRetrievalRecall:
        """Fold other metric instances' buffers into this one (distributed sync)."""
        for i in range(self.num_queries):
            self.topk[i] = torch.cat(([self.topk[i]] + [m.topk[i] for m in metrics])).to(self.device)
            self.target[i] = torch.cat(([self.target[i]] + [m.target[i] for m in metrics])).to(self.device)
        return self
class DataSuite():
    """Base class for data-driven test suites.

    Subclasses set `files` to the list of .test data files and implement
    `run_case` for each parsed test case.
    """
    files: list[str]
    base_path = test_temp_dir
    data_prefix = test_data_prefix
    # When False, a missing [out] section in a test case is allowed.
    required_out_section = False
    native_sep = False
    test_name_suffix = ''
    def setup(self) -> None:
        """Set up fixtures (ad-hoc); no-op hook for subclasses to override.

        Bug fix: the method had no body at all (a syntax error); restored as a
        docstring-only no-op.
        """
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        raise NotImplementedError
@pytest.mark.fast
def test_all_slit_shapes(FWHM=0.4, verbose=True, plot=True, close_plots=True, *args, **kwargs):
    """Apply every supported slit shape (gaussian, triangular, trapezoidal, and
    an experimental slit read from file) and check the resulting FWHM.

    NOTE(review): the decorator line was truncated to bare ``.fast`` — restored
    as ``@pytest.mark.fast`` (invalid syntax otherwise).
    """
    _clean(plot, close_plots)
    from radis.spectrum.spectrum import Spectrum
    from radis.test.utils import getTestFile
    s = Spectrum.from_txt(getTestFile('calc_N2C_spectrum_Trot1200_Tvib3000.txt'), quantity='radiance_noslit', wunit='nm', unit='mW/cm2/sr/m')
    wstep = np.diff(s.get_wavelength())[0]
    # FWHM of each applied slit must match the requested one within the grid step.
    s.apply_slit(FWHM, unit='nm', shape='gaussian', plot_slit=plot)
    assert np.isclose(get_FWHM(*s.get_slit()), FWHM, atol=(2 * wstep))
    s.apply_slit(FWHM, unit='nm', shape='triangular', plot_slit=plot)
    assert np.isclose(get_FWHM(*s.get_slit()), FWHM, atol=(2 * wstep))
    s.apply_slit(((FWHM * 0.9), (FWHM * 1.1)), unit='nm', shape='trapezoidal', plot_slit=plot)
    assert np.isclose(get_FWHM(*s.get_slit()), FWHM, atol=(2 * wstep))
    # Experimental slit function: compare effective FWHM instead.
    s.apply_slit(getTestFile('slitfunction.txt'), unit='nm', plot_slit=plot)
    assert np.isclose(get_effective_FWHM(*s.get_slit()), FWHM, atol=0.01)
    if verbose:
        print('\n>>> _test_all_slits yield correct FWHM (+- wstep) : OK\n')
    return True
def keras_sequential_conv_net():
    """Build a small Keras Sequential conv net: 28x28x3 input, one conv block
    (conv -> batchnorm -> relu -> average pool), then a 10-unit dense layer."""
    layers = [
        tf.keras.layers.Input(shape=(28, 28, 3)),
        tf.keras.layers.Conv2D(4, kernel_size=3, activation=None),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Activation('relu'),
        tf.keras.layers.AvgPool2D(),
        tf.keras.layers.Dense(10),
    ]
    return tf.keras.Sequential(layers)
class L2Step(AttackerStep):
    """PGD attack step constrained to an L2 ball of radius `self.eps`."""
    def project(self, x):
        """Clip the perturbation (x - orig_input) back into the L2 ball."""
        perturbation = x - self.orig_input
        perturbation = perturbation.renorm(p=2, dim=0, maxnorm=self.eps)
        return self.orig_input + perturbation
    def make_step(self, g):
        """Return a step of length `step_size` along the L2-normalized gradient."""
        flat_norm = ch.norm(g.view(g.shape[0], (- 1)), dim=1).view((- 1), 1, 1, 1)
        # Small epsilon guards against division by a zero-gradient norm.
        unit_g = g / (flat_norm + 1e-10)
        return unit_g * self.step_size
    def random_perturb(self, x):
        """Uniform random start inside the L2 ball.

        NOTE(review): uses dim=1 here vs dim=0 in project — confirm intended.
        """
        return (ch.rand_like(x) - 0.5).renorm(p=2, dim=1, maxnorm=self.eps)
class SawyerDoorCloseEnv(SawyerDoorEnv):
    """Sawyer door-closing task: start with the door open and reward pulling
    the handle to the target (closed) position."""
    def __init__(self):
        super().__init__()
        # Initial object/hand poses specific to the door-close variant.
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0.1, 0.95, 0.1], dtype=np.float32), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)}
        self.goal = np.array([0.2, 0.8, 0.15])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
    def reset_model(self):
        """Reset the hand, place the door (optionally at a random position),
        open it, and precompute the max pull distance for reward scaling."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        if self.random_init:
            obj_pos = self._get_state_rand_vec()
            self.obj_init_pos = obj_pos
            # Goal sits at a fixed offset from the (possibly random) door position.
            goal_pos = (obj_pos.copy() + np.array([0.1, (- 0.15), 0.05]))
            self._target_pos = goal_pos
        self.sim.model.body_pos[self.model.body_name2id('door')] = self.obj_init_pos
        self.sim.model.site_pos[self.model.site_name2id('goal')] = self._target_pos
        # Start with the door joint fully open (-pi/2 rad).
        self._set_obj_xyz((- 1.5708))
        # Distances use x,y only (drop z via [:-1]).
        self.maxPullDist = np.linalg.norm((self.data.get_geom_xpos('handle')[:(- 1)] - self._target_pos[:(- 1)]))
        self.target_reward = ((1000 * self.maxPullDist) + (1000 * 2))
        return self._get_obs()
    def compute_reward(self, actions, obs):
        """Reward = reach term (negative gripper-to-handle distance) plus a pull
        term that activates once the gripper is within 5 cm of the handle.

        Returns [reward, reachDist, pullDist].
        """
        del actions
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        # x,y-plane distance from the handle to the closed-door goal.
        pullDist = np.linalg.norm((objPos[:(- 1)] - pullGoal[:(- 1)]))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        reachRew = (- reachDist)
        self.reachCompleted = (reachDist < 0.05)
        def pullReward():
            # c2/c3 shape two exponential bonuses that sharpen near the goal.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            if self.reachCompleted:
                pullRew = ((1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
                pullRew = max(pullRew, 0)
                return pullRew
            else:
                return 0
        pullRew = pullReward()
        reward = (reachRew + pullRew)
        return [reward, reachDist, pullDist]
def test_plot_area_def_w_swath_def(create_test_swath):
    """plot_area_def must save with the requested format when given a SwathDefinition."""
    swath = _gen_swath_def_numpy(create_test_swath)
    # Intercept savefig so nothing is written to disk.
    with mock.patch('matplotlib.pyplot.savefig') as savefig_mock:
        plot_area_def(swath, fmt='svg')
        savefig_mock.assert_called_with(ANY, format='svg', bbox_inches='tight')
def _path_tree_for_react_dnd_treeview(tree: list, id_to_path_dict: dict, path: str, parent: int, highlighted_files: list=[]) -> list:
for item in os.listdir(path):
if item.startswith('.'):
continue
item_path = os.path.join(path, item)
droppable = os.path.isdir(item_path)
idx = (len(tree) + 1)
tree.append({'id': idx, 'parent': parent, 'droppable': droppable, 'text': item, 'highlight': (True if (item_path in highlighted_files) else False)})
id_to_path_dict[idx] = item_path
if os.path.isdir(item_path):
_path_tree_for_react_dnd_treeview(tree, id_to_path_dict, item_path, idx)
return [] |
@pytest.mark.parametrize('repo, commit_parser, translator, commit_messages,prerelease, expected_new_version', xdist_sort_hack([(lazy_fixture(repo_fixture_name), lazy_fixture(parser_fixture_name), translator, commit_messages, prerelease, expected_new_version) for ((repo_fixture_name, parser_fixture_name, translator), values) in {('repo_with_git_flow_scipy_commits', 'default_scipy_parser', VersionTranslator(prerelease_token='alpha')): [*((commits, True, '1.2.0-alpha.2') for commits in ([], ['uninteresting'])), *((commits, False, '1.2.0') for commits in ([], ['uninteresting'])), (lazy_fixture('scipy_commits_patch'), False, '1.2.0'), (lazy_fixture('scipy_commits_patch'), True, '1.2.0-alpha.3'), (lazy_fixture('scipy_commits_minor'), False, '1.2.0'), (lazy_fixture('scipy_commits_minor'), True, '1.2.0-alpha.3'), (lazy_fixture('scipy_commits_major'), False, '2.0.0'), (lazy_fixture('scipy_commits_major'), True, '2.0.0-alpha.1')], ('repo_with_git_flow_and_release_channels_scipy_commits', 'default_scipy_parser', VersionTranslator(prerelease_token='alpha')): [*((commits, True, '1.1.0-alpha.3') for commits in ([], ['uninteresting'])), *((commits, False, '1.1.0') for commits in ([], ['uninteresting'])), (lazy_fixture('scipy_commits_patch'), False, '1.1.0'), (lazy_fixture('scipy_commits_patch'), True, '1.1.0-alpha.4'), (lazy_fixture('scipy_commits_minor'), False, '1.1.0'), (lazy_fixture('scipy_commits_minor'), True, '1.1.0-alpha.4'), (lazy_fixture('scipy_commits_major'), False, '2.0.0'), (lazy_fixture('scipy_commits_major'), True, '2.0.0-alpha.1')]}.items() for (commit_messages, prerelease, expected_new_version) in values]))
@pytest.mark.parametrize('major_on_zero', [True, False])
def test_algorithm_no_zero_dot_versions_scipy(repo, file_in_repo, commit_parser, translator, commit_messages, prerelease, expected_new_version, major_on_zero):
    """Commit the given messages, then check next_version() computes the expected
    semantic version for scipy-style commit conventions.

    NOTE(review): both decorator lines were truncated to bare ``.parametrize`` —
    restored as ``@pytest.mark.parametrize`` (invalid syntax otherwise).
    """
    for commit_message in commit_messages:
        add_text_to_file(repo, file_in_repo)
        repo.git.commit(m=commit_message)
    new_version = next_version(repo, translator, commit_parser, prerelease, major_on_zero)
    assert (new_version == Version.parse(expected_new_version, prerelease_token=translator.prerelease_token))
class AnyExpressionsReporter(AbstractReporter):
    """Report how many expressions in each module have type ``Any``.

    Produces two plain-text reports in ``output_dir``:
    ``any-exprs.txt`` (per-module Any/total expression counts and coverage)
    and ``types-of-anys.txt`` (per-module breakdown by kind of Any).
    """

    def __init__(self, reports: Reports, output_dir: str) -> None:
        super().__init__(reports, output_dir)
        # Per-module (num_any_exprs, num_total_exprs) pairs.
        self.counts: dict[(str, tuple[(int, int)])] = {}
        # Per-module counter mapping "type of Any" codes to occurrence counts.
        self.any_types_counter: dict[(str, collections.Counter[int])] = {}

    def on_file(self, tree: MypyFile, modules: dict[(str, MypyFile)], type_map: dict[(Expression, Type)], options: Options) -> None:
        """Collect Any-expression statistics for a single analyzed file."""
        visitor = stats.StatisticsVisitor(inferred=True, filename=tree.fullname, modules=modules, typemap=type_map, all_nodes=True, visit_untyped_defs=False)
        tree.accept(visitor)
        self.any_types_counter[tree.fullname] = visitor.type_of_any_counter
        # Unanalyzed lines are pessimistically counted as Any expressions.
        num_unanalyzed_lines = list(visitor.line_map.values()).count(stats.TYPE_UNANALYZED)
        num_any = (visitor.num_any_exprs + num_unanalyzed_lines)
        num_total = ((visitor.num_imprecise_exprs + visitor.num_precise_exprs) + num_any)
        if (num_total > 0):
            self.counts[tree.fullname] = (num_any, num_total)

    def on_finish(self) -> None:
        """Write both report files once all modules have been processed."""
        self._report_any_exprs()
        self._report_types_of_anys()

    def _write_out_report(self, filename: str, header: list[str], rows: list[list[str]], footer: list[str]) -> None:
        """Write a right-aligned, column-padded text table to *filename*.

        Each of header, every row, and footer must have the same number of
        columns; column widths are sized to the widest value in each column.
        """
        row_len = len(header)
        assert all(((len(row) == row_len) for row in (rows + [header, footer])))
        # Minimum gap (in spaces) between adjacent columns.
        min_column_distance = 3
        widths = ([(- 1)] * row_len)
        for row in (rows + [header, footer]):
            for (i, value) in enumerate(row):
                widths[i] = max(widths[i], len(value))
        for (i, w) in enumerate(widths):
            # All columns except the first get the inter-column gap added.
            if (i > 0):
                widths[i] = (w + min_column_distance)
        # NOTE(review): opened without an explicit encoding — uses the
        # platform default; confirm this is acceptable for report output.
        with open(os.path.join(self.output_dir, filename), 'w') as f:
            # '{:>{}}' right-aligns each value within its computed width;
            # chain(*zip(...)) interleaves values with their widths.
            header_str = ('{:>{}}' * len(widths)).format(*itertools.chain(*zip(header, widths)))
            separator = ('-' * len(header_str))
            f.write((header_str + '\n'))
            f.write((separator + '\n'))
            for row_values in rows:
                r = ('{:>{}}' * len(widths)).format(*itertools.chain(*zip(row_values, widths)))
                f.write((r + '\n'))
            f.write((separator + '\n'))
            footer_str = ('{:>{}}' * len(widths)).format(*itertools.chain(*zip(footer, widths)))
            f.write((footer_str + '\n'))

    def _report_any_exprs(self) -> None:
        """Emit any-exprs.txt: per-module Any counts and type-coverage %."""
        total_any = sum((num_any for (num_any, _) in self.counts.values()))
        total_expr = sum((total for (_, total) in self.counts.values()))
        # With no expressions at all, report 100% coverage rather than 0/0.
        total_coverage = 100.0
        if (total_expr > 0):
            total_coverage = ((float((total_expr - total_any)) / float(total_expr)) * 100)
        column_names = ['Name', 'Anys', 'Exprs', 'Coverage']
        rows: list[list[str]] = []
        for filename in sorted(self.counts):
            (num_any, num_total) = self.counts[filename]
            coverage = ((float((num_total - num_any)) / float(num_total)) * 100)
            coverage_str = f'{coverage:.2f}%'
            rows.append([filename, str(num_any), str(num_total), coverage_str])
        rows.sort(key=(lambda x: x[0]))
        total_row = ['Total', str(total_any), str(total_expr), f'{total_coverage:.2f}%']
        self._write_out_report('any-exprs.txt', column_names, rows, total_row)

    def _report_types_of_anys(self) -> None:
        """Emit types-of-anys.txt: per-module counts for each kind of Any."""
        total_counter: collections.Counter[int] = collections.Counter()
        for counter in self.any_types_counter.values():
            for (any_type, value) in counter.items():
                total_counter[any_type] += value
        file_column_name = 'Name'
        total_row_name = 'Total'
        column_names = ([file_column_name] + list(type_of_any_name_map.values()))
        rows: list[list[str]] = []
        for (filename, counter) in self.any_types_counter.items():
            rows.append(([filename] + [str(counter[typ]) for typ in type_of_any_name_map]))
        rows.sort(key=(lambda x: x[0]))
        total_row = ([total_row_name] + [str(total_counter[typ]) for typ in type_of_any_name_map])
        self._write_out_report('types-of-anys.txt', column_names, rows, total_row)
def construct_pred_set(predicted_args, cur_event, context_words, doc, args):
    """Align predicted argument strings with token spans in the context.

    For every (entity_type, entity_text) candidate under each role, try an
    exact span match first; on failure retry with lowercased tokens where
    hyphenated words are split apart. Matches become tuples of
    (start, end, event_type, role, entity_type); failures are collected.

    Returns:
        (predicted_set, not_matched_pred_args) — the set of matched span
        tuples and a list of dicts describing candidates that never matched.
    """
    trig_start = cur_event['trigger']['start']
    trig_end = cur_event['trigger']['end']
    event_type = cur_event['event_type']

    lower_words = [w.lower() for w in context_words]
    # A lowercased spaCy doc is only needed for head-word matching.
    lower_doc = nlp(' '.join(lower_words)) if args.head_only else None

    matched = set()
    unmatched = []
    for role, candidates in predicted_args.items():
        for entity_type, entity_text in candidates:
            if entity_text is None:
                continue
            entity_text: List[str]
            span = find_arg_span(entity_text, context_words, trig_start, trig_end,
                                 head_only=args.head_only, doc=doc)
            if not span:
                # Fallback: lowercase everything and split hyphenated tokens
                # ("state-of-the-art" -> "state - of - the - art").
                normalized = []
                for tok in entity_text:
                    tok = tok.lower()
                    if '-' in tok and len(tok) > 1:
                        normalized.extend(tok.replace('-', ' - ').split())
                    else:
                        normalized.append(tok)
                span = find_arg_span(normalized, lower_words, trig_start, trig_end,
                                     head_only=args.head_only, doc=lower_doc)
            if span:
                matched.add((span[0], span[1], event_type, role, entity_type))
            else:
                unmatched.append({'role': role, 'entity_type': entity_type, 'text': entity_text})
    return (matched, unmatched)
class TestWeightSvdPruning(unittest.TestCase):
    """Unit tests for WeightSvdPruner on the MNIST reference model."""

    def test_prune_layer(self):
        """Pruning conv2 alone at 0.5 comp-ratio splits it into two convs.

        Expected decomposition: a (1,1) conv from 32 channels down to rank 15,
        followed by the original (5,5) conv from 15 up to 64 channels, wrapped
        together in an nn.Sequential replacing the original conv2.
        """
        model = mnist_model.Net()
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        orig_layer_db = LayerDatabase(model, dummy_input)
        # The pruner mutates the compressed DB while consulting the original.
        comp_layer_db = copy.deepcopy(orig_layer_db)
        conv2 = comp_layer_db.find_layer_by_name('conv2')
        weight_svd_pruner = WeightSvdPruner()
        weight_svd_pruner._prune_layer(orig_layer_db, comp_layer_db, conv2, 0.5, aimet_common.defs.CostMetric.mac)
        conv2_a = comp_layer_db.find_layer_by_name('conv2.0')
        conv2_b = comp_layer_db.find_layer_by_name('conv2.1')
        # First stage: 1x1 rank-reduction conv (32 -> 15).
        self.assertEqual((1, 1), conv2_a.module.kernel_size)
        self.assertEqual(32, conv2_a.module.in_channels)
        self.assertEqual(15, conv2_a.module.out_channels)
        # Second stage: original 5x5 kernel restored to 64 output channels.
        self.assertEqual((5, 5), conv2_b.module.kernel_size)
        self.assertEqual(15, conv2_b.module.in_channels)
        self.assertEqual(64, conv2_b.module.out_channels)
        self.assertTrue(isinstance(comp_layer_db.model.conv2, nn.Sequential))
        for layer in comp_layer_db:
            print(('Layer: ' + layer.name))
            print(('    Module: ' + str(layer.module)))
        print(comp_layer_db.model)

    def test_prune_model_2_layers(self):
        """prune_model() with fc1 and conv2 at 0.5 splits both layers.

        Verifies the SVD decomposition shapes of both replaced layers and
        that each is wrapped in an nn.Sequential in the returned database.
        """
        model = mnist_model.Net()
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        fc1 = layer_db.find_layer_by_name('fc1')
        conv2 = layer_db.find_layer_by_name('conv2')
        pruner = WeightSvdPruner()
        layer_db = pruner.prune_model(layer_db, [LayerCompRatioPair(fc1, Decimal(0.5)), LayerCompRatioPair(conv2, Decimal(0.5))], aimet_common.defs.CostMetric.mac, trainer=None)
        fc1_a = layer_db.find_layer_by_name('fc1.0')
        fc1_b = layer_db.find_layer_by_name('fc1.1')
        # fc1 keeps its outer dimensions (3136 in, 1024 out) across the split.
        self.assertEqual(3136, fc1_a.module.in_features)
        self.assertEqual(1024, fc1_b.module.out_features)
        conv2_a = layer_db.find_layer_by_name('conv2.0')
        conv2_b = layer_db.find_layer_by_name('conv2.1')
        self.assertEqual((1, 1), conv2_a.module.kernel_size)
        self.assertEqual(32, conv2_a.module.in_channels)
        self.assertEqual(15, conv2_a.module.out_channels)
        self.assertEqual((5, 5), conv2_b.module.kernel_size)
        self.assertEqual(15, conv2_b.module.in_channels)
        self.assertEqual(64, conv2_b.module.out_channels)
        self.assertTrue(isinstance(layer_db.model.fc1, nn.Sequential))
        self.assertTrue(isinstance(layer_db.model.conv2, nn.Sequential))
        for layer in layer_db:
            print(('Layer: ' + layer.name))
            print(('    Module: ' + str(layer.module)))
        print(layer_db.model)
def test_eigen_transform_ket():
    """Applying the diagonalized operator in the eigenbasis must agree with
    applying the operator directly in the original basis.

    Fix: the reference expression was the syntactically invalid ``(op state)``;
    the intended computation is the operator-state product ``op @ state``.
    """
    N = 5
    a = qutip.destroy(N)
    # Hermitian test operator: a*a.dag() + a + a.dag().
    op = (((a * a.dag()) + a) + a.dag())
    eigenT = _EigenBasisTransform(qutip.QobjEvo(op))
    op_diag = qutip.qdiags(eigenT.eigenvalues(0), [0])
    state = qutip.coherent(N, 1.1)
    expected = (op @ state).full()
    # Round-trip: transform the state into the eigenbasis, apply the diagonal
    # operator there, then transform back.
    computed = eigenT.from_eigbasis(
        0, qutip.data.matmul(op_diag.data, eigenT.to_eigbasis(0, state.data))
    ).to_array()
    np.testing.assert_allclose(computed, expected, rtol=1e-14, atol=1e-14)
def test_atlas_glyps():
    """Stored glyph regions get sequential indices retrievable by hash,
    and storing a few glyphs does not reallocate the backing array."""
    assert isinstance(global_atlas, GlyphAtlas)
    atlas = GlyphAtlas()
    gs = 50
    array_id = id(atlas._array)
    # Unknown hash -> no index yet.
    assert (atlas.get_index_from_hash('0') is None)
    i0 = atlas.store_region_with_hash('0', glyphgen(gs))
    assert isinstance(i0, int)
    # Bug fix: this comparison was a bare expression and never checked anything.
    assert (atlas.get_index_from_hash('0') == i0)
    i1 = atlas.store_region_with_hash('1', glyphgen(gs))
    i2 = atlas.store_region_with_hash('2', glyphgen(gs))
    i3 = atlas.store_region_with_hash('3', glyphgen(gs))
    # Indices are handed out sequentially starting at 0.
    assert ([i0, i1, i2, i3] == [0, 1, 2, 3])
    # The backing array must not have been reallocated for this few glyphs.
    assert (array_id == id(atlas._array))
def eval_video_single(cfg, models, device, test_loader, interp, fixed_test_size, verbose):
    """Evaluate an ensemble of video-segmentation models and report mIoU.

    Each model's softmax output is combined as a weighted sum using
    cfg.TEST.MODEL_WEIGHT. Colorized prediction masks are written to
    ``<SNAPSHOT_DIR>/best_results`` and per-class IoU plus mIoU are printed.

    Fixes:
      * The ensemble loop previously always invoked ``models[0]``, so the
        weights were applied to copies of the first model's prediction;
        it now calls the current ``model``.
      * An unknown cfg.SOURCE used to crash later with a NameError on
        ``palette``; it now fails fast with a clear error.
    """
    # Per-dataset RGB palettes (flat [r,g,b, r,g,b, ...]) for mask coloring.
    if (cfg.SOURCE == 'Viper'):
        palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 190, 153, 153, 250, 170, 30, 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 0, 0, 142, 0, 0, 70, 0, 60, 100, 0, 0, 230, 119, 11, 32]
    elif (cfg.SOURCE == 'SynthiaSeq'):
        palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 190, 153, 153, 153, 153, 153, 250, 170, 30, 220, 220, 0, 107, 142, 35, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142]
    else:
        raise ValueError(f'Unsupported cfg.SOURCE: {cfg.SOURCE}')
    # Pad the palette out to the full 256-entry (768 value) PIL palette.
    zero_pad = ((256 * 3) - len(palette))
    for i in range(zero_pad):
        palette.append(0)

    def colorize_mask(mask):
        """Convert a uint8 label mask to a palettized PIL image."""
        new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
        new_mask.putpalette(palette)
        return new_mask

    num_classes = cfg.NUM_CLASSES
    assert (len(cfg.TEST.RESTORE_FROM) == len(models)), 'Number of models are not matched'
    for (checkpoint, model) in zip(cfg.TEST.RESTORE_FROM, models):
        load_checkpoint_for_evaluation(model, checkpoint, device)
    hist = np.zeros((cfg.NUM_CLASSES, cfg.NUM_CLASSES))
    for (index, batch) in tqdm(enumerate(test_loader)):
        (image, label, image2, _, _, name) = batch
        # Derive the frame number from the filename and load the optical flow
        # between this frame and the previous one (stored as int16 scaled x10).
        file_name = name[0].split('/')[(- 1)]
        frame = int(file_name.replace('_leftImg8bit.png', '')[(- 6):])
        frame1 = (frame - 1)
        flow_int16_x10_name = file_name.replace('leftImg8bit.png', (str(frame1).zfill(6) + '_int16_x10'))
        flow_int16_x10 = np.load(os.path.join(cfg.TEST.flow_path, (flow_int16_x10_name + '.npy')))
        flow = torch.from_numpy((flow_int16_x10 / 10.0)).permute(2, 0, 1).unsqueeze(0)
        if (not fixed_test_size):
            # Resize predictions to each label's native resolution.
            interp = nn.Upsample(size=(label.shape[1], label.shape[2]), mode='bilinear', align_corners=True)
        with torch.no_grad():
            output = None
            for (model, model_weight) in zip(models, cfg.TEST.MODEL_WEIGHT):
                # Bug fix: previously called models[0] regardless of the loop
                # variable, so the ensemble never used the other models.
                pred_main = model(image.cuda(device), image2.cuda(device), flow, device)[1]
                output_ = interp(pred_main).cpu().data[0].numpy()
                if (output is None):
                    output = (model_weight * output_)
                else:
                    output += (model_weight * output_)
            assert (output is not None), 'Output is None'
            output = output.transpose(1, 2, 0)
            output = np.argmax(output, axis=2)
        # Save a colorized prediction for visual inspection.
        amax_output_col = colorize_mask(np.asarray(output, dtype=np.uint8))
        name = name[0].split('/')[(- 1)]
        image_name = name.split('.')[0]
        os.makedirs((cfg.TEST.SNAPSHOT_DIR[0] + '/best_results'), exist_ok=True)
        amax_output_col.save(('%s/%s_color.png' % ((cfg.TEST.SNAPSHOT_DIR[0] + '/best_results'), image_name)))
        label = label.numpy()[0]
        hist += fast_hist(label.flatten(), output.flatten(), cfg.NUM_CLASSES)
    inters_over_union_classes = per_class_iu(hist)
    if (cfg.SOURCE == 'SynthiaSeq'):
        # Drop class index 3 (absent from SynthiaSeq evaluation).
        inters_over_union_classes = np.concatenate((inters_over_union_classes[:3], inters_over_union_classes[4:]))
    print(f'mIoU = {round((np.nanmean(inters_over_union_classes) * 100), 2)}')
    print([np.round((iou * 100), 1) for iou in inters_over_union_classes.tolist()])
def test_emit_warning_when_event_loop_fixture_is_redefined(pytester: Pytester):
    """A user-redefined ``event_loop`` fixture must trigger a deprecation
    warning while the test itself still passes.

    Fix: the decorators inside the generated file had been stripped
    (``@pytest.fixture`` and ``@pytest.mark.asyncio``); without them the
    inner test neither redefines the fixture nor runs as asyncio, so the
    expected warning is never emitted. They are restored here.
    """
    pytester.makepyfile(
        dedent(
            '''\
            import asyncio
            import pytest

            @pytest.fixture
            def event_loop():
                loop = asyncio.new_event_loop()
                yield loop
                loop.close()

            @pytest.mark.asyncio
            async def test_emits_warning():
                pass
            '''
        )
    )
    result = pytester.runpytest('--asyncio-mode=strict', '-W default')
    result.assert_outcomes(passed=1, warnings=1)
    result.stdout.fnmatch_lines(['*event_loop fixture provided by pytest-asyncio has been redefined*'])
class Plugin(KeepKeyPlugin, QtPlugin):
    """Qt GUI integration for KeepKey hardware wallets."""

    # Status-bar icons for the paired / unpaired device states.
    icon_unpaired = 'keepkey_unpaired.png'
    icon_paired = 'keepkey.png'

    def pin_matrix_widget_class(self):
        """Return the KeepKey PIN-matrix widget class (imported lazily so the
        Qt dependency is only required when actually used)."""
        from keepkeylib.qt.pinmatrix import PinMatrixWidget
        return PinMatrixWidget

    def create_handler(self, window):
        """Build a QtHandler bound to *window* for this plugin's device."""
        return QtHandler(window, self.pin_matrix_widget_class(), self.device)
def configInputQueue():
    """Start a daemon thread that forwards single keystrokes into a queue.

    The reader stops (after queuing the final character) when Ctrl-C
    ('\\x03') or Ctrl-D ('\\x04') is received.

    Returns:
        (input_queue, input_thread) — the queue receiving characters and
        the daemon thread feeding it.
    """
    def _reader(q):
        # Blocking single-character reads, forwarded until a break key.
        while True:
            ch = getch()
            if ch in ('\x03', '\x04'):
                log.debug('Break received (\\x{0:02X})'.format(ord(ch)))
                q.put(ch)
                break
            log.debug("Input Char '{}' received".format(ch if ch != '\r' else '\\r'))
            q.put(ch)

    input_queue = queue.Queue()
    input_thread = threading.Thread(target=_reader, args=(input_queue,))
    input_thread.daemon = True
    input_thread.start()
    return (input_queue, input_thread)
class KeywordImpression(BaseImpression):
    """Daily impression aggregates per (publisher, advertisement, keyword).

    NOTE(review): the ``date`` field referenced below is presumably declared
    on ``BaseImpression`` — confirm against that base class.
    """

    keyword = models.CharField(_('Keyword'), max_length=1000)
    publisher = models.ForeignKey(Publisher, related_name='keyword_impressions', on_delete=models.PROTECT)
    # Nullable, so aggregate rows can exist without a specific advertisement.
    advertisement = models.ForeignKey(Advertisement, related_name='keyword_impressions', on_delete=models.PROTECT, null=True)

    class Meta():
        ordering = ('-date',)
        # At most one row per publisher/advertisement/day/keyword combination.
        unique_together = ('publisher', 'advertisement', 'date', 'keyword')

    def __str__(self):
        """Human-readable summary used in admin listings and logs."""
        return ('Keyword %s of %s on %s' % (self.keyword, self.advertisement, self.date))
class HotelRoom():
    """GraphQL type describing a bookable hotel room.

    NOTE(review): a class decorator (presumably ``@strawberry.type``) and
    per-method resolver decorators appear to have been stripped from this
    snippet — confirm against the original module. The resolver methods
    below read ``self.available_bed_layouts`` and ``self.conference``,
    which suggests instances are backed by a Django model object; verify.
    """

    id: str
    # Localized fields resolved per-request via make_localized_resolver.
    name: str = strawberry.field(resolver=make_localized_resolver('name'))
    description: str = strawberry.field(resolver=make_localized_resolver('description'))
    price: str
    is_sold_out: bool
    capacity_left: int

    def available_bed_layouts(self) -> List[BedLayout]:
        # NOTE(review): on a plain instance this name resolves to the method
        # itself, not a related manager; works only if the underlying model
        # attribute shadows it at resolve time — confirm.
        return self.available_bed_layouts.all()

    def check_in_dates(self) -> List[str]:
        # One check-in date per conference night, starting on the first day.
        # NOTE(review): returns date objects despite the List[str] annotation;
        # presumably serialized downstream — confirm.
        conference_start = self.conference.start
        conference_end = self.conference.end
        nights = (conference_end - conference_start).days
        return [(conference_start + timedelta(days=night)).date() for night in range(nights)]

    def check_out_dates(self) -> List[str]:
        # Check-out dates run from the second day through the day after the
        # last night (hence nights + 1 and range starting at 1).
        conference_start = self.conference.start
        conference_end = self.conference.end
        nights = ((conference_end - conference_start).days + 1)
        return [(conference_start + timedelta(days=night)).date() for night in range(1, nights)]
def test_quantsim_export_quantizer_args():
    """Exporting a QuantizationSimModel must record the quantizer settings
    (bitwidths, scheme, dtype, symmetry) under 'quantizer_args' in the
    generated encodings file.

    Cleanup: removed an unused random input array that was generated but
    never consumed.
    """
    # The functional-model API under test requires TF 2.x.
    if (version.parse(tf.version.VERSION) >= version.parse('2.00')):
        model = dense_functional()
        qsim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, default_param_bw=16, default_output_bw=16)
        qsim.export('./data', 'test_export_with_quant_args')
        with open('./data/test_export_with_quant_args.encodings') as json_file:
            encoding_data = json.load(json_file)
        assert ('quantizer_args' in encoding_data)
        quantizer_args = encoding_data['quantizer_args']
        assert (quantizer_args['activation_bitwidth'] == 16)
        assert (quantizer_args['param_bitwidth'] == 16)
        assert (not quantizer_args['per_channel_quantization'])
        assert (quantizer_args['quant_scheme'] == QuantScheme.post_training_tf_enhanced.name)
        assert (quantizer_args['dtype'] == 'int')
        assert quantizer_args['is_symmetric']
@pytest.mark.parametrize('use_enemy_attribute_randomizer', [False, True])
def test_on_preset_changed(skip_qtbot, preset_manager, use_enemy_attribute_randomizer):
    """The enemy-attribute editor must round-trip its widget values into the
    preset configuration (or leave enemy_attributes None when disabled).

    Fix: the ``@pytest.mark.parametrize`` decorator had been mangled into a
    bare ``.parametrize(...)`` expression (a syntax error); restored. A
    redundant ``else: pass`` branch was also removed.
    """
    base = preset_manager.default_preset_for_game(RandovaniaGame.METROID_PRIME).get_preset()
    preset = dataclasses.replace(base, uuid=uuid.UUID('b41fde84-1f57-4b79-8cd6-3e5a78077fa6'))
    options = MagicMock()
    editor = PresetEditor(preset, options)
    window = PresetEnemyAttributeRandomizer(editor, default_database.game_description_for(preset.game), MagicMock())
    if use_enemy_attribute_randomizer:
        # Enable the randomizer and set every range widget to a distinct value.
        skip_qtbot.mouseClick(window.activate_randomizer, Qt.MouseButton.LeftButton)
        window.range_scale_low.setValue(1.2)
        window.range_scale_high.setValue(1.7)
        window.range_health_low.setValue(0.12)
        window.range_health_high.setValue(1.272)
        window.range_speed_low.setValue(3.292)
        window.range_speed_high.setValue(7.2)
        window.range_damage_low.setValue(9.2)
        window.range_damage_high.setValue(99.21)
        window.range_knockback_low.setValue(0.2)
        window.range_knockback_high.setValue(0.5147)
        skip_qtbot.mouseClick(window.diff_xyz, Qt.MouseButton.LeftButton)
    window.on_preset_changed(editor.create_custom_preset_with())
    config = editor.configuration
    assert isinstance(config, PrimeConfiguration)
    if use_enemy_attribute_randomizer:
        assert (config.enemy_attributes.enemy_rando_range_scale_low == window.range_scale_low.value())
        assert (config.enemy_attributes.enemy_rando_range_scale_high == window.range_scale_high.value())
        assert (config.enemy_attributes.enemy_rando_range_health_low == window.range_health_low.value())
        assert (config.enemy_attributes.enemy_rando_range_health_high == window.range_health_high.value())
        assert (config.enemy_attributes.enemy_rando_range_speed_low == window.range_speed_low.value())
        assert (config.enemy_attributes.enemy_rando_range_speed_high == window.range_speed_high.value())
        assert (config.enemy_attributes.enemy_rando_range_damage_low == window.range_damage_low.value())
        assert (config.enemy_attributes.enemy_rando_range_damage_high == window.range_damage_high.value())
        assert (config.enemy_attributes.enemy_rando_range_knockback_low == window.range_knockback_low.value())
        assert (config.enemy_attributes.enemy_rando_range_knockback_high == window.range_knockback_high.value())
        assert (config.enemy_attributes.enemy_rando_diff_xyz == window.diff_xyz.isChecked())
    else:
        assert (config.enemy_attributes is None)
def register_model(name, dataclass=None):
    """Class decorator that registers a model under *name*.

    Optionally associates a FairseqDataclass config, storing it in the
    Hydra ConfigStore and registering the model as its own default
    architecture.

    Fix: the ``@register_model_architecture(name, name)`` decorator had been
    mangled into a dangling bare call, leaving ``noop`` undecorated and the
    default architecture unregistered; restored per upstream fairseq.
    """
    def register_model_cls(cls):
        if (name in MODEL_REGISTRY):
            raise ValueError('Cannot register duplicate model ({})'.format(name))
        if (not issubclass(cls, BaseFairseqModel)):
            raise ValueError('Model ({}: {}) must extend BaseFairseqModel'.format(name, cls.__name__))
        MODEL_REGISTRY[name] = cls
        if ((dataclass is not None) and (not issubclass(dataclass, FairseqDataclass))):
            raise ValueError('Dataclass {} must extend FairseqDataclass'.format(dataclass))
        cls.__dataclass = dataclass
        if (dataclass is not None):
            MODEL_DATACLASS_REGISTRY[name] = dataclass
            # Publish the config node to Hydra's ConfigStore under group "model".
            cs = ConfigStore.instance()
            node = dataclass()
            node._name = name
            cs.store(name=name, group='model', node=node, provider='fairseq')

            # Register the model as its own (default) architecture.
            @register_model_architecture(name, name)
            def noop(_):
                pass

        return cls
    return register_model_cls
class dist_info(Command):
    """Create a ``.dist-info`` metadata directory for the distribution.

    Internal setuptools command driven by build frontends; it runs
    ``egg_info`` and converts the result via ``bdist_wheel.egg2dist``.

    Fixes:
      * ``_maybe_bkp_dir`` is a generator used with ``with`` but its
        ``@contextlib.contextmanager`` decorator had been stripped; it is
        now wrapped explicitly so it returns a real context manager.
      * Repaired mangled help strings ("will becreated", empty "(e.g. )").
    """

    description = 'DO NOT CALL DIRECTLY, INTERNAL ONLY: create .dist-info directory'
    user_options = [
        ('output-dir=', 'o', 'directory inside of which the .dist-info will be created (default: top of the source tree)'),
        ('tag-date', 'd', 'Add date stamp (e.g. 20050528) to version number'),
        ('tag-build=', 'b', 'Specify explicit tag to add to version number'),
        ('no-date', 'D', "Don't include date stamp [default]"),
        ('keep-egg-info', None, '*TRANSITIONAL* will be removed in the future'),
    ]
    boolean_options = ['tag-date', 'keep-egg-info']
    negative_opt = {'no-date': 'tag-date'}

    def initialize_options(self):
        self.output_dir = None
        self.name = None
        self.dist_info_dir = None
        self.tag_date = None
        self.tag_build = None
        self.keep_egg_info = False

    def finalize_options(self):
        """Resolve output paths and synchronize tag options with egg_info."""
        dist = self.distribution
        project_dir = (dist.src_root or os.curdir)
        self.output_dir = Path((self.output_dir or project_dir))
        egg_info = self.reinitialize_command('egg_info')
        egg_info.egg_base = str(self.output_dir)
        # Tag options flow in whichever direction was explicitly set.
        if self.tag_date:
            egg_info.tag_date = self.tag_date
        else:
            self.tag_date = egg_info.tag_date
        if self.tag_build:
            egg_info.tag_build = self.tag_build
        else:
            self.tag_build = egg_info.tag_build
        egg_info.finalize_options()
        self.egg_info = egg_info
        # Normalize name/version for the PEP 427-style directory name.
        name = _normalization.safer_name(dist.get_name())
        version = _normalization.safer_best_effort_version(dist.get_version())
        self.name = f'{name}-{version}'
        self.dist_info_dir = os.path.join(self.output_dir, f'{self.name}.dist-info')

    def _maybe_bkp_dir(self, dir_path: str, requires_bkp: bool):
        """Return a context manager that optionally preserves *dir_path*.

        When *requires_bkp* is true, a backup copy is made before the body
        runs and moved back afterwards (the possibly-modified original is
        removed). Otherwise the body runs with no side effects.
        """
        import contextlib

        @contextlib.contextmanager
        def _cm():
            if requires_bkp:
                bkp_name = f'{dir_path}.__bkp__'
                _rm(bkp_name, ignore_errors=True)
                _copy(dir_path, bkp_name, dirs_exist_ok=True, symlinks=True)
                try:
                    yield
                finally:
                    _rm(dir_path, ignore_errors=True)
                    shutil.move(bkp_name, dir_path)
            else:
                yield

        return _cm()

    def run(self):
        """Generate egg-info metadata and convert it into the .dist-info dir."""
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.egg_info.run()
        egg_info_dir = self.egg_info.egg_info
        assert os.path.isdir(egg_info_dir), '.egg-info dir should have been created'
        log.info("creating '{}'".format(os.path.abspath(self.dist_info_dir)))
        bdist_wheel = self.get_finalized_command('bdist_wheel')
        # Optionally keep the intermediate .egg-info around after conversion.
        with self._maybe_bkp_dir(egg_info_dir, self.keep_egg_info):
            bdist_wheel.egg2dist(egg_info_dir, self.dist_info_dir)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.