code stringlengths 281 23.7M |
|---|
# NOTE(review): this line was garbled to a bare `.skipif(...)` (a SyntaxError);
# restored the `@pytest.mark` prefix — confirm against the module's other skips.
@pytest.mark.skipif(IS_WIN, reason='Flaky on Windows')
def test_initialize(client_server_pair):
    """Initialize the language server and check notebook-sync capabilities.

    Uses the project `client_server_pair` fixture and
    `send_initialize_request` helper; asserts the server acquired a
    workspace and that the advertised notebook selector is a list.
    """
    (client, server) = client_server_pair
    response = send_initialize_request(client)
    assert server.workspace is not None
    selector = response['capabilities']['notebookDocumentSync']['notebookSelector']
    assert isinstance(selector, list)
def copy_codebase(args):
    """Snapshot the current source tree into the experiment's output folder.

    Copies the repository root (three directory levels above this file) to
    ``<output_dir>/<logs>/<name>/code``, skipping log/wandb artifacts.

    Returns:
        -1 when the target directory already exists, 1 on success
        (legacy status-code convention kept for existing callers).
    """
    from shutil import copytree, ignore_patterns

    destination = os.path.join(args.output_dir, args.logs, args.name, 'code')
    if os.path.exists(destination):
        print(f'Error. Experiment already exists at {destination}. Use --name to specify a new experiment.')
        return -1
    print(f'Copying codebase to {destination}')
    # Walk three levels up from this file to reach the repository root.
    source = os.path.realpath(__file__)
    for _ in range(3):
        source = os.path.dirname(source)
    copytree(source, destination, ignore=ignore_patterns('log', 'logs', 'wandb'))
    print('Done copying code.')
    return 1
def get_account_and_private_key(account_manager: AccountManager, address: Optional[Address], password_file: Optional[TextIO]) -> Tuple[(Address, PrivateKey)]:
    """Resolve an account address and unlock its private key.

    When no address is supplied the user is prompted to pick one from the
    account manager; the key is then unlocked either from a password file
    or via an interactive password prompt.
    """
    # Fall back to interactive selection when the caller gave no address.
    address_hex = prompt_account(account_manager) if not address else to_checksum_address(address)
    if password_file:
        privatekey_bin = unlock_account_with_passwordfile(
            account_manager=account_manager,
            address_hex=address_hex,
            password_file=password_file,
        )
    else:
        privatekey_bin = unlock_account_with_passwordprompt(
            account_manager=account_manager,
            address_hex=address_hex,
        )
    return (to_canonical_address(address_hex), privatekey_bin)
class ConfigFile(pg_api.Settings):
    """Settings interface backed by a PostgreSQL-style configuration file.

    Reads and writes ``key = value`` lines through the module-level
    ``read_config``/``alter_config`` helpers.  Supports transactional use:
    ``cf(a=1)`` queues overrides and ``with cf:`` applies them, restoring
    the previous values on exit.
    """
    _e_factors = ('path',)
    _e_label = 'CONFIGFILE'

    def _e_metas(self):
        # Element-protocol metadata: report the number of settings.
        (yield (None, len(self.keys())))

    def __init__(self, path, open=open):
        # `open` is injectable so tests can substitute an in-memory file.
        self.path = path
        self._open = open
        self._store = []
        # BUG FIX: `_restore` is used as a stack (`.append`/`[-1]`/`del`)
        # in __enter__/__exit__, so it must be a list, not a dict; and
        # `_restored` (the scratch dict used during restore) was never
        # initialized, making __exit__ raise AttributeError.
        self._restore = []
        self._restored = {}

    def __repr__(self):
        return ('%s.%s(%r)' % (type(self).__module__, type(self).__name__, self.path))

    def _save(self, lines: list):
        # Rewrite the whole file with the given raw lines.
        with self._open(self.path, 'w') as cf:
            for l in lines:
                cf.write(l)

    def __delitem__(self, k):
        with self._open(self.path) as cf:
            lines = alter_config({k: None}, cf)
        # BUG FIX: previously called self._save() with no argument, which
        # raised TypeError and never persisted the deletion.
        self._save(lines)

    def __getitem__(self, k):
        with self._open(self.path) as cfo:
            return read_config(cfo, selector=k.__eq__)[k]

    def __setitem__(self, k, v):
        self.update({k: v})

    def __call__(self, **kw):
        # Queue a set of overrides to be applied by the next `with` block.
        self._store.insert(0, kw)

    def __context__(self):
        return self

    def __iter__(self):
        # iter() makes this safe whether keys() returns a view or an
        # iterator (a bare view is not a valid __iter__ return value).
        return iter(self.keys())

    def __len__(self):
        return len(list(self.keys()))

    def __enter__(self):
        # Apply the oldest queued override set, remembering prior values.
        res = self.getset(self._store[0].keys())
        self.update(self._store[0])
        del self._store[0]
        self._restore.append(res)

    def __exit__(self, exc, val, tb):
        # Restore the values saved by the matching __enter__.
        self._restored.update(self._restore[-1])
        del self._restore[-1]
        self.update(self._restored)
        self._restored.clear()
        return (exc is None)

    def get(self, k, alt=None):
        with self._open(self.path) as cf:
            return read_config(cf, selector=k.__eq__).get(k, alt)

    def keys(self):
        return read_config(self._open(self.path)).keys()

    def values(self):
        return read_config(self._open(self.path)).values()

    def items(self):
        return read_config(self._open(self.path)).items()

    def update(self, keyvals):
        with self._open(self.path) as cf:
            lines = alter_config(keyvals, cf)
        self._save(lines)

    def getset(self, keys):
        """Return a dict of the current values of `keys` (None if absent)."""
        keys = set(keys)
        with self._open(self.path) as cfo:
            cfg = read_config(cfo, selector=keys.__contains__)
        for x in (keys - set(cfg.keys())):
            cfg[x] = None
        return cfg
class RepBlock(nn.Module):
    """RepVGG-style re-parameterizable block.

    At train time runs three parallel branches (3x3 conv+BN, 1x1 conv+BN,
    and a BN identity when in/out channels match); `_switch_to_deploy`
    fuses them into a single 3x3 conv (`brb_rep`) for inference.
    """

    def __init__(self, input_channel, output_channel, kernel_size=3, groups=1, stride=1, deploy=False, use_se=False):
        super().__init__()
        self.use_se = use_se
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.deploy = deploy
        self.kernel_size = kernel_size
        self.padding = (kernel_size // 2)
        self.groups = groups
        self.activation = nn.ReLU()
        # The branch-fusion math below is only valid for a 3x3 kernel
        # with padding 1.
        assert (self.kernel_size == 3)
        assert (self.padding == 1)
        if not self.deploy:
            self.brb_3x3 = _conv_bn(input_channel, output_channel, kernel_size=self.kernel_size, padding=self.padding, groups=groups)
            self.brb_1x1 = _conv_bn(input_channel, output_channel, kernel_size=1, padding=0, groups=groups)
            # Identity branch exists only when a pure skip is shape-legal.
            self.brb_identity = (nn.BatchNorm2d(self.input_channel) if (self.input_channel == self.output_channel) else None)
        else:
            # NOTE(review): `groups` is not forwarded here, unlike the
            # fused conv built in _switch_to_deploy — confirm deploy=True
            # construction is only used with groups=1.
            self.brb_rep = nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=self.kernel_size, padding=self.padding, padding_mode='zeros', stride=stride, bias=True)
        if self.use_se:
            self.se = SEBlock(input_channel, (input_channel // 16))
        else:
            self.se = nn.Identity()

    def forward(self, inputs):
        if self.deploy:
            return self.activation(self.se(self.brb_rep(inputs)))
        # Idiom fix: compare against None with `is`, not `==`.
        if self.brb_identity is None:
            identity_out = 0
        else:
            identity_out = self.brb_identity(inputs)
        return self.activation(self.se(((self.brb_1x1(inputs) + self.brb_3x3(inputs)) + identity_out)))

    def _switch_to_deploy(self):
        """Fuse the three branches into `brb_rep` and drop the originals."""
        self.deploy = True
        (kernel, bias) = self._get_equivalent_kernel_bias()
        self.brb_rep = nn.Conv2d(in_channels=self.brb_3x3.conv.in_channels, out_channels=self.brb_3x3.conv.out_channels, kernel_size=self.brb_3x3.conv.kernel_size, padding=self.brb_3x3.conv.padding, padding_mode=self.brb_3x3.conv.padding_mode, stride=self.brb_3x3.conv.stride, groups=self.brb_3x3.conv.groups, bias=True)
        self.brb_rep.weight.data = kernel
        self.brb_rep.bias.data = bias
        # Detach everything so the fused module carries no stale autograd
        # history from the training branches.
        for para in self.parameters():
            para.detach_()
        self.__delattr__('brb_3x3')
        self.__delattr__('brb_1x1')
        self.__delattr__('brb_identity')

    def _pad_1x1_kernel(self, kernel):
        """Zero-pad a 1x1 kernel to 3x3 so it can be summed with brb_3x3."""
        if kernel is None:
            return 0
        else:
            return F.pad(kernel, ([1] * 4))

    def _get_equivalent_kernel_bias(self):
        """Return the single (kernel, bias) equivalent to all branches."""
        (brb_3x3_weight, brb_3x3_bias) = self._fuse_conv_bn(self.brb_3x3)
        (brb_1x1_weight, brb_1x1_bias) = self._fuse_conv_bn(self.brb_1x1)
        (brb_id_weight, brb_id_bias) = self._fuse_conv_bn(self.brb_identity)
        return (((brb_3x3_weight + self._pad_1x1_kernel(brb_1x1_weight)) + brb_id_weight), ((brb_3x3_bias + brb_1x1_bias) + brb_id_bias))

    def _fuse_conv_bn(self, branch):
        """Fold a conv+BN branch (or a bare BN identity) into (kernel, bias).

        Returns (0, 0) for a missing branch so the sums above stay valid.
        """
        if branch is None:
            return (0, 0)
        elif isinstance(branch, nn.Sequential):
            kernel = branch.conv.weight
            running_mean = branch.bn.running_mean
            running_var = branch.bn.running_var
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn.eps
        else:
            assert isinstance(branch, nn.BatchNorm2d)
            if not hasattr(self, 'id_tensor'):
                # Build (once) a conv kernel that implements the identity
                # map: a single 1 at the spatial center per channel group.
                input_dim = (self.input_channel // self.groups)
                kernel_value = np.zeros((self.input_channel, input_dim, 3, 3), dtype=np.float32)
                for i in range(self.input_channel):
                    kernel_value[(i, (i % input_dim), 1, 1)] = 1
                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
            kernel = self.id_tensor
            running_mean = branch.running_mean
            running_var = branch.running_var
            gamma = branch.weight
            beta = branch.bias
            eps = branch.eps
        # Standard BN folding: w' = w * gamma/std, b' = beta - mean*gamma/std.
        std = (running_var + eps).sqrt()
        t = (gamma / std)
        t = t.view((- 1), 1, 1, 1)
        return ((kernel * t), (beta - ((running_mean * gamma) / std)))
def test_runresult_assertion_on_xfail(pytester: Pytester) -> None:
    """An xfailed test is counted by assert_outcomes and exits with 0."""
    # NOTE(review): the inline module source had been flattened — the
    # `@pytest.mark` prefix on the xfail decorator and the body
    # indentation were lost, making the generated file unimportable.
    # Restored here (makepyfile dedents the common leading whitespace).
    pytester.makepyfile(
        '\n'
        '        import pytest\n'
        '\n'
        '        pytest_plugins = "pytester"\n'
        '\n'
        '        @pytest.mark.xfail\n'
        '        def test_potato():\n'
        '            assert False\n'
        '        '
    )
    result = pytester.runpytest()
    result.assert_outcomes(xfailed=1)
    assert (result.ret == 0)
def decimalToBinaryFixLength(_length, _decimal):
    """Convert a decimal number to a numpy array of binary digits.

    The array is left-padded with zeros up to ``_length`` entries; if the
    binary representation needs more digits than ``_length``, it is
    returned unpadded (i.e. longer than requested).
    """
    digits = np.array([int(bit) for bit in bin(int(_decimal))[2:]])
    pad = _length - len(digits)
    if pad > 0:
        # The padded result inherits float dtype from np.zeros, matching
        # the original implementation's behavior.
        return np.concatenate((np.zeros((pad,)), digits))
    return digits
# NOTE(review): this decorator line was garbled to a bare `_module()`;
# restored to mmdetection's registry decorator — confirm the registry
# object name against the file's imports.
@DETECTORS.register_module()
class MaskScoringRCNN(TwoStageDetector):
    """Mask Scoring R-CNN detector (arXiv:1903.00241).

    Pure pass-through subclass: all behavior (including the mask-IoU
    rescoring roi_head) is configured via the TwoStageDetector base.
    """

    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None):
        super(MaskScoringRCNN, self).__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)
# NOTE(review): this decorator line was garbled to `_fixtures(WebFixture)`;
# restored to reahl's `@with_fixtures` — confirm against sibling tests.
@with_fixtures(WebFixture)
def test_column_slots(web_fixture):
    """Each column created via ColumnLayout(...).with_slots() exposes a
    slot named after the column."""
    fixture = web_fixture
    widget = Div(fixture.view).use_layout(ColumnLayout('column_name_a', 'column_name_b').with_slots())
    (column_a, column_b) = widget.layout.columns.values()
    assert 'column_name_a' in column_a.available_slots
    assert 'column_name_b' in column_b.available_slots
class Version():
    """Abstract base for version-number classes (distutils lineage).

    Subclasses supply ``parse``, ``__str__`` and ``_cmp``; every rich
    comparison below is derived from ``_cmp`` and propagates
    NotImplemented for foreign operand types.
    """

    def __init__(self, vstring=None):
        if vstring:
            self.parse(vstring)
        warnings.warn('distutils Version classes are deprecated. Use packaging.version instead.', DeprecationWarning, stacklevel=2)

    def __repr__(self):
        return "{} ('{}')".format(self.__class__.__name__, str(self))

    def __eq__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome == 0

    def __lt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome < 0

    def __le__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome <= 0

    def __gt__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome > 0

    def __ge__(self, other):
        outcome = self._cmp(other)
        return outcome if outcome is NotImplemented else outcome >= 0
def test_popup_sticky():
    """A sticky Popup renders with autoClose/closeOnClick disabled."""
    fmap = Map()
    sticky_popup = Popup('Some text.', sticky=True).add_to(fmap)
    actual = sticky_popup._template.render(this=sticky_popup, kwargs={})
    html_name = list(sticky_popup.html._children.keys())[0]
    expected = '\n var {popup_name} = L.popup({{\n "autoClose": false, "closeOnClick": false, "maxWidth": "100%"\n }});\n var {html_name} = $(`<div id="{html_name}" style="width: 100.0%; height: 100.0%;">Some text.</div>`)[0];\n {popup_name}.setContent({html_name});\n {map_name}.bindPopup({popup_name});\n '.format(popup_name=sticky_popup.get_name(), html_name=html_name, map_name=fmap.get_name())
    # Whitespace-insensitive comparison of the emitted Leaflet JS.
    assert normalize(actual) == normalize(expected)
def parallel_apply(fct, model, inputs, device_ids):
    """Apply `fct(replica, *input)` to per-device model replicas in parallel.

    A simplified variant of torch.nn.parallel.parallel_apply: replicates
    `model` onto `device_ids`, runs `fct` on each (replica, input) pair in
    its own thread, and re-raises any worker exception in the caller.
    Results are returned in input order.
    """
    modules = nn.parallel.replicate(model, device_ids)
    assert (len(modules) == len(inputs))
    lock = threading.Lock()
    results = {}
    # Captured here so worker threads inherit the caller's grad mode
    # (threads do not share torch's thread-local grad state).
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input):
        torch.set_grad_enabled(grad_enabled)
        # get_a_var presumably digs a tensor out of a nested input to
        # learn its device — TODO confirm helper semantics.
        device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = fct(module, *input)
                with lock:
                    results[i] = output
        except Exception:
            # Store the exception (with traceback) instead of raising in
            # the worker thread; it is re-raised below in the caller.
            with lock:
                results[i] = ExceptionWrapper(where='in replica {} on device {}'.format(i, device))
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input)) for (i, (module, input)) in enumerate(zip(modules, inputs))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single device: run inline, no thread overhead.
        _worker(0, modules[0], inputs[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, ExceptionWrapper):
            output.reraise()
        outputs.append(output)
    return outputs
def crop_face(image, rotate=True, quiet_mode=True):
    """Detect the highest-confidence face in `image` and crop around it.

    Uses the module-level `detector` (MTCNN-style API returning dicts
    with 'box'/'confidence'/'keypoints' — TODO confirm) and returns
    (cropped PIL image, keypoints) or (None, None) when no face is found.
    The crop is a square centered between the eye midpoint and the mouth
    midpoint, with side 2x the larger of eye width / eye-mouth height.
    """
    (height, width, channels) = image.shape
    detections = detector.detect_faces(image)
    # Convert to PIL after detection so rotation/cropping can use PIL ops.
    image = PIL_image_convert(image)
    if ((detections == None) or (len(detections) == 0)):  # NOTE(review): `== None` should be `is None`
        if (not quiet_mode):
            print('***No Face detected. ***')
        return (None, None)
    if (len(detections) > 1):
        if (not quiet_mode):
            print('*** Multi Faces ,get the face with largest confidence ***')
    # Highest-confidence detection (also correct for the single-face case).
    detection = sorted(detections, key=(lambda x: x['confidence']), reverse=True)[0]
    bounding_box = detection['box']
    keypoints = detection['keypoints']
    (lex, ley) = keypoints['left_eye']
    (rex, rey) = keypoints['right_eye']
    (rmx, rmy) = keypoints['mouth_right']
    (lmx, lmy) = keypoints['mouth_left']
    (nex, ney) = keypoints['nose']
    if rotate:
        # Align the eye line horizontally; keypoints are remapped to the
        # rotated frame by image_rote.
        angle = calculate_angle(lex, ley, rex, rey)
        (image, lex, ley, rex, rey, lmx, lmy, rmx, rmy) = image_rote(image, angle, lex, ley, rex, rey, lmx, lmy, rmx, rmy)
    eye_width = (rex - lex)
    (ecx, ecy) = (((lex + rex) / 2.0), ((ley + rey) / 2.0))
    mouth_width = (rmx - lmx)
    (mcx, mcy) = (((lmx + rmx) / 2.0), ((lmy + rmy) / 2.0))
    em_height = (mcy - ecy)
    # Face center: midpoint of eye-center and mouth-center.
    (fcx, fcy) = (((ecx + mcx) / 2.0), ((ecy + mcy) / 2.0))
    if (eye_width > em_height):
        alpha = eye_width
    else:
        alpha = em_height
    # g_beta scales the half-size of the square crop.
    g_beta = 2.0
    g_left = (fcx - ((alpha / 2.0) * g_beta))
    g_upper = (fcy - ((alpha / 2.0) * g_beta))
    g_right = (fcx + ((alpha / 2.0) * g_beta))
    g_lower = (fcy + ((alpha / 2.0) * g_beta))
    g_face = image.crop((g_left, g_upper, g_right, g_lower))
    return (g_face, keypoints)
class Poly(_LRScheduler):
    """Polynomial learning-rate decay (power 0.9) with optional linear warmup.

    The global step is tracked as ``last_epoch * iters_per_epoch +
    cur_iter``.  Note the historical quirk, preserved here: ``get_lr``
    itself advances ``cur_iter``, so calling it has a side effect.
    """

    def __init__(self, optimizer, num_epochs, iters_per_epoch=0, warmup_epochs=0, last_epoch=(- 1)):
        self.iters_per_epoch = iters_per_epoch
        self.cur_iter = 0
        self.N = num_epochs * iters_per_epoch            # total training iterations
        self.warmup_iters = warmup_epochs * iters_per_epoch
        super(Poly, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        step = self.last_epoch * self.iters_per_epoch + self.cur_iter
        scale = pow(1 - (1.0 * step) / self.N, 0.9)
        if self.warmup_iters > 0 and step < self.warmup_iters:
            # Linear ramp from 0 to the base learning rate during warmup.
            scale = (1.0 * step) / self.warmup_iters
        # Side effect kept for compatibility: advance the intra-epoch
        # counter, wrapping at epoch boundaries.
        self.cur_iter %= self.iters_per_epoch
        self.cur_iter += 1
        return [base_lr * scale for base_lr in self.base_lrs]
class TestTransformerConvert(unittest.TestCase):
    """Tests for Transformer.convert(): argument validation and the
    resulting output_format/effects state, plus end-to-end builds."""

    def test_default(self):
        tfm = new_transformer()
        tfm.convert()
        # No arguments -> no output-format overrides.
        self.assertEqual({}, tfm.output_format)
        self.assertEqual(True, tfm.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm)

    def test_samplerate_valid(self):
        tfm = new_transformer()
        tfm.convert(samplerate=8000)
        # samplerate is implemented as a high-quality `rate` effect.
        self.assertEqual(['rate', '-h', '8000.000000'], tfm.effects)
        self.assertEqual(['rate'], tfm.effects_log)
        self.assertEqual(True, tfm.build(INPUT_FILE, OUTPUT_FILE))
        tfm.set_output_format(file_type='raw', rate=8000)
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm, test_file_out=False)
        tfm.set_output_format(file_type='wav', rate=8000)
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm, skip_array_tests=True)

    def test_samplerate_invalid(self):
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.convert(samplerate=0)

    def test_channels_valid(self):
        tfm = new_transformer()
        tfm.convert(n_channels=3)
        self.assertEqual({'channels': 3}, tfm.output_format)
        self.assertEqual(True, tfm.build(INPUT_FILE, OUTPUT_FILE))
        tfm.set_output_format(file_type='raw', channels=3)
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm, test_file_out=False)
        tfm.set_output_format(file_type='wav', channels=3)
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm, skip_array_tests=True)

    def test_channels_invalid1(self):
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.convert(n_channels=0)

    def test_channels_invalid2(self):
        # Non-integer channel counts are rejected too.
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.convert(n_channels=1.5)

    def test_bitdepth_valid(self):
        tfm = new_transformer()
        tfm.convert(bitdepth=8)
        self.assertEqual({'bits': 8}, tfm.output_format)
        self.assertEqual(True, tfm.build(INPUT_FILE, OUTPUT_FILE))

    def test_bitdepth_valid2(self):
        tfm = new_transformer()
        tfm.convert(bitdepth=16)
        self.assertEqual({'bits': 16}, tfm.output_format)
        self.assertEqual(True, tfm.build(INPUT_FILE, OUTPUT_FILE))
        tfm.set_output_format(file_type='raw', bits=16)
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm, test_file_out=False)
        tfm.set_output_format(file_type='wav', bits=16)
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm, skip_array_tests=True)

    def test_bitdepth_invalid(self):
        # Only standard bit depths are accepted.
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.convert(bitdepth=17)
class Solution(object):
    """LeetCode 16: 3Sum Closest."""

    def threeSumClosest(self, nums, target):
        """Return the sum of the three elements of ``nums`` closest to
        ``target``.

        Assumes ``len(nums) >= 3``.  Classic O(n^2) two-pointer scan over
        the sorted array; improvement over the original: returns early on
        an exact match, which cannot be beaten.
        """
        ls = len(nums)
        sort_nums = sorted(nums)
        # Seed with an arbitrary valid triple sum.
        res = nums[0] + nums[1] + nums[2]
        for i in range(ls - 2):
            j, k = i + 1, ls - 1
            while j < k:
                temp = sort_nums[i] + sort_nums[j] + sort_nums[k]
                if abs(target - temp) < abs(target - res):
                    res = temp
                if temp == target:
                    # Distance 0 is optimal — stop immediately.
                    return res
                if temp < target:
                    j += 1
                else:
                    k -= 1
        return res
class StereoDepthCamera(Camera):
    """Camera wrapper around SAPIEN's simulated StereoDepthSensor.

    Depth comes from the stereo sensor's compute_depth() rather than a
    ground-truth Z-buffer; RGB/Position/Segmentation textures are read
    from the sensor's internal RGB camera.
    """

    def __init__(self, camera_cfg: StereoDepthCameraConfig, scene: sapien.Scene, renderer_type: str, articulation: sapien.Articulation=None):
        self.camera_cfg = camera_cfg
        # Stereo depth simulation is only implemented for the native
        # 'sapien' renderer.
        assert (renderer_type == 'sapien'), renderer_type
        self.renderer_type = renderer_type
        actor_uid = camera_cfg.actor_uid
        if (actor_uid is None):
            # Free-floating camera (not mounted to any actor).
            self.actor = None
        else:
            # Look the mount up among plain actors, or among the
            # articulation's links when one is given.
            if (articulation is None):
                self.actor = get_entity_by_name(scene.get_all_actors(), actor_uid)
            else:
                self.actor = get_entity_by_name(articulation.get_links(), actor_uid)
            if (self.actor is None):
                raise RuntimeError(f'Mount actor ({actor_uid}) is not found')
        sensor_config = StereoDepthSensorConfig()
        sensor_config.rgb_resolution = camera_cfg.rgb_resolution
        sensor_config.rgb_intrinsic = camera_cfg.rgb_intrinsic
        sensor_config.min_depth = camera_cfg.min_depth
        if (self.actor is None):
            # Unmounted: pose is set directly in world frame.
            self.camera = StereoDepthSensor(camera_cfg.uid, scene, sensor_config, mount=self.actor)
            self.camera.set_pose(camera_cfg.pose)
        else:
            # Mounted: pose is interpreted relative to the mount actor.
            self.camera = StereoDepthSensor(camera_cfg.uid, scene, sensor_config, mount=self.actor, pose=camera_cfg.pose)
            if camera_cfg.hide_link:
                self.actor.hide_visual()
        self.texture_names = camera_cfg.texture_names

    def get_images(self, take_picture=False):
        """Render and return the configured textures as a dict of arrays."""
        if take_picture:
            self.take_picture()
        # NOTE(review): renderer_type is asserted to be 'sapien' in
        # __init__, so this 'client' early-out looks unreachable here —
        # presumably kept for interface parity with the base Camera.
        if (self.renderer_type == 'client'):
            return {}
        images = {}
        for name in self.texture_names:
            if (name == 'Color'):
                image = self.camera._cam_rgb.get_float_texture('Color')
            elif (name == 'depth'):
                # Stereo-computed depth, expanded to (H, W, 1).
                self.camera.compute_depth()
                image = self.camera.get_depth()[(..., None)]
            elif (name == 'Position'):
                # Replace the ground-truth Z with the (negated) stereo
                # depth so downstream consumers see simulated-sensor depth.
                self.camera.compute_depth()
                position = self.camera._cam_rgb.get_float_texture('Position')
                depth = self.camera.get_depth()
                position[(..., 2)] = (- depth)
                image = position
            elif (name == 'Segmentation'):
                image = self.camera._cam_rgb.get_uint32_texture('Segmentation')
            else:
                raise NotImplementedError(name)
            images[name] = image
        return images

    def get_params(self):
        """Camera parameters (OpenCV extrinsic, GL model, intrinsic)."""
        return dict(extrinsic_cv=self.camera._cam_rgb.get_extrinsic_matrix(), cam2world_gl=self.camera._cam_rgb.get_model_matrix(), intrinsic_cv=self.camera._cam_rgb.get_intrinsic_matrix())

    def observation_space(self) -> spaces.Dict:
        """Gym observation space matching get_images() (no 'depth' entry)."""
        obs_spaces = OrderedDict()
        (width, height) = (self.camera._cam_rgb.width, self.camera._cam_rgb.height)
        for name in self.texture_names:
            if (name == 'Color'):
                obs_spaces[name] = spaces.Box(low=0, high=1, shape=(height, width, 4), dtype=np.float32)
            elif (name == 'Position'):
                obs_spaces[name] = spaces.Box(low=(- np.inf), high=np.inf, shape=(height, width, 4), dtype=np.float32)
            elif (name == 'Segmentation'):
                obs_spaces[name] = spaces.Box(low=np.iinfo(np.uint32).min, high=np.iinfo(np.uint32).max, shape=(height, width, 4), dtype=np.uint32)
            else:
                # NOTE(review): get_images supports 'depth' but this
                # method does not — confirm whether that is intentional.
                raise NotImplementedError(name)
        return spaces.Dict(obs_spaces)
def attach_player_object_to_player(objectplayer: int, object_id: int, attachplayer: int, offset_x: float, offset_y: float, offset_z: float, rotation_x: float, rotation_y: float, rotation_z: float) -> bool:
    """Snake_case wrapper around the `AttachPlayerObjectToPlayer` native.

    Forwards all arguments unchanged and returns the native's bool result;
    argument semantics (offsets/rotations relative to the target player)
    are defined by the underlying native — see its documentation.
    """
    return AttachPlayerObjectToPlayer(objectplayer, object_id, attachplayer, offset_x, offset_y, offset_z, rotation_x, rotation_y, rotation_z)
class ParseSelectionArgsTest(unittest.TestCase):
    """Tests for Select.parse_selection_args against the
    rdiff-backup_testfiles/select fixture tree: each case feeds
    include/exclude tuples (and optional file lists) and checks the exact
    sequence of selected index tuples."""

    # Shared root RPath; reset per test by assigning before ParseTest.
    root = None

    def ParseTest(self, tuplelist, indices, filelists=None):
        """Run a selection with `tuplelist` args and compare the resulting
        index stream to `indices` (tuples of path components)."""
        # Idiom fix: the default was a mutable `[]`; use None + local
        # default instead (behavior unchanged — the list was never mutated).
        if filelists is None:
            filelists = []

        def tuple_fsencode(filetuple):
            return tuple(map(os.fsencode, filetuple))
        if (not self.root):
            self.root = rpath.RPath(Globals.local_connection, 'rdiff-backup_testfiles/select')
        self.Select = Select(self.root)
        self.Select.parse_selection_args(tuplelist, self.remake_filelists(filelists))
        self.assertTrue(iter_equal(iter_map((lambda dsrp: dsrp.index), self.Select.get_select_iter()), map(tuple_fsencode, indices), verbose=1))

    def remake_filelists(self, filelist):
        """Wrap string/bytes entries as BytesIO; pass through file objects."""
        new_filelists = []
        for f in filelist:
            if (isinstance(f, str) or isinstance(f, bytes)):
                new_filelists.append(io.BytesIO(os.fsencode(f)))
            else:
                new_filelists.append(f)
        return new_filelists

    def testParse(self):
        self.ParseTest([('--include', 'rdiff-backup_testfiles/select/1/1'), ('--exclude', '**')], [(), ('1',), ('1', '1'), ('1', '1', '1'), ('1', '1', '2'), ('1', '1', '3')])

    def testParse2(self):
        self.ParseTest([('--exclude', 'rdiff-backup_testfiles/select/1/1/1'), ('--include', 'rdiff-backup_testfiles/select/1/1'), ('--exclude', 'rdiff-backup_testfiles/select/1'), ('--exclude', '**')], [(), ('1',), ('1', '1'), ('1', '1', '2'), ('1', '1', '3')])

    def test_globbing_filelist(self):
        self.ParseTest([('--include-globbing-filelist', 'file')], [(), ('1',), ('1', '1'), ('1', '1', '2'), ('1', '1', '3')], ['\n- rdiff-backup_testfiles/select/1/1/1\nrdiff-backup_testfiles/select/1/1\n- rdiff-backup_testfiles/select/1\n- **\n'])

    def test_globbing_filelist_winending(self):
        # Same as above but with CRLF line endings in the filelist.
        self.ParseTest([('--include-globbing-filelist', 'file')], [(), ('1',), ('1', '1'), ('1', '1', '2'), ('1', '1', '3')], ['\n- rdiff-backup_testfiles/select/1/1/1\r\nrdiff-backup_testfiles/select/1/1\r\n- rdiff-backup_testfiles/select/1\r\n- **\r\n'])

    def testGlob(self):
        self.ParseTest([('--exclude', '**[3-5]'), ('--include', 'rdiff-backup_testfiles/select/1'), ('--exclude', '**')], [(), ('1',), ('1', '1'), ('1', '1', '1'), ('1', '1', '2'), ('1', '2'), ('1', '2', '1'), ('1', '2', '2')])
        self.ParseTest([('--include', 'rdiff-backup_testfiles/select**/2'), ('--exclude', '**')], [(), ('1',), ('1', '1'), ('1', '1', '2'), ('1', '2'), ('1', '2', '1'), ('1', '2', '2'), ('1', '2', '3'), ('1', '3'), ('1', '3', '2'), ('2',), ('2', '1'), ('2', '1', '1'), ('2', '1', '2'), ('2', '1', '3'), ('2', '2'), ('2', '2', '1'), ('2', '2', '2'), ('2', '2', '3'), ('2', '3'), ('2', '3', '1'), ('2', '3', '2'), ('2', '3', '3'), ('3',), ('3', '1'), ('3', '1', '2'), ('3', '2'), ('3', '2', '1'), ('3', '2', '2'), ('3', '2', '3'), ('3', '3'), ('3', '3', '2')])

    def test_globbing_filelist2(self):
        self.ParseTest([('--exclude-globbing-filelist', 'asoeuth')], [(), ('1',), ('1', '1'), ('1', '1', '1'), ('1', '1', '2'), ('1', '2'), ('1', '2', '1'), ('1', '2', '2')], ['\n**[3-5]\n+ rdiff-backup_testfiles/select/1\n**\n'])
        self.ParseTest([('--include-globbing-filelist', 'file')], [(), ('1',), ('1', '1'), ('1', '1', '2'), ('1', '2'), ('1', '2', '1'), ('1', '2', '2'), ('1', '2', '3'), ('1', '3'), ('1', '3', '2'), ('2',), ('2', '1'), ('2', '1', '1'), ('2', '1', '2'), ('2', '1', '3'), ('2', '2'), ('2', '2', '1'), ('2', '2', '2'), ('2', '2', '3'), ('2', '3'), ('2', '3', '1'), ('2', '3', '2'), ('2', '3', '3'), ('3',), ('3', '1'), ('3', '1', '2'), ('3', '2'), ('3', '2', '1'), ('3', '2', '2'), ('3', '2', '3'), ('3', '3'), ('3', '3', '2')], ['\nrdiff-backup_testfiles/select**/2\n- **\n'])

    def testGlob2(self):
        self.ParseTest([('--include', 'rdiff-backup_testfiles/select/*foo*/p*'), ('--exclude', '**')], [(), ('efools',), ('efools', 'ping'), ('foobar',), ('foobar', 'pong')])
        self.ParseTest([('--exclude', 'rdiff-backup_testfiles/select/1/1/*'), ('--exclude', 'rdiff-backup_testfiles/select/1/2/**'), ('--exclude', 'rdiff-backup_testfiles/select/1/3**'), ('--include', 'rdiff-backup_testfiles/select/1'), ('--exclude', '**')], [(), ('1',), ('1', '1'), ('1', '2')])

    def testGlob3(self):
        """Patterns matching nothing should not raise, just select nothing."""
        self.ParseTest([('--include', '**NOTEXIST'), ('--exclude', '**NOTEXISTEITHER'), ('--include', 'rdiff-backup_testfiles/select/efools'), ('--exclude', '**')], [(), ('efools',), ('efools', 'ping')])

    def testAlternateRoot(self):
        """Selection works with roots other than the default fixture dir."""
        self.root = rpath.RPath(Globals.local_connection, 'rdiff-backup_testfiles/select/1')
        self.ParseTest([('--exclude', 'rdiff-backup_testfiles/select/1/[23]')], [(), ('1',), ('1', '1'), ('1', '2'), ('1', '3')])
        if sys.platform.startswith('win'):
            self.root = rpath.RPath(Globals.local_connection, 'C:/')
            self.ParseTest([('--exclude', 'C:/Users/*'), ('--include', 'C:/Users'), ('--exclude', 'C:/')], [(), ('Users',)])
        else:
            self.root = rpath.RPath(Globals.local_connection, '/')
            self.ParseTest([('--exclude', '/home/*'), ('--include', '/home'), ('--exclude', '/')], [(), ('home',)])
class PalTrainer(_baseTrainer):
    """Distributed-training trainer for worker (non-rank-0) processes.

    Rank 0 has extra responsibilities (checkpointing, logging) and must
    use <MainTrainer> instead; this class refuses to run on rank 0.
    """
    def __init__(self, config: Config, tmpFile: Optional[StrPath], modelFn: Callable[([], Tuple[(BaseCompressor, Distortion)])], optimizer: Type[torch.optim.Optimizer], scheduler: Type[torch.optim.lr_scheduler._LRScheduler], saver: Saver):
        # Guard against misuse from the main process.
        if (dist.get_rank() == 0):
            raise AttributeError('You should call <MainTrainer> for main process other than <PalTrainer> to save, log necessary information.')
        super().__init__(config, tmpFile, modelFn, optimizer, scheduler, saver)
    def train(self, trainLoader: DataLoader, trainSampler: DistributedSampler, *_, beforeRunHook: Optional[Callable]=None, afterRunHook: Optional[Callable]=None, epochStartHook: Optional[Callable]=None, epochFinishHook: Optional[Callable]=None, stepStartHook: Optional[Callable]=None, stepFinishHook: Optional[Callable]=None, **__):
        # Thin delegation to the base trainer; extra positional args are
        # swallowed by *_ and extra keywords dropped by **__ so callers
        # sharing MainTrainer's signature still work.
        return super().train(trainLoader, trainSampler, beforeRunHook=beforeRunHook, afterRunHook=afterRunHook, epochStartHook=epochStartHook, epochFinishHook=epochFinishHook, stepStartHook=stepStartHook, stepFinishHook=stepFinishHook)
def test_lazy_arguments(manager_nospawn):
    """Key bindings built from a lazy function receive positional and
    keyword arguments at invocation time."""
    # NOTE(review): the decorator was lost in this copy — `test_func(10)`
    # inside a Key binding only makes sense if test_func is wrapped as a
    # lazy call; restored `@lazy.function` (confirm against qtile's
    # upstream test).
    @lazy.function
    def test_func(qtile, value, multiplier=1):
        qtile.test_func_output = (value * multiplier)
    config = ServerConfig
    config.keys = [libqtile.config.Key(['control'], 'j', test_func(10)), libqtile.config.Key(['control'], 'k', test_func(5, multiplier=100))]
    manager_nospawn.start(config)
    manager_nospawn.c.simulate_keypress(['control'], 'j')
    (_, val) = manager_nospawn.c.eval('self.test_func_output')
    assert (val == '10')
    manager_nospawn.c.simulate_keypress(['control'], 'k')
    (_, val) = manager_nospawn.c.eval('self.test_func_output')
    assert (val == '500')
class YesPornPleaseCom(BaseDownloader):
    """pyLoad downloader plugin for yespornplease.com video pages."""
    __name__ = 'YesPornPleaseCom'
    __type__ = 'downloader'
    __version__ = '0.02'
    __status__ = 'testing'
    # NOTE(review): the original pattern literal was truncated in this
    # copy (`__pattern__ = '` with no closing quote, a SyntaxError);
    # reconstructed a plausible site matcher — confirm against the
    # upstream plugin history.
    __pattern__ = r'https?://(?:www\.)?yespornplease\.com/v/\d+'
    __config__ = [('enabled', 'bool', 'Activated', True), ('quality', '240p;360p;480p;720p', 'Quality', '720p')]
    __description__ = 'YesPornPlease.Com downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('ondrej', '')]
    NAME_PATTERN = '<title>(.+) watch online for free'

    def setup(self):
        self.resume_download = True
        self.multi_dl = True

    def process(self, pyfile):
        """Locate the embedded player iframe and download the chosen quality."""
        resp = self.load(pyfile.url)
        iframe_url = re.findall('<iframe src="([^"]+)', resp)
        if (not iframe_url):
            self.error(self._('Iframe url not found'))
        # NOTE(review): the scheme prefix string was lost in this copy
        # (`self.load((' + iframe_url[0]))`); iframe srcs are
        # protocol-relative (`//host/...`), so prepend 'https:'.
        iframe_resp = self.load('https:' + iframe_url[0])
        video_url = re.findall('<source src="([^"]+)', iframe_resp)
        if (not video_url):
            self.error(self._('Video url not found'))
        self.pyfile.name = re.findall(self.NAME_PATTERN, resp)[0]
        self.pyfile.name += ('.' + video_url[0].split('.')[(- 1)])
        self.log_info(self._('Downloading file...'))
        # <source> tags are ordered 720p..240p; pick the configured one.
        quality = self.config.get('quality')
        quality_index = ['720p', '480p', '360p', '240p']
        q = quality_index.index(quality)
        self.download(video_url[q])
def get_matching_robots(name_prefix, username, limit=10):
    """Find robot accounts matching `name_prefix` visible to `username`.

    Robots are named '<namespace>+<shortname>', so the search prefixes
    the user's own namespace and every organization the user admins.
    Returns a peewee query limited to `limit` rows.
    """
    admined_orgs = _basequery.get_user_organizations(username).switch(Team).join(TeamRole).where((TeamRole.name == 'admin'))
    # Fold per-org prefix conditions into one OR expression.  Starting
    # from False works because `False | <peewee expression>` delegates to
    # the expression's __ror__ and yields the expression itself.
    prefix_checks = False
    for org in admined_orgs:
        org_search = prefix_search(User.username, ((org.username + '+') + name_prefix))
        prefix_checks = (prefix_checks | org_search)
    # Also match robots under the user's own namespace.
    user_search = prefix_search(User.username, ((username + '+') + name_prefix))
    prefix_checks = (prefix_checks | user_search)
    return User.select().where(prefix_checks).limit(limit)
def get_bpm_from_data(data, sampling_rate):
    """Estimate the tempo (BPM) of an audio signal.

    Runs librosa's onset-strength based tempo estimator, prints the
    rounded result, and returns the first tempo candidate as a float.
    """
    strength_envelope = librosa.onset.onset_strength(y=data, sr=sampling_rate)
    tempo_estimate = librosa.beat.tempo(onset_envelope=strength_envelope, sr=sampling_rate)
    print(f'{ULTRASINGER_HEAD} BPM is {blue_highlighted(str(round(tempo_estimate[0], 2)))}')
    return tempo_estimate[0]
def main(old_args=False):
    """Entry point: set up config/logging, build model+optimizer, then
    optionally resume, train, and evaluate.

    When `old_args` is True, configuration comes from the legacy compat
    parser; otherwise from parse_opt()/setup().
    """
    if old_args:
        from .config.compat import compat_setup
        (cfg, args) = compat_setup()
    else:
        args = parse_opt()
        # Only append a timestamp to the experiment name for fresh runs.
        append_datetime = ((args.resume is None) and args.timestamp)
        cfg = setup(args, modify_exp_name=append_datetime)
    if (args.resume is not None):
        # Resuming: log into the existing run directory.
        logger = SummaryWriter(args.resume)
    else:
        logger = SummaryWriter(logdir=os.path.join(cfg.OUTPUT_DIR, cfg.EXP_NAME))
    print('Log files saved to', logger.file_writer.get_logdir())
    if args.save_config:
        with open(os.path.join(logger.file_writer.get_logdir(), 'config.yaml'), 'w') as fh:
            fh.write(cfg.dump())
    # Record the current git commit for reproducibility.
    repo = git.Repo(search_parent_directories=True)
    sha = repo.head.object.hexsha
    logger.add_text('git_sha', sha)
    dataset_dict = load_dataset(cfg)
    if (cfg.MODEL.TEXT_MODEL.TOKENIZER == 'simple'):
        # The simple tokenizer builds its vocabulary from the train split.
        texts = dataset_dict['train'].get_all_texts()
    else:
        texts = None
    (model, task) = build_model(cfg, texts, strict_loading=args.strict_loading)
    optimizer = get_optimizer(cfg, model)
    if (args.resume is not None):
        print(('loading from: %s' % args.resume))
        # Note: the checkpoint is read from the logger's directory (which
        # is args.resume for resumed runs).
        loaded_dict = torch.load((logger.file_writer.get_logdir() + '/latest_checkpoint.pth'))
        model.load_state_dict(loaded_dict['model_state_dict'])
        iteration = loaded_dict['it']
    else:
        iteration = 0
    if (task == 'metric'):
        trainer = MetricTrainer(cfg, logger, dataset_dict, model, optimizer, iteration)
    else:
        trainer = Trainer(cfg, logger, dataset_dict, model, optimizer, iteration)
    if args.debug:
        # Drop into an interactive shell with everything constructed.
        import IPython
        IPython.embed()
    if args.train:
        iteration = trainer.train()
    if args.eval:
        results = trainer.run_eval(eval_on_test=args.final_eval_on_test)
        # NOTE(review): this comprehension assumes `results` yields
        # (key, value) pairs — confirm run_eval's return type.
        results = {key: val for (key, val) in results}
        results_file = os.path.join(logger.file_writer.get_logdir(), f'{cfg.DATASET.NAME}-{iteration}-eval.json')
        with open(results_file, 'w') as fh:
            json.dump(results, fh)
        print(f'Evaluation results saved to {results_file}')
    logger.close()
def format_script_list(scripts):
    """Render a sequence of scripts as an EvTable string for display.

    Columns: dbref, attached object, key, interval, time to next repeat,
    repeat count, persistence flag, typeclass and a cropped description.
    Returns '<No scripts>' for an empty input.
    """
    if not scripts:
        return '<No scripts>'
    table = EvTable('|wdbref|n', '|wobj|n', '|wkey|n', '|wintval|n', '|wnext|n', '|wrept|n', '|wdb', '|wtypeclass|n', '|wdesc|n', align='r', border='tablecols')
    for script in scripts:
        time_left = script.time_until_next_repeat()
        if time_left is None:
            # No scheduled repeat: show PAUS when paused, '--' otherwise.
            next_cell = 'PAUS' if script.db._paused_time else '--'
        else:
            next_cell = '%ss' % time_left
        max_repeats = script.repeats
        if max_repeats:
            repeat_cell = '%i/%i' % ((max_repeats - script.remaining_repeats()), max_repeats)
        else:
            repeat_cell = '-/-'
        obj_cell = script.obj.key if (hasattr(script, 'obj') and script.obj) else '<Global>'
        interval_cell = script.interval if script.interval > 0 else '--'
        persist_cell = '*' if script.persistent else '-'
        typeclass_cell = script.typeclass_path.rsplit('.', 1)[-1]
        table.add_row(script.id, obj_cell, script.key, interval_cell, next_cell, repeat_cell, persist_cell, typeclass_cell, crop(script.desc, width=20))
    return '%s' % table
def get_points_array(iterable):
    """Stack centroids of an iterable of shapes into a 2-D numpy array.

    Shapely geometries contribute their centroid coordinates; other
    objects contribute their `.centroid` attribute.  If items have no
    centroid at all (AttributeError), the raw items themselves are
    stacked instead.
    """
    # tee() lets us retry with the untouched copy if the first pass fails.
    (primary, fallback) = tee(iterable)
    try:
        if HAS_SHAPELY:
            rows = []
            for shape in primary:
                if isinstance(shape, BaseGeometry):
                    rows.append(np.array(shape.centroid.coords)[0])
                else:
                    rows.append(np.array(shape.centroid))
            data = np.vstack(rows)
        else:
            data = np.vstack([np.array(shape.centroid) for shape in primary])
    except AttributeError:
        data = np.vstack(list(fallback))
    return data
def RegisterCalibration(client, name, default):
    """Register a calibration property plus its companion sub-properties.

    Creates `<name>` itself and the `.calibration.age/.locked/
    .sigmapoints/.points/.log` values on the client, attaching them as
    attributes of the returned calibration object.  `.locked` and
    `.points` are persistent across restarts.
    """
    calibration = client.register(CalibrationProperty(name, default))
    calibration.age = client.register(AgeValue((name + '.calibration.age')))
    calibration.locked = client.register(BooleanProperty((name + '.calibration.locked'), False, persistent=True))
    calibration.sigmapoints = client.register(RoundedValue((name + '.calibration.sigmapoints'), False))
    calibration.points = client.register(RoundedValue((name + '.calibration.points'), False, persistent=True))
    calibration.log = client.register(Property((name + '.calibration.log'), ''))
    return calibration
def test_emoji():
    """Volume widget in emoji mode shows the speaker glyph for each level."""
    # NOTE(review): the expected emoji literals were stripped to empty
    # strings in this copy (all four asserts compared to ''); restored
    # from qtile's Volume emoji set — confirm against the widget defaults.
    vol = Volume(emoji=True)
    vol.volume = (- 1)  # muted
    vol._update_drawer()
    assert (vol.text == '\U0001f507')  # 🔇
    vol.volume = 29  # low (< 30)
    vol._update_drawer()
    assert (vol.text == '\U0001f508')  # 🔈
    vol.volume = 79  # medium (< 80)
    vol._update_drawer()
    assert (vol.text == '\U0001f509')  # 🔉
    vol.volume = 80  # high (>= 80)
    vol._update_drawer()
    assert (vol.text == '\U0001f50a')  # 🔊
class Server(_EventDispatcher):
    """TCP server running an asyncio event loop in a background thread.

    Accepted connections are wrapped in ClientConnection and announced
    via the 'on_connection' event.  All logging goes through the
    project's _debug_net (called inside `assert`, so it is stripped
    under `python -O`).
    """
    def __init__(self, address, port):
        self._address = address
        self._port = port
        self._server = None
        # Daemon thread: the server loop dies with the main program.
        self._thread = _threading.Thread(target=self._run, daemon=True)
        self._thread.start()
        blurb = f'Server listening on {address}:{port}'
        assert _debug_net(f'''{('-' * len(blurb))}
{blurb}
{('-' * len(blurb))}''')
    async def handle_connection(self, reader, writer):
        # Called by asyncio for each accepted client socket.
        connection = ClientConnection(reader, writer)
        self.dispatch_event('on_connection', connection)
    async def _start_server(self):
        self._server = (await _asyncio.start_server(self.handle_connection, self._address, self._port))
        async with self._server:
            # Blocks until the server is closed/cancelled.
            (await self._server.serve_forever())
    def _run(self):
        # Thread target: owns the event loop for the server's lifetime.
        try:
            _asyncio.run(self._start_server())
        except KeyboardInterrupt:
            self._server.close()
    def on_connection(self, connection):
        # Default handler: log and chain the disconnect notification.
        assert _debug_net(f'Connected <--- {connection}')
        connection.set_handler('on_disconnect', self.on_disconnect)
    def on_disconnect(self, connection):
        assert _debug_net(f'Disconnected ---> {connection}')
def write_multi_ref(refdir, docid, summary):
    """Split a multi-reference summary into per-label gold files.

    `summary` is a token list; whole summaries are separated by SUM_SEP and
    sentences within a summary by SENT_SEP. Each summary item has the form
    '<label>\\t<sentences>' and is written to '<refdir>/<label>.<docid>.gold',
    one sentence per line.
    """
    def _dump(outfile, sents):
        # One sentence per line, trailing newline included.
        with open(outfile, 'w', encoding=ENCODE) as fout:
            fout.write('\n'.join(sents))
            fout.write('\n')

    joined = f' {SENT_SEP} '.join(summary)
    for sum_item in joined.strip().split(f' {SUM_SEP} '):
        fields = sum_item.split('\t')
        assert len(fields) == 2
        label, body = fields
        sentences = [sent.strip() for sent in body.strip().split(f' {SENT_SEP} ')]
        _dump(os.path.join(refdir, f'{label}.{docid}.gold'), sentences)
class TestHAProxyCollector(CollectorTestCase):
    """Tests for HAProxyCollector over the HTTP (urllib2) and unix-socket paths.

    Fix: the three ``@patch.object(Collector, 'publish')`` decorators had been
    stripped down to bare ``(Collector, 'publish')`` tuples, so ``publish_mock``
    was never injected and the tests failed with a missing-argument error.
    """

    def setUp(self):
        config = get_collector_config('HAProxyCollector', {'interval': 10})
        self.collector = HAProxyCollector(config, None)

    def test_import(self):
        self.assertTrue(HAProxyCollector)

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        """HTTP stats endpoint: fixture CSV should yield the pickled metrics."""
        self.collector.config['ignore_servers'] = False
        patch_urlopen = patch('urllib2.urlopen', Mock(return_value=self.getFixture('stats.csv')))
        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()
        metrics = self.getPickledResults('real_data.pkl')
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_should_work_with_unix_socket_code_path(self, publish_mock):
        """Unix-socket mode: a mocked socket serves the same fixture CSV."""
        self.collector.config['method'] = 'unix'

        class MockSocket():
            # NOTE: the methods below deliberately take *args and reference
            # `self` from the enclosing test method's closure (the TestCase
            # instance), which is where the fake socket state is stored.
            def __init__(*args, **kwargs):
                self.connected = False
                self.output_data = ''

            def connect(*args, **kwargs):
                self.connected = True

            def send(obj, string, *args, **kwargs):
                if (not self.connected):
                    raise Exception('MockSocket: Endpoint not connected.')
                if (string == 'show stat\n'):
                    self.output_data = self.getFixture('stats.csv').getvalue()

            def recv(obj, bufsize, *args, **kwargs):
                # Serve the fixture in bufsize-d chunks, like a real socket.
                output_buffer = self.output_data[:bufsize]
                self.output_data = self.output_data[bufsize:]
                return output_buffer
        patch_socket = patch('socket.socket', Mock(return_value=MockSocket()))
        patch_socket.start()
        self.collector.collect()
        patch_socket.stop()
        metrics = self.getPickledResults('real_data.pkl')
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data_and_ignore_servers(self, publish_mock):
        """With ignore_servers on, per-server rows are dropped from the metrics."""
        self.collector.config['ignore_servers'] = True
        patch_urlopen = patch('urllib2.urlopen', Mock(return_value=self.getFixture('stats.csv')))
        patch_urlopen.start()
        self.collector.collect()
        patch_urlopen.stop()
        metrics = self.getPickledResults('real_data_ignore_servers.pkl')
        self.assertPublishedMany(publish_mock, metrics)
def get_data():
    """Return the demo sentences encoded as cleaned symbol-id sequences."""
    sentences = (
        'I am very happy to see you again!',
        'Durian model is a very good speech synthesis!',
        'When I was twenty, I fell in love with a girl.',
        'I remove attention module in decoder and use average pooling to implement predicting r frames at once',
        'You can not improve your past, but you can improve your future. Once time is wasted, life is wasted.',
        'Death comes to all, but great achievements raise a monument which shall endure until the sun grows old.',
    )
    # Each sentence is converted with the configured text cleaners.
    return [text.text_to_sequence(sentence, hp.text_cleaners) for sentence in sentences]
class SliceObjectAction(BaseAction):
    """Reward logic for the SliceObject subgoal.

    Open/Close are tolerated alongside SliceObject itself; any other last
    action is penalized as invalid.
    """
    valid_actions = {'SliceObject', 'OpenObject', 'CloseObject'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        """Return (reward, done): positive+done once the target reports isSliced."""
        if state.metadata['lastAction'] not in self.valid_actions:
            return (self.rewards['invalid_action'], False)
        subgoal = expert_plan[goal_idx]['planner_action']
        target_object = game_util.get_object(subgoal['objectId'], state.metadata)
        if target_object is None:
            # Target not found in the scene metadata: neutral, keep going.
            return (self.rewards['neutral'], False)
        if target_object['isSliced']:
            return (self.rewards['positive'], True)
        return (self.rewards['negative'], False)
def hrnet_w18(pretrained=False):
    """Build HRNet-W18 from its YAML config, optionally loading pretrained weights.

    Raises AssertionError when pretrained weights are requested but the
    checkpoint file is missing on disk.
    """
    import yaml
    config_path = os.path.join('./models', 'model_info', 'hrnet_w18.yml')
    with open(config_path, 'r') as stream:
        model_config = yaml.safe_load(stream)
    model = HighResolutionNet(model_config)
    if not pretrained:
        return model
    weights_path = os.path.join(PROJECT_ROOT_DIR, 'models', 'pretrained_models', 'hrnet_w18_small_model_v2.pth')
    if not os.path.exists(weights_path):
        # NOTE(review): the download URL seems to have been stripped from
        # this message ("from and save") — restore it upstream.
        raise AssertionError('Error: No pretrained weights found for HRNet18. \n Download weights from and save them to {}'.format(weights_path))
    model.init_weights(weights_path)
    return model
_module()
class GlobalAveragePooling(nn.Module):
    """Global average pooling neck.

    Reduces each (N, C, H, W) feature map to an (N, C) vector by averaging
    over the spatial dimensions. Accepts a single tensor or a tuple/list of
    tensors (one per pyramid level) and returns the same container type.
    """

    def __init__(self):
        super().__init__()
        # AdaptiveAvgPool2d((1, 1)) averages over H and W for any input size.
        self.gap = nn.AdaptiveAvgPool2d((1, 1))

    def init_weights(self):
        # No learnable parameters; kept for API parity with other necks.
        pass

    def forward(self, inputs):
        if isinstance(inputs, tuple):
            outs = tuple(self.gap(x) for x in inputs)
            outs = tuple(out.view(x.size(0), -1) for (out, x) in zip(outs, inputs))
        elif isinstance(inputs, list):
            outs = [self.gap(x) for x in inputs]
            outs = [out.view(x.size(0), -1) for (out, x) in zip(outs, inputs)]
        elif isinstance(inputs, torch.Tensor):
            outs = self.gap(inputs)
            outs = outs.view(inputs.size(0), -1)
        else:
            # Fix: the message previously omitted `list`, which the branch
            # above explicitly accepts.
            raise TypeError('neck inputs should be tuple, list or torch.Tensor')
        return outs
class PyGameLCD1602Render():
    """Pygame-based visual emulation of a 16x2 character LCD.

    Each character cell is a 5x8 dot matrix; every dot is drawn as a
    `unit`-sized square, with a `gap` between character cells and a `space`
    margin around the whole display.
    """

    def __init__(self, caption='LCD 1602'):
        # Pixel square side length, in screen pixels.
        self.unit = 10
        # Dot matrix of one character cell: (columns, rows).
        self.letter_size = (5, 8)
        # Display size in characters: 16 columns x 2 rows.
        self.text_scale = (16, 2)
        # Outer margin and inter-cell gap, derived from the unit size.
        (self.space, self.gap) = ((self.unit * 4), (self.unit // 2))
        # Window size = margins + inter-cell gaps + all dot squares
        # (walrus captures width/height for the backplate rect below).
        size = ((width := ((((2 * self.space) + (self.gap * self.text_scale[0])) - self.gap) + ((self.unit * self.text_scale[0]) * self.letter_size[0]))), (height := ((((2 * self.space) + (self.gap * self.text_scale[1])) - self.gap) + ((self.unit * self.text_scale[1]) * self.letter_size[1]))))
        pygame.init()
        self.win = pygame.display.set_mode(size, flags=pygame.NOFRAME)
        pygame.display.set_caption(caption)
        # Light-green LCD backplate with rounded corners.
        pygame.draw.rect(self.win, '#c9f6cd', ((self.unit, self.unit), ((width - (self.unit * 2)), (height - (self.unit * 2)))), border_radius=self.unit)
        row_num = (self.letter_size[1] * self.text_scale[1])
        col_num = (self.letter_size[0] * self.text_scale[0])
        # Pre-draw every dot in its "off" state.
        for i in range(row_num):
            for j in range(col_num):
                self.draw_pixel(i, j, False)
        pygame.display.update()

    def draw_pixel(self, i, j, activate=True):
        """Draw one LCD dot at row *i*, column *j*; dark when active, pale otherwise."""
        # Position = margin + dot offset + one gap per character-cell boundary crossed.
        x = ((self.space + (self.unit * j)) + (self.gap * (j // self.letter_size[0])))
        y = ((self.space + (self.unit * i)) + (self.gap * (i // self.letter_size[1])))
        color = ('#446644' if activate else '#bbeebb')
        # unit - 1 leaves a hairline between adjacent dots.
        pygame.draw.rect(self.win, color, ((x, y), ((self.unit - 1), (self.unit - 1))))

    def check_event(self):
        # Drain the event queue so the window stays responsive.
        pygame.event.get()

    def quit(self):
        pygame.quit()
        sys.exit(0)
def test_add_existing_plugin_updates_if_requested(tester: CommandTester, repo: TestRepository, installed: TestRepository) -> None:
    """`poetry self add` on an installed plugin upgrades it to the newest release."""
    # Seed the system pyproject so poetry-plugin ^1.2.3 is already declared.
    pyproject = SelfCommand.get_default_system_pyproject_file()
    with open(pyproject, 'w', encoding='utf-8', newline='') as f:
        f.write(f'''[tool.poetry]
name = "poetry-instance"
version = "1.2.0"
description = "Python dependency management and packaging made easy."
authors = []
[tool.poetry.dependencies]
python = "^3.6"
[tool.poetry.group.{SelfCommand.ADDITIONAL_PACKAGE_GROUP}.dependencies]
poetry-plugin = "^1.2.3"
''')
    # 1.2.3 is currently installed; 2.3.4 is available in the repository.
    installed.add_package(Package('poetry-plugin', '1.2.3'))
    repo.add_package(Package('poetry-plugin', '1.2.3'))
    repo.add_package(Package('poetry-plugin', '2.3.4'))
    tester.execute('poetry-')
    expected = 'Using version ^2.3.4 for poetry-plugin\n\nUpdating dependencies\nResolving dependencies...\n\nPackage operations: 0 installs, 1 update, 0 removals\n\n - Updating poetry-plugin (1.2.3 -> 2.3.4)\n\nWriting lock file\n'
    assert_plugin_add_result(tester, expected, '^2.3.4')
('pickle')
def test_frame_wise_torch_data_loader():
    """MemoryCacheFramewiseDataset composes with a torch DataLoader.

    Nose-style generator test: yields one sub-test per batch size.
    """
    import torch
    from torch.utils import data as data_utils
    (X, Y) = _get_small_datasets(padded=False)
    # Frame counts per utterance; the framewise dataset flattens across these.
    lengths = np.array([len(x) for x in X], dtype=int)
    X = MemoryCacheFramewiseDataset(X, lengths, cache_size=len(X))
    Y = MemoryCacheFramewiseDataset(Y, lengths, cache_size=len(Y))

    class TorchDataset(data_utils.Dataset):
        # Thin adapter turning paired numpy datasets into torch tensors.
        def __init__(self, X, Y):
            self.X = X
            self.Y = Y

        def __getitem__(self, idx):
            return (torch.from_numpy(self.X[idx]), torch.from_numpy(self.Y[idx]))

        def __len__(self):
            return len(self.X)

    def __test(X, Y, batch_size):
        dataset = TorchDataset(X, Y)
        loader = data_utils.DataLoader(dataset, batch_size=batch_size, num_workers=0, shuffle=True)
        for (_, (x, y)) in enumerate(loader):
            # Framewise items are 1-D feature vectors, so batches are 2-D.
            assert (len(x.shape) == 2)
            assert (len(y.shape) == 2)
    (yield (__test, X, Y, 128))
    (yield (__test, X, Y, 256))
class Mobile_Wallet(Imported_Wallet):
    """Imported-key wallet variant for mobile: no change addresses, fixed gap.

    Keys are derived from the keystore on demand rather than imported by the
    user, so the user-facing import/delete operations are disabled.
    """
    wallet_type = 'mobile'

    def __init__(self, db: 'WalletDB', storage: WalletStorage, *, config: SimpleConfig):
        # Older DBs may lack the imported_addresses map; create it before
        # the base class touches it.
        if (not hasattr(db, 'imported_addresses')):
            db.imported_addresses = {}
        Imported_Wallet.__init__(self, db, storage, config=config)
        self.use_change = False
        self.gap_limit = 10

    def can_import_address(self):
        return False

    def can_delete_address(self):
        return False

    def synchronize(self):
        """Derive private keys up to gap_limit and import them without a disk write."""
        keys = []
        addr_count = len(self.get_addresses())
        for i in range(0, (self.gap_limit - addr_count)):
            # Derivation path [0, index]: sequential keys on branch 0.
            (secret, compressed) = self.keystore.derive_privkey([0, (addr_count + i)], None)
            keys.append(serialize_privkey(secret, compressed, 'p2pkh', internal_use=True))
        self.import_private_keys(keys, None, write_to_disk=False)
class Wav2Vec2PreTrainer(Trainer):
    """HF Trainer subclass for wav2vec2 pretraining with Gumbel-temperature annealing."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        # Completed-step counter; drives the temperature schedule below.
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[(str, Union[(torch.Tensor, Any)])]) -> torch.Tensor:
        """One step: forward, loss reduction/scaling, backward, anneal temperature.

        Returns the detached loss tensor.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if ((self.args.n_gpu > 1) or self.deepspeed):
            # Wrapped (DataParallel/deepspeed) model: reduce per-replica losses.
            if (model.module.config.ctc_loss_reduction == 'mean'):
                loss = loss.mean()
            elif (model.module.config.ctc_loss_reduction == 'sum'):
                # Normalize by the number of masked time steps in the batch.
                loss = (loss.sum() / inputs['mask_time_indices'].sum())
            else:
                # NOTE(review): the branch reads model.module.config but the
                # message reports model.config — confirm which is intended.
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if (self.args.gradient_accumulation_steps > 1):
            loss = (loss / self.args.gradient_accumulation_steps)
        # Backward through whichever mixed-precision/engine path is active.
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # Exponentially decay the Gumbel temperature, clamped at the minimum.
        if ((self.args.n_gpu > 1) or self.deepspeed):
            model.module.set_gumbel_temperature(max((self.max_gumbel_temp * (self.gumbel_temp_decay ** self.num_update_step)), self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(max((self.max_gumbel_temp * (self.gumbel_temp_decay ** self.num_update_step)), self.min_gumbel_temp))
        return loss.detach()
class EmbedCancel(discord.ui.Button):
    """Red 'Cancel' button for the embed builder view."""
    # Set by discord.ui when the button is attached to an EmbedBuilder view.
    view: EmbedBuilder

    def __init__(self):
        super().__init__(label='Cancel', style=discord.ButtonStyle.red)

    async def callback(self, interaction: discord.Interaction) -> T.Any:
        """Acknowledge the cancellation and tear the view down."""
        # Ephemeral: only the invoking user sees the message.
        (await interaction.response.send_message(f'{emote.xmark} | Embed sending cancelled.', ephemeral=True))
        # Reuse the view's timeout path to disable/clean up the UI.
        (await self.view.on_timeout())
def download_PF_willow(dest='datasets/proposal-flow-willow'):
    """Download and unpack the PF-Willow dataset and its image-pair list into *dest*.

    NOTE(review): both URL string literals below are truncated ("url = '")
    in this copy of the file and leave the function syntactically broken —
    the dataset and pair-list URLs must be restored before this can run.
    """
    print('Fetching PF Willow dataset ')
    url = '
    file_path = join(dest, basename(url))
    download_and_uncompress(url, file_path)
    print('Downloading image pair list \n')
    url = '
    file_path = join(dest, basename(url))
    download_and_uncompress(url, file_path)
def select_device(device='', batch_size=None):
    """Resolve a device string ('', 'cpu', '0', '0,1,2,3', 'cuda:0') to a torch.device.

    Validates CUDA availability, checks that the batch size divides evenly
    across multiple GPUs, and logs a one-line summary per device.
    """
    s = f'YOLOv5 {(git_describe() or date_modified())} torch {torch.__version__} '
    device = str(device).strip().lower().replace('cuda:', '')
    cpu = (device == 'cpu')
    if cpu:
        # Hide all GPUs so torch.cuda.is_available() is False below.
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    elif device:
        # Restrict visibility to the requested devices, then verify.
        os.environ['CUDA_VISIBLE_DEVICES'] = device
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'
    cuda = ((not cpu) and torch.cuda.is_available())
    if cuda:
        devices = (device.split(',') if device else '0')
        n = len(devices)
        if ((n > 1) and batch_size):
            # Multi-GPU requires the batch to split evenly across devices.
            assert ((batch_size % n) == 0), f'batch-size {batch_size} not multiple of GPU count {n}'
        space = (' ' * (len(s) + 1))
        for (i, d) in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f'''{('' if (i == 0) else space)}CUDA:{d} ({p.name}, {(p.total_memory / (1024 ** 2))}MB)
'''
    else:
        s += 'CPU\n'
    # Windows consoles may not render non-ASCII; strip it there.
    LOGGER.info((s.encode().decode('ascii', 'ignore') if (platform.system() == 'Windows') else s))
    return torch.device(('cuda:0' if cuda else 'cpu'))
class DummyStatefulDataLoader():
    """DataLoader wrapper that records state_dict/load_state_dict calls.

    The wrapped loader is only used for iteration; the exposed "state" is an
    empty dict, which is enough to exercise checkpointing plumbing.
    """

    def __init__(self, dataloader: DataLoader) -> None:
        self.dataloader = dataloader
        # Call counters inspected by tests.
        self.state_dict_call_count = 0
        self.load_state_dict_call_count = 0

    def state_dict(self) -> Dict[str, Any]:
        self.state_dict_call_count += 1
        return {}

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        # The supplied state is deliberately ignored; only the call is counted.
        self.load_state_dict_call_count += 1

    def __iter__(self) -> Iterator[object]:
        return iter(self.dataloader)
def test_specific_unknown(hatch, helpers, temp_dir, config_file):
    """`hatch env show` for an undefined environment exits 1 with an error message."""
    # Disable test-template generation to keep the new project minimal.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    with project_path.as_cwd():
        # 'foo' is not declared anywhere in the generated project config.
        result = hatch('env', 'show', 'foo', '--ascii')
    assert (result.exit_code == 1), result.output
    assert (helpers.remove_trailing_spaces(result.output) == helpers.dedent('\n Environment `foo` is not defined by project config\n '))
def pin_memory_batch(batch):
    """Recursively pin the memory of every tensor in a (possibly nested) batch.

    Tensors are pinned; strings pass through untouched (they must be checked
    before the Sequence branch, since str is a Sequence); mappings and
    sequences are rebuilt with their elements pinned; anything else is
    returned unchanged.
    """
    # Fix: the bare collections.Mapping/Sequence aliases were removed in
    # Python 3.10 — the ABCs live in collections.abc (available since 3.3).
    import collections.abc
    if torch.is_tensor(batch):
        return batch.pin_memory()
    elif isinstance(batch, string_classes):
        return batch
    elif isinstance(batch, collections.abc.Mapping):
        return {k: pin_memory_batch(sample) for (k, sample) in batch.items()}
    elif isinstance(batch, collections.abc.Sequence):
        return [pin_memory_batch(sample) for sample in batch]
    else:
        return batch
def test_ignored_extension(monkeypatch):
    """Extensions named in the 'ignore' config are omitted from the examples text."""
    fake_config = {'ignore': ['--option'], 'comment': []}
    monkeypatch.setattr(interactive, 'get_config', lambda key: fake_config[key])
    parser = ArgumentParser()
    # A fake extension whose flag matches the ignored entry.
    extension = Mock(flag='--option')
    action = parser.add_argument('--option', dest='extensions', action='append_const', const=extension)
    text = interactive.all_examples(parser, [action], {'extensions': [extension]})
    assert '--option' not in text
class OrientedPushOracle(py_policy.PyPolicy):
    """Scripted pushing policy that keeps the block oriented toward the target.

    A small phase machine drives the end-effector: move to a staging point
    behind the block ('move_to_pre_block'), approach it ('move_to_block'),
    push it ('push_block'), and circle left/right around the block to
    re-orient it whenever its rotation error exceeds a threshold.
    """

    def __init__(self, env, action_noise_std=0.0):
        super(OrientedPushOracle, self).__init__(env.time_step_spec(), env.action_spec())
        self._env = env
        # Fixed seed so any injected action noise is reproducible.
        self._np_random_state = np.random.RandomState(0)
        self.phase = 'move_to_pre_block'
        self._action_noise_std = action_noise_std

    def reset(self):
        self.phase = 'move_to_pre_block'

    def get_theta_from_vector(self, vector):
        """Planar angle (radians) of a 2-D vector."""
        return np.arctan2(vector[1], vector[0])

    def theta_to_rotation2d(self, theta):
        """2x2 counter-clockwise rotation matrix for `theta` radians."""
        r = np.array([[np.cos(theta), (- np.sin(theta))], [np.sin(theta), np.cos(theta)]])
        return r

    def rotate(self, theta, xy_dir_block_to_ee):
        """Rotate a 2-D vector by `theta` radians.

        Fix: the matrix-vector product operator was missing here
        ("rot_2d xy_dir_block_to_ee"), which is a syntax error.
        """
        rot_2d = self.theta_to_rotation2d(theta)
        return rot_2d @ xy_dir_block_to_ee

    def _get_action_info(self, time_step, block, target):
        """Gather block/target/effector geometry into a PushingInfo record."""
        xy_block = time_step.observation[('%s_translation' % block)][:2]
        theta_block = time_step.observation[('%s_orientation' % block)]
        xy_target = time_step.observation[('%s_translation' % target)][:2]
        xy_ee = time_step.observation['effector_target_translation'][:2]
        xy_block_to_target = (xy_target - xy_block)
        xy_dir_block_to_target = (xy_block_to_target / np.linalg.norm(xy_block_to_target))
        theta_to_target = self.get_theta_from_vector(xy_dir_block_to_target)
        # Wrap the orientation error into (-pi/4, pi/4]: the block has
        # 4-fold symmetry, so errors are equivalent modulo 90 degrees.
        theta_error = (theta_to_target - theta_block)
        while (theta_error > (np.pi / 4)):
            theta_error -= (np.pi / 2.0)
        while (theta_error < ((- np.pi) / 4)):
            theta_error += (np.pi / 2.0)
        # Waypoints behind the block along the push direction (5/3/1 cm back).
        xy_pre_block = (xy_block + ((- xy_dir_block_to_target) * 0.05))
        xy_nexttoblock = (xy_block + ((- xy_dir_block_to_target) * 0.03))
        xy_touchingblock = (xy_block + ((- xy_dir_block_to_target) * 0.01))
        xy_delta_to_nexttoblock = (xy_nexttoblock - xy_ee)
        xy_delta_to_touchingblock = (xy_touchingblock - xy_ee)
        xy_block_to_ee = (xy_ee - xy_block)
        xy_dir_block_to_ee = (xy_block_to_ee / np.linalg.norm(xy_block_to_ee))
        theta_threshold_to_orient = 0.2
        theta_threshold_flat_enough = 0.03
        return pushing_info_module.PushingInfo(xy_block=xy_block, xy_ee=xy_ee, xy_pre_block=xy_pre_block, xy_delta_to_nexttoblock=xy_delta_to_nexttoblock, xy_delta_to_touchingblock=xy_delta_to_touchingblock, xy_dir_block_to_ee=xy_dir_block_to_ee, theta_threshold_to_orient=theta_threshold_to_orient, theta_threshold_flat_enough=theta_threshold_flat_enough, theta_error=theta_error)

    def _get_move_to_preblock(self, xy_pre_block, xy_ee):
        """Head to the staging point behind the block; advance phase on arrival."""
        max_step_velocity = 0.3
        xy_delta_to_preblock = (xy_pre_block - xy_ee)
        diff = np.linalg.norm(xy_delta_to_preblock)
        if (diff < 0.001):
            self.phase = 'move_to_block'
        xy_delta = xy_delta_to_preblock
        return (xy_delta, max_step_velocity)

    def _get_move_to_block(self, xy_delta_to_nexttoblock, theta_threshold_to_orient, theta_error):
        """Approach the block; switch to pushing or to re-orientation as needed."""
        diff = np.linalg.norm(xy_delta_to_nexttoblock)
        if (diff < 0.001):
            self.phase = 'push_block'
        # A large orientation error preempts pushing with a re-orient phase.
        if (theta_error > theta_threshold_to_orient):
            self.phase = 'orient_block_left'
        if (theta_error < (- theta_threshold_to_orient)):
            self.phase = 'orient_block_right'
        xy_delta = xy_delta_to_nexttoblock
        return xy_delta

    def _get_push_block(self, theta_error, theta_threshold_to_orient, xy_delta_to_touchingblock):
        """Push toward the block; restart the approach if the block twists too far."""
        if (theta_error > theta_threshold_to_orient):
            self.phase = 'move_to_pre_block'
        if (theta_error < (- theta_threshold_to_orient)):
            self.phase = 'move_to_pre_block'
        xy_delta = xy_delta_to_touchingblock
        return xy_delta

    def _get_orient_block_left(self, xy_dir_block_to_ee, orient_circle_diameter, xy_block, xy_ee, theta_error, theta_threshold_flat_enough):
        """Circle counter-clockwise around the block to reduce a positive theta error."""
        xy_dir_block_to_ee = self.rotate(0.2, xy_dir_block_to_ee)
        xy_block_to_ee = (xy_dir_block_to_ee * orient_circle_diameter)
        xy_push_left_spot = (xy_block + xy_block_to_ee)
        xy_delta = (xy_push_left_spot - xy_ee)
        if (theta_error < theta_threshold_flat_enough):
            self.phase = 'move_to_pre_block'
        return xy_delta

    def _get_orient_block_right(self, xy_dir_block_to_ee, orient_circle_diameter, xy_block, xy_ee, theta_error, theta_threshold_flat_enough):
        """Circle clockwise around the block to reduce a negative theta error."""
        xy_dir_block_to_ee = self.rotate((- 0.2), xy_dir_block_to_ee)
        xy_block_to_ee = (xy_dir_block_to_ee * orient_circle_diameter)
        xy_push_left_spot = (xy_block + xy_block_to_ee)
        xy_delta = (xy_push_left_spot - xy_ee)
        if (theta_error > (- theta_threshold_flat_enough)):
            self.phase = 'move_to_pre_block'
        return xy_delta

    def _get_action_for_block_target(self, time_step, block='block', target='target'):
        """Run one tick of the phase machine and return a velocity-clamped xy delta."""
        max_step_velocity = 0.35
        info = self._get_action_info(time_step, block, target)
        if (self.phase == 'move_to_pre_block'):
            (xy_delta, max_step_velocity) = self._get_move_to_preblock(info.xy_pre_block, info.xy_ee)
        if (self.phase == 'move_to_block'):
            xy_delta = self._get_move_to_block(info.xy_delta_to_nexttoblock, info.theta_threshold_to_orient, info.theta_error)
        if (self.phase == 'push_block'):
            xy_delta = self._get_push_block(info.theta_error, info.theta_threshold_to_orient, info.xy_delta_to_touchingblock)
        orient_circle_diameter = 0.025
        if ((self.phase == 'orient_block_left') or (self.phase == 'orient_block_right')):
            # Re-orientation is done slowly for precision.
            max_step_velocity = 0.15
        if (self.phase == 'orient_block_left'):
            xy_delta = self._get_orient_block_left(info.xy_dir_block_to_ee, orient_circle_diameter, info.xy_block, info.xy_ee, info.theta_error, info.theta_threshold_flat_enough)
        if (self.phase == 'orient_block_right'):
            xy_delta = self._get_orient_block_right(info.xy_dir_block_to_ee, orient_circle_diameter, info.xy_block, info.xy_ee, info.theta_error, info.theta_threshold_flat_enough)
        if (self._action_noise_std != 0.0):
            xy_delta += (self._np_random_state.randn(2) * self._action_noise_std)
        # Clamp the step length to the per-tick maximum distance.
        max_step_distance = (max_step_velocity * (1 / self._env.get_control_frequency()))
        length = np.linalg.norm(xy_delta)
        if (length > max_step_distance):
            xy_direction = (xy_delta / length)
            xy_delta = (xy_direction * max_step_distance)
        return xy_delta

    def _action(self, time_step, policy_state):
        if time_step.is_first():
            self.reset()
        xy_delta = self._get_action_for_block_target(time_step, block='block', target='target')
        return policy_step.PolicyStep(action=np.asarray(xy_delta, dtype=np.float32))
def pjit_with_cpu_fallback(fun: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[int, Sequence[int]] = (), donate_argnums: Union[int, Sequence[int]] = (), backend: Optional[str] = None):
    """pjit wrapper that degrades to plain jit on CPU backends.

    On a CPU backend the partitioning resources are irrelevant, so the
    function is compiled with jax.jit; otherwise it is handed to pjit with
    the given axis resources.
    """
    on_cpu = jax.devices(backend)[0].platform == 'cpu'
    if not on_cpu:
        return jax_pjit(fun, in_axis_resources, out_axis_resources, static_argnums=static_argnums, donate_argnums=donate_argnums)
    return jax.jit(fun, static_argnums=static_argnums, donate_argnums=donate_argnums)
class BalancedPositiveNegativeSamplerTest(tf.test.TestCase):
    """Tests for BalancedPositiveNegativeSampler's subsample() behavior."""

    def test_subsample_all_examples(self):
        """With every example eligible, a 64-sample draw comes back balanced 32/32."""
        numpy_labels = np.random.permutation(300)
        indicator = tf.constant((np.ones(300) == 1))
        # 100 positives (values > 200), 200 negatives.
        numpy_labels = ((numpy_labels - 200) > 0)
        labels = tf.constant(numpy_labels)
        sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()
        is_sampled = sampler.subsample(indicator, 64, labels)
        with self.test_session() as sess:
            is_sampled = sess.run(is_sampled)
            self.assertTrue((sum(is_sampled) == 64))
            self.assertTrue((sum(np.logical_and(numpy_labels, is_sampled)) == 32))
            self.assertTrue((sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) == 32))

    def test_subsample_selection(self):
        """With only 10 eligible positives, all are taken and negatives fill the rest."""
        numpy_labels = np.arange(100)
        numpy_indicator = (numpy_labels < 90)
        indicator = tf.constant(numpy_indicator)
        # Positives are ids >= 80, but only 80..89 are eligible via the indicator.
        numpy_labels = ((numpy_labels - 80) >= 0)
        labels = tf.constant(numpy_labels)
        sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()
        is_sampled = sampler.subsample(indicator, 64, labels)
        with self.test_session() as sess:
            is_sampled = sess.run(is_sampled)
            self.assertTrue((sum(is_sampled) == 64))
            self.assertTrue((sum(np.logical_and(numpy_labels, is_sampled)) == 10))
            self.assertTrue((sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) == 54))
            # Nothing outside the indicator may be sampled.
            self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator))

    def test_raises_error_with_incorrect_label_shape(self):
        """Labels must be rank 1: a 2-D labels tensor raises ValueError."""
        labels = tf.constant([[True, False, False]])
        indicator = tf.constant([True, False, True])
        sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()
        with self.assertRaises(ValueError):
            sampler.subsample(indicator, 64, labels)

    def test_raises_error_with_incorrect_indicator_shape(self):
        """Indicator must be rank 1: a 2-D indicator tensor raises ValueError."""
        labels = tf.constant([True, False, False])
        indicator = tf.constant([[True, False, True]])
        sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()
        with self.assertRaises(ValueError):
            sampler.subsample(indicator, 64, labels)
def load_data_train(opt):
    """Build a DataLoader over the normal-class images: grayscale, 45x45, normalized to [-1, 1]."""
    transform = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize((45, 45)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ])
    root = opt.data_path + '/' + opt.normal_class
    dataset = dset.ImageFolder(root=root, transform=transform)
    return torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_threads, drop_last=opt.drop_last)
def main(args):
    """Build a PDF where each input image becomes one page at its native pixel size."""
    pdf = pdfium.PdfDocument.new()
    for fp in args.images:
        image_obj = pdfium.PdfImage.new(pdf)
        if (fp.suffix.lower() in ('.jpg', '.jpeg')):
            # JPEGs can be embedded directly without re-encoding.
            image_obj.load_jpeg(fp, inline=args.inline)
        else:
            # Other formats go through PIL -> pdfium bitmap.
            pil_image = PIL.Image.open(fp)
            bitmap = pdfium.PdfBitmap.from_pil(pil_image)
            pil_image.close()
            image_obj.set_bitmap(bitmap)
            bitmap.close()
        (w, h) = image_obj.get_size()
        # Scale the unit-square image object up to the full pixel dimensions.
        image_obj.set_matrix(pdfium.PdfMatrix().scale(w, h))
        page = pdf.new_page(w, h)
        page.insert_obj(image_obj)
        page.gen_content()
        # Release native handles per page before moving on.
        image_obj.close()
        page.close()
    pdf.save(args.output)
    pdf.close()
class FakeScreenConfig(Config):
    """Qtile test config with four fake screens, each with its own bar and gaps.

    Fix: the Clock format contained '%d.%manager.%Y' — an over-eager
    identifier rename that clobbered the '%m' (month) strftime directive.
    """
    auto_fullscreen = True
    groups = [libqtile.config.Group('a'), libqtile.config.Group('b'), libqtile.config.Group('c'), libqtile.config.Group('d')]
    layouts = [layout.Max(), layout.RatioTile(), layout.Tile()]
    floating_layout = libqtile.resources.default_config.floating_layout
    keys = []
    mouse = []
    fake_screens = [Screen(bottom=bar.Bar([widget.GroupBox(this_screen_border=CHAM3, borderwidth=1, fontsize=FONTSIZE, padding=1, margin_x=1, margin_y=1), widget.AGroupBox(), widget.Prompt(), widget.Sep(), widget.WindowName(fontsize=FONTSIZE, margin_x=6), widget.Sep(), widget.CPUGraph(**GRAPH_KW), widget.MemoryGraph(**GRAPH_KW), widget.SwapGraph(foreground='20C020', **GRAPH_KW), widget.Sep(), widget.Clock(format='%H:%M:%S %d.%m.%Y', fontsize=FONTSIZE, padding=6)], 24, background='#555555'), left=bar.Gap(16), right=bar.Gap(20), x=0, y=0, width=500, height=340), Screen(top=bar.Bar([widget.GroupBox(), widget.WindowName(), widget.Clock()], 30), bottom=bar.Gap(24), left=bar.Gap(12), x=500, y=0, width=300, height=380), Screen(top=bar.Bar([widget.GroupBox(), widget.WindowName(), widget.Clock()], 30), bottom=bar.Gap(16), right=bar.Gap(40), x=0, y=340, width=450, height=220), Screen(top=bar.Bar([widget.GroupBox(), widget.WindowName(), widget.Clock()], 30), left=bar.Gap(20), right=bar.Gap(24), x=450, y=380, width=350, height=220)]
    # Empty: only fake_screens are used in this config.
    screens = []
class RandomMixing(nn.Module):
    """Token mixing with a fixed random matrix (softmax-normalized rows).

    The (num_tokens, num_tokens) matrix is drawn once at construction and
    frozen (requires_grad=False); forward expects num_tokens == H * W.
    """

    def __init__(self, num_tokens=196, **kwargs):
        super().__init__()
        mixing = torch.softmax(torch.rand(num_tokens, num_tokens), dim=-1)
        self.random_matrix = nn.parameter.Parameter(data=mixing, requires_grad=False)

    def forward(self, x):
        batch, height, width, channels = x.shape
        # Flatten spatial dims to tokens, mix, then restore the layout.
        tokens = x.reshape(batch, height * width, channels)
        mixed = torch.einsum('mn, bnc -> bmc', self.random_matrix, tokens)
        return mixed.reshape(batch, height, width, channels)
class ParserSuite(DataSuite):
    """Data-driven parser tests (parse*.test files, excluding parse-errors)."""
    required_out_section = True
    base_path = '.'
    files = find_test_files(pattern='parse*.test', exclude=['parse-errors.test'])
    # match/pattern-statement cases only parse on Python 3.10+.
    if (sys.version_info < (3, 10)):
        files.remove('parse-python310.test')

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_parser(testcase)
.grid
def test_transformer__only_best():
    """only_best=True requires PROJ >= 9.2, and errors when the needed grid is missing."""
    # On older PROJ the constructor itself must raise NotImplementedError.
    with (nullcontext() if PROJ_GTE_92 else pytest.raises(NotImplementedError, match='only_best requires PROJ 9.2')):
        transformer = Transformer.from_crs(4326, 2964, only_best=True)
        if (not grids_available('ca_nrc_ntv2_0.tif')):
            # Without the grid, a strict transform must error instead of degrading.
            with pytest.raises(ProjError, match='Grid ca_nrc_ntv2_0.tif is not available.'):
                transformer.transform(60, (- 100), errcheck=True)
def main():
    """CLI entry point: generate a standalone parser module from a grammar.

    Fix: `prog` previously contained a nested "prog='...'" literal
    (prog="prog='python -m lark.tools.standalone'"), which showed up
    verbatim in the --help usage line.
    """
    make_warnings_comments()
    parser = ArgumentParser(prog='python -m lark.tools.standalone', description='Lark Stand-alone Generator Tool', parents=[lalr_argparser], epilog='Look at the Lark documentation for more info on the options')
    parser.add_argument('-c', '--compress', action='store_true', default=0, help='Enable compression')
    # No arguments at all: print usage and bail out.
    if (len(sys.argv) == 1):
        parser.print_help(sys.stderr)
        sys.exit(1)
    ns = parser.parse_args()
    (lark_inst, out) = build_lalr(ns)
    gen_standalone(lark_inst, out=out, compress=ns.compress)
    # Close the file handles argparse opened for us.
    ns.out.close()
    ns.grammar_file.close()
class TestAssertUsageVarType(TestCaseUsage):
    """assert_usage_var_type accepts a matching var type and raises on a mismatch."""

    def test_success(self):
        artifact_var = usage.UsageVariable('a', (lambda : None), 'artifact', None)
        # Matching type: must not raise.
        usage.assert_usage_var_type(artifact_var, 'artifact')
        self.assertTrue(True)

    def test_failure(self):
        artifact_var = usage.UsageVariable('a', (lambda : None), 'artifact', None)
        expected_message = 'Incorrect.*a,.*visualization.*artifact'
        with self.assertRaisesRegex(AssertionError, expected_message):
            usage.assert_usage_var_type(artifact_var, 'visualization')
def cached_property(func: typing.Callable) -> property:
    """Property that computes its value once and caches it on the instance.

    The first access calls *func* and stores the result on the instance as
    ``_cached_<name>``; later accesses return the stored value without
    calling *func* again.
    """
    # Fix: key the cache on the function's *name*, not its repr —
    # f'_cached_{func}' embedded "<function f at 0x...>" (including the
    # object id) in the attribute name.
    cached_name = f'_cached_{func.__name__}'
    # Unique marker so a legitimately cached None/False is not recomputed.
    sentinel = object()

    def inner(instance: object):
        cache = getattr(instance, cached_name, sentinel)
        if (cache is not sentinel):
            return cache
        result = func(instance)
        setattr(instance, cached_name, result)
        return result
    return property(inner)
def test_waitid_eintr() -> None:
    """sync_wait_reapable must survive EINTR (produced here via SIGALRM)."""
    from .._subprocess_platform import wait_child_exiting
    # Platforms without waitid never reach the real body.
    if (TYPE_CHECKING and ((sys.platform == 'win32') or (sys.platform == 'darwin'))):
        return
    if (not wait_child_exiting.__module__.endswith('waitid')):
        pytest.skip('waitid only')
    from .._subprocess_platform.waitid import sync_wait_reapable
    got_alarm = False
    # Long-lived child; the alarm handler kills it so the wait can complete.
    sleeper = subprocess.Popen(['sleep', '3600'])

    def on_alarm(sig: int, frame: (FrameType | None)) -> None:
        nonlocal got_alarm
        got_alarm = True
        sleeper.kill()
    old_sigalrm = signal.signal(signal.SIGALRM, on_alarm)
    try:
        signal.alarm(1)
        # Interrupted by SIGALRM after ~1s; must not raise.
        sync_wait_reapable(sleeper.pid)
        # -9: child terminated by SIGKILL.
        assert (sleeper.wait(timeout=1) == (- 9))
    finally:
        # Clean up the child and restore the previous handler no matter what.
        if (sleeper.returncode is None):
            sleeper.kill()
            sleeper.wait()
        signal.signal(signal.SIGALRM, old_sigalrm)
class File():
    """Container for a serialized file: a header plus raw content chunks.

    Fix: `@property` (FileHeader, Content, BPP) and `@staticmethod`
    (deserialize) decorators had been stripped — BPP reads
    `self.FileHeader.ImageSize` and __str__ formats `self.BPP:.4f`, both of
    which only work when these are properties, not plain methods.
    """
    # Populated externally (e.g. by FileSchema deserialization).
    fileHeader: FileHeader
    contents: List[bytes]

    @property
    def FileHeader(self):
        """The file header."""
        return self.fileHeader

    @property
    def Content(self):
        """The raw content chunks."""
        return self.contents

    def serialize(self) -> bytes:
        """Pack this object into msgpack bytes via FileSchema."""
        thisFile: dict = FileSchema().dump(self)
        return msgpack.packb(thisFile, use_bin_type=True)

    @staticmethod
    def deserialize(data: bytes) -> 'File':
        """Rebuild a File from msgpack bytes (inverse of serialize)."""
        thisFile = msgpack.unpackb(data, use_list=False, raw=False)
        return FileSchema().load(thisFile)

    @property
    def BPP(self) -> float:
        """Bits per pixel: total content bits over the image's pixel count."""
        return ((sum((len(x) for x in self.contents)) * 8) / self.FileHeader.ImageSize.Pixels)

    def size(self, human: bool=False) -> Union[(int, str)]:
        """Total content size in bytes, or a human-readable string when human=True."""
        size = sum((len(x) for x in self.contents))
        if (not human):
            return size
        return vlutils.logger.readableSize(size)

    def __str__(self) -> str:
        return f'''Header: {self.fileHeader}
Size : {self.size(True)}
BPP : {self.BPP:.4f}'''

    def __hash__(self) -> int:
        return hash(self.serialize())
def list_all_i2c_ports(path):
    """Scan an INA3221 sysfs directory and map rail names to sensor file paths.

    Two layouts are supported:
      * hwmon style:  in<N>_label + in<N>_input / curr<N>_* files
      * legacy style: rail_name_<N> + in_voltage<N>_input / ... files

    Returns a dict {rail_name: {'type': 'INA3221', 'volt': path, ...}}
    containing only the keys whose backing sysfs files exist.

    Fix: `item.lstrip('rail_name_')` treated its argument as a character
    *set*, not a prefix; replaced with an explicit prefix slice.
    """

    def _add_existing(mapping, key, template, **fmt):
        # Record mapping[key] only when the sysfs file actually exists.
        candidate = template.format(**fmt)
        if check_file(candidate):
            mapping[key] = candidate

    sensor_name = {}
    for item in os.listdir(path):
        power_label_path = '{path}/{item}'.format(path=path, item=item)
        if item.endswith('_label'):
            raw_name = cat(power_label_path).strip()
            if ('NC' in raw_name):
                # Not-connected channel: nothing to monitor.
                logger.warn('Skipped NC {path}'.format(path=power_label_path))
                continue
            number_port = int(item.split('_')[0].strip('in'))
            if (number_port == 7):
                logger.warn('Skipped "sum of shunt voltages" {path}'.format(path=power_label_path))
                continue
            warnings = {'type': 'INA3221'}
            _add_existing(warnings, 'crit_alarm', '{path}/curr{num}_crit_alarm', path=path, num=number_port)
            _add_existing(warnings, 'max_alarm', '{path}/curr{num}_max_alarm', path=path, num=number_port)
            values = read_power_status(warnings)
            logger.info('Alarms {name} - {data}'.format(name=raw_name, data=values))
            sensor = {'type': 'INA3221'}
            _add_existing(sensor, 'volt', '{path}/in{num}_input', path=path, num=number_port)
            _add_existing(sensor, 'curr', '{path}/curr{num}_input', path=path, num=number_port)
            _add_existing(sensor, 'warn', '{path}/curr{num}_max', path=path, num=number_port)
            _add_existing(sensor, 'crit', '{path}/curr{num}_crit', path=path, num=number_port)
            # More than just the 'type' marker: at least one real file found.
            if (len(sensor) > 1):
                sensor_name[raw_name] = sensor
        elif item.startswith('rail_name_'):
            raw_name = cat(power_label_path).strip()
            number_port = int(item[len('rail_name_'):])
            sensor = {'type': 'INA3221'}
            _add_existing(sensor, 'volt', '{path}/in_voltage{num}_input', path=path, num=number_port)
            _add_existing(sensor, 'curr', '{path}/in_current{num}_input', path=path, num=number_port)
            _add_existing(sensor, 'power', '{path}/in_power{num}_input', path=path, num=number_port)
            _add_existing(sensor, 'warn', '{path}/warn_current_limit_{num}', path=path, num=number_port)
            _add_existing(sensor, 'crit', '{path}/crit_current_limit_{num}', path=path, num=number_port)
            if (len(sensor) > 1):
                sensor_name[raw_name] = sensor
    return sensor_name
def test_interhand_3d_head():
    """Smoke-test Interhand3DHead: forward pass, loss, inference and decode."""
    batch = 4
    feats = torch.rand((batch, 2048, 8, 8), dtype=torch.float32)
    # Targets/weights for the three sub-heads: 3D keypoints, root depth, hand type.
    target = [
        feats.new_zeros(batch, 42, 64, 64, 64),
        feats.new_zeros(batch, 1),
        feats.new_zeros(batch, 2),
    ]
    target_weight = [
        feats.new_ones(batch, 42, 1),
        feats.new_ones(batch, 1),
        feats.new_ones(batch),
    ]
    img_metas = [
        {'img_shape': (256, 256, 3), 'center': np.array([112, 112]), 'scale': np.array([0.5, 0.5]), 'bbox_score': 1.0, 'bbox_id': 0, 'flip_pairs': [], 'inference_channel': np.arange(42), 'image_file': '<demo>.png', 'heatmap3d_depth_bound': 400.0, 'root_depth_bound': 400.0}
        for _ in range(batch)
    ]
    head = Interhand3DHead(
        keypoint_head_cfg=dict(in_channels=2048, out_channels=(21 * 64), depth_size=64, num_deconv_layers=3, num_deconv_filters=(256, 256, 256), num_deconv_kernels=(4, 4, 4)),
        root_head_cfg=dict(in_channels=2048, heatmap_size=64, hidden_dims=(512,)),
        hand_type_head_cfg=dict(in_channels=2048, num_labels=2, hidden_dims=(512,)),
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True),
        loss_root_depth=dict(type='L1Loss'),
        loss_hand_type=dict(type='BCELoss', use_target_weight=True),
        train_cfg={},
        test_cfg={},
    )
    head.init_weights()

    expected_shapes = ((batch, 42, 64, 64, 64), (batch, 1), (batch, 2))

    # Plain forward pass.
    out = head(feats)
    assert isinstance(out, list)
    assert len(out) == 3
    for tensor, shape in zip(out, expected_shapes):
        assert tensor.shape == shape

    # All three loss terms must be produced.
    losses = head.get_loss(out, target, target_weight)
    for key in ('hand_loss', 'rel_root_loss', 'hand_type_loss'):
        assert key in losses

    # Flip-augmented inference path (pair each left-hand joint with its right-hand twin).
    flip_pairs = [[i, (21 + i)] for i in range(21)]
    out = head.inference_model(feats, flip_pairs)
    assert isinstance(out, list)
    assert len(out) == 3
    for tensor, shape in zip(out, expected_shapes):
        assert tensor.shape == shape

    # Decoding returns keypoints plus the auxiliary predictions.
    result = head.decode(img_metas, out)
    for key in ('preds', 'rel_root_depth', 'hand_type'):
        assert key in result
def html(title=None, extra_content=''):
    """Return a minimal HTML 4.01 test page.

    BUG FIX: the original literal was corrupted — the ``% extra_content``
    operator had been swallowed into the string itself, so *extra_content*
    was never interpolated and the markup was truncated mid-attribute.
    Reconstructed as a well-formed template with a ``%s`` body slot.

    :param title: optional replacement for the default ``<title>`` text
    :param extra_content: raw HTML inserted into the document body
    """
    html = (
        '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">\n'
        '<html>\n'
        ' <head>\n'
        '  <meta content="text/html; charset=ISO-8859-1">\n'
        '  <title>mechanize</title>\n'
        ' </head>\n'
        ' <body>%s</body>\n'
        '</html>\n' % extra_content
    )
    if title is not None:
        # Swap the default <title> text for the caller-supplied one.
        html = re.sub('<title>(.*)</title>', ('<title>%s</title>' % title), html)
    return html
class Migrate:
    """Run schema migrations for every writer of a set of feature-set pipelines."""

    def __init__(self, pipelines: Set[FeatureSetPipeline]) -> None:
        self.pipelines = pipelines

    def _send_logs_to_s3(self, file_local: bool, debug_mode: bool) -> None:
        """Ship the migration log file to S3, or report where it ended up."""
        log_file = '../logging.json'
        if not os.path.exists(log_file):
            print('No logs were generated.')
            return
        if file_local:
            # Caller asked to keep the file locally; nothing to upload.
            print('Logs written to ../logging.json')
            return
        s3_client = boto3.client('s3')
        timestamp = datetime.datetime.now()
        # Debug runs land under a separate prefix so they never pollute real logs.
        if debug_mode:
            object_name = f"logs/migrate-debug-mode/{timestamp.strftime('%Y-%m-%d')}/logging-{timestamp.strftime('%H:%M:%S')}.json"
        else:
            object_name = f"logs/migrate/{timestamp.strftime('%Y-%m-%d')}/logging-{timestamp.strftime('%H:%M:%S')}.json"
        bucket = environment.get_variable('FEATURE_STORE_S3_BUCKET')
        try:
            s3_client.upload_file(log_file, bucket, object_name, ExtraArgs={'ACL': 'bucket-owner-full-control'})
        except ClientError:
            raise
        # Only delete the local file once the upload succeeded.
        os.remove(log_file)

    def run(self, generate_logs: bool = False, debug_mode: bool = False) -> None:
        """Apply migrations for every pipeline writer, then handle the logs."""
        for pipeline in self.pipelines:
            for writer in pipeline.sink.writers:
                db = writer.db_config.database
                if db == 'cassandra':
                    ALLOWED_DATABASE[db].apply_migration(pipeline.feature_set, writer, debug_mode)
                else:
                    logger.warning(f'Butterfree not supporting {db} Migrations yet.')
        self._send_logs_to_s3(generate_logs, debug_mode)
# BUG FIX: the two decorator lines had lost their `@pytest.mark` prefix,
# leaving bare `.unit()` / `.parametrize(...)` expressions (a SyntaxError at
# module level).  Restored the decorators.
@pytest.mark.unit()
@pytest.mark.parametrize(
    ('markers', 'marker_name', 'expected_markers', 'expected_others'),
    [
        # No metadata attached at all.
        (None, 'not_found', [], []),
        # Metadata present but empty.
        ([], 'not_found', [], []),
        # One matching mark is split away from the rest.
        ([pytask.mark.produces(), pytask.mark.depends_on()], 'produces', [pytask.mark.produces()], [pytask.mark.depends_on()]),
        # Duplicate matching marks are all extracted.
        ([pytask.mark.produces(), pytask.mark.produces(), pytask.mark.depends_on()], 'produces', [pytask.mark.produces(), pytask.mark.produces()], [pytask.mark.depends_on()]),
    ],
)
def test_remove_marks_from_func(markers, marker_name, expected_markers, expected_others):
    """remove_marks() splits a function's marks into matching and remaining ones."""

    def func():
        ...

    if (markers is not None):
        func.pytask_meta = CollectionMetadata(markers=markers)
    (obj, result_markers) = remove_marks(func, marker_name)
    # Marks left on the returned object are exactly the non-matching ones.
    markers = get_all_marks(obj)
    assert (markers == expected_others)
    assert (result_markers == expected_markers)
class TestCuDevice(unittest.TestCase):
    """Timing-style sanity test for CuMatrix.resize()."""

    def testCudaMatrixResize(self):
        """Repeatedly resize random CuMatrix instances and report throughput."""
        size_multiples = [1, 2, 4, 8, 16, 32]
        num_matrices = 256
        time_in_secs = 0.2  # per size multiple
        for size_multiple in size_multiples:
            # Pre-draw random (rows, cols) targets for every matrix slot.
            sizes = []
            for i in range(num_matrices):
                num_rows = kaldi_math.rand_int(1, 10)
                num_rows *= (num_rows * size_multiple)
                num_cols = kaldi_math.rand_int(1, 10)
                num_cols *= (num_cols * size_multiple)
                sizes.append((num_rows, num_cols))
            matrices = [CuMatrix() for _ in range(num_matrices)]
            tim = Timer()
            num_floats_processed = 0
            while (tim.elapsed() < time_in_secs):
                # Toggle a random matrix between empty and its target size.
                matrix = kaldi_math.rand_int(0, (num_matrices - 1))
                if (matrices[matrix].num_rows() == 0):
                    (num_rows, num_cols) = sizes[matrix]
                    matrices[matrix].resize(num_rows, num_cols, MatrixResizeType.UNDEFINED)
                    num_floats_processed += (num_rows * num_cols)
                else:
                    matrices[matrix].resize(0, 0)
            # BUG FIX: the divisor was `tim.elapsed() * .0`, which is always
            # 0.0 and raised ZeroDivisionError.  Gigaflops = floats / (secs * 1e9).
            gflops = (num_floats_processed / (tim.elapsed() * 1.0e9))
            print('CuMatrix.resize: size multiple {}, speed was {} gigaflops'.format(size_multiple, gflops))
def from_pretrained(model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', archive_map=None, **kwargs):
    """Load a pretrained model ensemble plus its task and args.

    Resolves shorthand names through *archive_map*, downloads/locates the
    archive, picks up BPE/sentencepiece resources shipped with it, and
    delegates to ``checkpoint_utils.load_model_ensemble_and_task``.
    """
    from fairseq import checkpoint_utils, file_utils

    if archive_map is not None:
        if model_name_or_path in archive_map:
            model_name_or_path = archive_map[model_name_or_path]
        if data_name_or_path is not None and data_name_or_path in archive_map:
            data_name_or_path = archive_map[data_name_or_path]

    # An archive-map entry may itself be a dict carrying extra overrides.
    if isinstance(model_name_or_path, dict):
        entry = model_name_or_path
        for key, value in entry.items():
            if key == 'checkpoint_file':
                checkpoint_file = value
            elif key != 'path' and key not in kwargs:
                kwargs[key] = value
        model_name_or_path = entry['path']

    model_path = file_utils.load_archive_file(model_name_or_path)

    # Convenience: relative data paths are resolved inside the model archive.
    if data_name_or_path.startswith('.'):
        kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs['data'] = file_utils.load_archive_file(data_name_or_path)

    # Auxiliary tokenizer resources that may ship alongside the checkpoint.
    resource_to_arg = {'code': 'bpe_codes', 'bpecodes': 'bpe_codes', 'sentencepiece.bpe.model': 'sentencepiece_model'}
    for filename, arg_name in resource_to_arg.items():
        candidate = os.path.join(model_path, filename)
        if os.path.exists(candidate):
            kwargs[arg_name] = candidate

    if 'user_dir' in kwargs:
        utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))

    checkpoint_paths = [os.path.join(model_path, name) for name in checkpoint_file.split(os.pathsep)]
    models, args, task = checkpoint_utils.load_model_ensemble_and_task(checkpoint_paths, arg_overrides=kwargs)
    return {'args': args, 'task': task, 'models': models}
class Migration(migrations.Migration):
    """Create the multilingual `Page` model (title/help/verbose-name fields in
    five languages) linked to a domain attribute, conditions and a section."""
    dependencies = [('conditions', '0022_condition_locked'), ('domain', '0048_meta'), ('questions', '0068_meta')]
    # Auto-generated field list — edit via new migrations, not in place.
    operations = [migrations.CreateModel(name='Page', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(editable=False, verbose_name='created')), ('updated', models.DateTimeField(editable=False, verbose_name='updated')), ('uri', models.URLField(blank=True, help_text='The Uniform Resource Identifier of this page (auto-generated).', max_length=640, verbose_name='URI')), ('uri_prefix', models.URLField(help_text='The prefix for the URI of this page.', max_length=256, verbose_name='URI Prefix')), ('key', models.SlugField(blank=True, help_text='The internal identifier of this page.', max_length=128, verbose_name='Key')), ('path', models.CharField(blank=True, help_text='The path part of the URI of this page (auto-generated).', max_length=512, verbose_name='Path')), ('comment', models.TextField(blank=True, help_text='Additional internal information about this page.', verbose_name='Comment')), ('locked', models.BooleanField(default=False, help_text='Designates whether this page (and its questionsets and questions) can be changed.', verbose_name='Locked')), ('is_collection', models.BooleanField(default=False, help_text='Designates whether this page is a collection.', verbose_name='is collection')), ('order', models.IntegerField(default=0, help_text='The position of this page in lists.', verbose_name='Order')), ('title_lang1', models.CharField(blank=True, help_text='The title for this page in the primary language.', max_length=256, verbose_name='Title (primary)')), ('title_lang2', models.CharField(blank=True, help_text='The title for this page in the secondary language.', max_length=256, verbose_name='Title (secondary)')), ('title_lang3', models.CharField(blank=True, help_text='The title for this page in the tertiary language.', max_length=256, verbose_name='Title (tertiary)')), ('title_lang4', models.CharField(blank=True, help_text='The title for this page in the quaternary language.', 
max_length=256, verbose_name='Title (quaternary)')), ('title_lang5', models.CharField(blank=True, help_text='The title for this page in the quinary language.', max_length=256, verbose_name='Title (quinary)')), ('help_lang1', models.TextField(blank=True, help_text='The help text for this page in the primary language.', verbose_name='Help (primary)')), ('help_lang2', models.TextField(blank=True, help_text='The help text for this page in the secondary language.', verbose_name='Help (secondary)')), ('help_lang3', models.TextField(blank=True, help_text='The help text for this page in the tertiary language.', verbose_name='Help (tertiary)')), ('help_lang4', models.TextField(blank=True, help_text='The help text for this page in the quaternary language.', verbose_name='Help (quaternary)')), ('help_lang5', models.TextField(blank=True, help_text='The help text for this page in the quinary language.', verbose_name='Help (quinary)')), ('verbose_name_lang1', models.CharField(blank=True, help_text='The name displayed for this page in the primary language.', max_length=256, verbose_name='Name (primary)')), ('verbose_name_lang2', models.CharField(blank=True, help_text='The name displayed for this page in the secondary language.', max_length=256, verbose_name='Name (secondary)')), ('verbose_name_lang3', models.CharField(blank=True, help_text='The name displayed for this page in the tertiary language.', max_length=256, verbose_name='Name (tertiary)')), ('verbose_name_lang4', models.CharField(blank=True, help_text='The name displayed for this page in the quaternary language.', max_length=256, verbose_name='Name (quaternary)')), ('verbose_name_lang5', models.CharField(blank=True, help_text='The name displayed for this page in the quinary language.', max_length=256, verbose_name='Name (quinary)')), ('verbose_name_plural_lang1', models.CharField(blank=True, help_text='The plural name displayed for this page in the primary language.', max_length=256, verbose_name='Plural name 
(primary)')), ('verbose_name_plural_lang2', models.CharField(blank=True, help_text='The plural name displayed for this page in the secondary language.', max_length=256, verbose_name='Plural name (secondary)')), ('verbose_name_plural_lang3', models.CharField(blank=True, help_text='The plural name displayed for this page in the tertiary language.', max_length=256, verbose_name='Plural name (tertiary)')), ('verbose_name_plural_lang4', models.CharField(blank=True, help_text='The plural name displayed for this page in the quaternary language.', max_length=256, verbose_name='Plural name (quaternary)')), ('verbose_name_plural_lang5', models.CharField(blank=True, help_text='The plural name displayed for this page in the quinary language.', max_length=256, verbose_name='Plural name (quinary)')), ('attribute', models.ForeignKey(blank=True, help_text='The attribute this page belongs to.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pages', to='domain.attribute', verbose_name='Attribute')), ('conditions', models.ManyToManyField(blank=True, help_text='List of conditions evaluated for this page.', related_name='pages', to='conditions.Condition', verbose_name='Conditions')), ('section', models.ForeignKey(help_text='The section this page belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='pages', to='questions.section', verbose_name='Section'))], options={'verbose_name': 'page', 'verbose_name_plural': 'pages', 'ordering': ('section', 'order')}, bases=(models.Model, rdmo.core.models.TranslationMixin))]
def test_logging_broken_makereport(testdir):
    """An exception raised inside a hookwrapper'd makereport must not be hidden.

    BUG FIX: the embedded conftest had lost its ``@pytest.hookimpl`` decorator
    line — only ``(hookwrapper=True, tryfirst=True)`` survived as a bare
    expression — and its indentation was flattened, so the wrapper never ran.
    Restored the decorator and a consistent layout.
    """
    testdir.makepyfile(conftest=(
        '\n'
        'import pytest\n'
        '\n'
        '@pytest.hookimpl(hookwrapper=True, tryfirst=True)\n'
        'def pytest_runtest_makereport(call):\n'
        "    if call.when == 'call':\n"
        '        raise Exception("This should not be hidden")\n'
        '    yield\n'
    ))
    p = testdir.makepyfile(
        '\n'
        'def test_foo():\n'
        '    pass\n'
    )
    # Run in a subprocess so the crashing hook cannot take down this process.
    res = testdir.runpytest_subprocess(p)
    res.stdout.fnmatch_lines(['*This should not be hidden*'])
class DevDataset(Dataset):
    """OTT-QA dev split: each example's table is truncated, linearized and
    concatenated with its passage text, with optional on-disk caching."""

    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'ottqa_dev.cache')
        if os.path.exists(cache_path) and args.dataset.use_cache:
            # Fast path: reuse previously preprocessed examples.
            self.extended_data = torch.load(cache_path)
            return
        self.tab_processor = get_default_processor(
            max_cell_length=15,
            tokenizer=AutoTokenizer.from_pretrained(args.bert.location, use_fast=False),
            max_input_length=(args.seq2seq.table_truncation_max_length // 2),
        )
        self.extended_data = []
        # Each expansion pass re-processes the whole dataset once.
        n_passes = args.seq2seq.expansion if args.seq2seq.expansion else 1
        for _ in range(n_passes):
            for raw_data in tqdm(self.raw_datasets):
                self.extended_data.append(self._extend(raw_data))
        if args.dataset.use_cache:
            torch.save(self.extended_data, cache_path)

    def _extend(self, raw_data):
        """Build one example: truncated + linearized table followed by its passage."""
        example = copy.deepcopy(raw_data)
        question = example['question'].lower()
        truncated_table = copy.deepcopy(example['table'])
        for truncate_func in self.tab_processor.table_truncate_funcs:
            truncate_func.truncate_table(truncated_table, question, [])
        linear_table = self.tab_processor.table_linearize_func.process_table(truncated_table)
        struct_in = linear_table + example['passage']
        example.update({
            'struct_in': struct_in.lower(),
            'text_in': question.lower(),
            'seq_out': example['answer_text'].lower(),
        })
        return example

    def __getitem__(self, index) -> T_co:
        return self.extended_data[index]

    def __len__(self):
        return len(self.extended_data)
def produce_pred_data(save_path, output_path):
    """Run the CNN relation model over the test set and pickle, for each
    entity pair, its highest-probability non-NA predicted relation.

    Parameters
    ----------
    save_path : checkpoint path handed to ``cnnmodel.interaction``.
    output_path : destination pickle mapping ``"e1$e2"`` -> ``(prob, relation_id)``.
    """
    test_word = np.load('data/testall_word.npy')
    test_pos1 = np.load('data/testall_pos1.npy')
    test_pos2 = np.load('data/testall_pos2.npy')
    test_y = np.load('data/testall_y.npy')
    # `infile` instead of `input`: don't shadow the builtin.
    with open('origin_data/test.txt', 'r', encoding='utf-8') as infile:
        test_data = infile.readlines()
    # Sequences are fixed at length 70.
    test_word = np.reshape(test_word, [-1, 70])
    test_pos1 = np.reshape(test_pos1, [-1, 70])
    test_pos2 = np.reshape(test_pos2, [-1, 70])
    pred_entitypair = {}
    batch_size = 100
    steps = (len(test_y) // batch_size) + 1
    with tf.Graph().as_default():
        sess = tf.Session()
        with sess.as_default():
            interact = cnnmodel.interaction(sess, save_path)
            for step in tqdm(range(steps)):
                lo, hi = batch_size * step, batch_size * (step + 1)
                batch_word = test_word[lo:hi]
                batch_pos1 = test_pos1[lo:hi]
                batch_pos2 = test_pos2[lo:hi]
                # NOTE: removed the unused `batch_y = [np.argmax(i) ...]`
                # computation — the model's test() never consumed it.
                batch_entitypair = []
                for line in test_data[lo:hi]:
                    items = line.split('\t')
                    # Entity-pair key has the form "e1$e2".
                    batch_entitypair.append(items[0] + '$' + items[1])
                batch_relation, batch_prob = interact.test(batch_word, batch_pos1, batch_pos2)
                assert len(batch_relation) == len(batch_prob) and len(batch_relation) == len(batch_entitypair)
                for i in range(len(batch_relation)):
                    if batch_relation[i] == 0:
                        continue  # relation 0 is NA — skip
                    key = batch_entitypair[i]
                    value = (batch_prob[i], batch_relation[i])
                    # Keep only the most confident prediction per entity pair.
                    # (BUG FIX: dropped the dead `pred_entitypair[key] = []`
                    # that was immediately overwritten.)
                    if key not in pred_entitypair or value[0] > pred_entitypair[key][0]:
                        pred_entitypair[key] = value
    with open(output_path, 'wb') as outfile:
        pickle.dump(pred_entitypair, outfile)
class testsFromCommandLine(unittest.TestCase):
    """End-to-end checks that the bundled command-line scripts run cleanly."""

    def setUp(self):
        # Prime the model layer so every test starts from a parsed config.
        pynag.Model.ObjectDefinition.objects.get_all()

    def tearDown(self):
        # Guard against tests mutating the live nagios configuration.
        self.assertEqual(False, pynag.Model.config.needs_reparse(), 'Seems like nagios configuration changed while running the unittests. Some of the tests might have made changes!')

    def testCommandPluginTest(self):
        """The plugintest helper exits 0 with no output."""
        expected = (0, '', '')
        self.assertEqual(expected, pynag.Utils.runCommand(pynagbase + '/scripts/plugintest'))

    def testCommandPynag(self):
        """A spread of pynag subcommands must all exit 0."""
        script = pynagbase + '/scripts/pynag'
        commands = [
            script + ' list',
            script + ' list where host_name=localhost and object_type=host',
            script + ' list where host_name=localhost and object_type=host --json --debug',
            script + ' update where nonexistantfield=test set nonexistentfield=pynag_unit_testing',
            script + ' config --get cfg_dir',
        ]
        for command in commands:
            exit_code, stdout, stderr = pynag.Utils.runCommand(command, env={'PYTHONPATH': pynagbase})
            self.assertEqual(0, exit_code, 'Error when running command %s\nexit_code: %s\noutput: %s\nstderr: %s' % (command, exit_code, stdout, stderr))
def test_bookmarks_folder(kodi):
    """Listing a bookmarks folder returns the recorded expected entries."""
    properties = ['country', 'year', 'rating', 'duration', 'director', 'trailer', 'plot', 'cast', 'imdbnumber', 'votes', 'fanart']
    response = kodi.Files.GetDirectory(directory='plugin://video.kino.pub/bookmarks/161701/', properties=properties)
    assert expected_results.BOOKMARK_FOLDER_CONTENT == response['result']['files']
def test_dequantize():
    """mmcv.dequantize maps level indices back to bin centres and rejects bad args."""
    levels = 20
    qarr = np.random.randint(levels, size=(10, 10))

    # Default dtype is float64.
    arr = mmcv.dequantize(qarr, -1, 1, levels)
    assert arr.shape == qarr.shape
    assert arr.dtype == np.dtype('float64')
    # Each level index maps to the centre of its bin in [-1, 1).
    for row in range(qarr.shape[0]):
        for col in range(qarr.shape[1]):
            assert arr[row, col] == ((qarr[row, col] + 0.5) / 10) - 1

    # Explicit dtype is honoured.
    arr = mmcv.dequantize(qarr, -1, 1, levels, dtype=np.float32)
    assert arr.shape == qarr.shape
    assert arr.dtype == np.dtype('float32')

    # Invalid `levels` values must raise.
    with pytest.raises(ValueError):
        mmcv.dequantize(arr, -1, 1, levels=0)
    with pytest.raises(ValueError):
        mmcv.dequantize(arr, -1, 1, levels=10.0)
    # min_val >= max_val must raise.
    with pytest.raises(ValueError):
        mmcv.dequantize(arr, 2, 1, levels)
def regex(pattern: Union[(str, Pattern)], flags: int = 0):
    """Build a filter matching updates whose text/data/query matches *pattern*.

    Stores every match (``update.matches``) so handlers can inspect groups.
    """
    # Compile once up front; re-used for every update.
    compiled = pattern if isinstance(pattern, Pattern) else re.compile(pattern, flags)

    async def func(flt, _, update: Update):
        # Pick the text-bearing attribute for the update type.
        if isinstance(update, Message):
            value = update.text or update.caption
        elif isinstance(update, CallbackQuery):
            value = update.data
        elif isinstance(update, InlineQuery):
            value = update.query
        else:
            raise ValueError(f"Regex filter doesn't work with {type(update)}")
        if value:
            update.matches = list(flt.p.finditer(value)) or None
        return bool(update.matches)

    return create(func, 'RegexFilter', p=compiled)
def load_trec():
    """Load TREC (coarse labels) and carve a random 10% dev split off train."""
    raw = load_dataset('trec')
    train_split = raw['train']
    test_split = raw['test']
    order = list(range(len(train_split)))
    random.shuffle(order)
    num_reserve = int(len(train_split) * 0.1)

    def as_example(i):
        # Normalize to {'text', 'label'} using the coarse label set.
        return {'text': train_split[i]['text'], 'label': train_split[i]['label-coarse']}

    dev_dataset = [as_example(i) for i in order[-num_reserve:]]
    train_dataset = [as_example(i) for i in order[:-num_reserve]]
    test_dataset = [{'text': d['text'], 'label': d['label-coarse']} for d in test_split]
    return {'train': train_dataset, 'validation': dev_dataset, 'test': test_dataset}
class GrabButton(rq.Request):
    """X11 core-protocol GrabButton request (opcode 28): establish a passive
    pointer-button grab on a window.

    NOTE(review): the field order mirrors the wire encoding of the request —
    do not reorder or insert fields.
    """
    _request = rq.Struct(rq.Opcode(28), rq.Bool('owner_events'), rq.RequestLength(), rq.Window('grab_window'), rq.Card16('event_mask'), rq.Set('pointer_mode', 1, (X.GrabModeSync, X.GrabModeAsync)), rq.Set('keyboard_mode', 1, (X.GrabModeSync, X.GrabModeAsync)), rq.Window('confine_to', (X.NONE,)), rq.Cursor('cursor', (X.NONE,)), rq.Card8('button'), rq.Pad(1), rq.Card16('modifiers'))
def parser_options():
    """Parse CLI arguments and load the YAML options file.

    Returns the options dict with ``options['optim']['resume']`` overridden
    by the ``--resume`` argument.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--path_opt', default='option/RSITMD_AMFMN.yaml', type=str, help='path to a yaml options file')
    parser.add_argument('--resume', default='checkpoint/rsitmd_aba/0/AMFMN_best.pth.tar', type=str, help='path to a yaml options file')
    opt = parser.parse_args()
    with open(opt.path_opt, 'r') as handle:
        # BUG FIX: yaml.load() without an explicit Loader is deprecated and a
        # TypeError on PyYAML >= 6.  FullLoader keeps the old default
        # semantics; switch to SafeLoader if the config needs no custom tags.
        options = yaml.load(handle, Loader=yaml.FullLoader)
    options['optim']['resume'] = opt.resume
    return options
def assert_none_blocked(ad_blocker):
    """Assert the blocker blocks nothing, both on the URL lists and the dataset.

    BUG FIX: ``assert_not_blocked`` uses ``nonlocal ad_blocker`` but had been
    flattened to module level, where that ``nonlocal`` is a SyntaxError (no
    enclosing binding).  Restored it as a closure over this function's
    parameter, as the ``nonlocal`` requires.
    """
    assert_urls(ad_blocker, (NOT_OKAY_URLS + OKAY_URLS), False)

    def assert_not_blocked(url, source_url, resource_type):
        nonlocal ad_blocker
        assert (not ad_blocker._is_blocked(url, source_url, resource_type))

    run_function_on_dataset(assert_not_blocked)
def torch_dtype_from_trt(dtype):
    """Translate a TensorRT dtype into the corresponding torch dtype.

    Raises ``TypeError`` for TensorRT dtypes torch has no counterpart for.
    """
    trt_to_torch = {
        trt.bool: torch.bool,
        trt.int8: torch.int8,
        trt.int32: torch.int32,
        trt.float16: torch.float16,
        trt.float32: torch.float32,
    }
    try:
        return trt_to_torch[dtype]
    except KeyError:
        raise TypeError('%s is not supported by torch' % dtype)
def test():
    """Evaluate the saved NER model on the test split."""
    make_path(FLAGS)
    config = load_config(FLAGS.config_file)
    # Vocabulary/tag maps persisted at training time.
    with open(FLAGS.map_file, 'rb') as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)
    update_tag_scheme(sentences, FLAGS.tag_schema)
    manager = BatchManager(prepare_dataset(sentences, char_to_id, tag_to_id), 100)
    logger = get_logger(os.path.join('log', FLAGS.log_file))
    os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))
    tf_config.gpu_options.allow_growth = True
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        evaluate(sess, model, 'test', manager, id_to_tag, logger)
class TestIPTW():
    """Tests for the IPTW (inverse probability of treatment weighting) estimator.

    NOTE(review): many SAS reference constants below are visibly truncated —
    e.g. ``(- 0.)`` and the empty right-hand sides of ``sas_w_sum =`` (which
    are SyntaxErrors as written).  The original numeric values cannot be
    recovered from this file; restore them from the upstream test suite.
    """
    # NOTE(review): `data` (and the `sdata`/`cdata` parameters used below) are
    # consumed as pytest fixtures; this method presumably lost a
    # `@pytest.fixture` decorator — confirm against upstream.
    def data(self):
        # Small hand-built dataset: treatment A, outcome Y, confounder L.
        df = pd.DataFrame()
        df['A'] = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
        df['Y'] = [1, 0, 0, 0, 1, 1, 1, 0, 0, 1]
        df['L'] = [1, 1, 0, 0, 0, 1, 1, 1, 1, 0]
        return df
    def test_unstabilized_weights(self, data):
        # Unstabilized weights are 1 / Pr(A=a | L).
        ipt = IPTW(data, treatment='A', outcome='Y')
        ipt.treatment_model(model_denominator='L', stabilized=False, print_results=False)
        ipt.marginal_structural_model('A')
        ipt.fit()
        npt.assert_allclose(ipt.iptw, [3, 3, (4 / 3), (4 / 3), (4 / 3), 1.5, 1.5, 1.5, 1.5, 4])
    def test_stabilized_weights(self, data):
        # Stabilized weights multiply by the marginal Pr(A=a).
        ipt = IPTW(data, treatment='A', outcome='Y')
        ipt.treatment_model(model_denominator='L', print_results=False)
        ipt.marginal_structural_model('A')
        ipt.fit()
        npt.assert_allclose(ipt.iptw, [1.5, 1.5, (2 / 3), (2 / 3), (2 / 3), (3 / 4), (3 / 4), (3 / 4), (3 / 4), 2])
    def test_positivity_calculator(self, data):
        # Positivity diagnostics: mean/SD/min/max of the stabilized weights.
        ipt = IPTW(data, treatment='A', outcome='Y')
        ipt.treatment_model(model_denominator='L', print_results=False)
        ipt.marginal_structural_model('A')
        ipt.fit()
        ipt.positivity()
        npt.assert_allclose(ipt._pos_avg, 1)
        npt.assert_allclose(ipt._pos_sd, 0.456435, rtol=1e-05)
        npt.assert_allclose(ipt._pos_min, (2 / 3))
        npt.assert_allclose(ipt._pos_max, 2)
    def test_match_sas_unstabilized(self, sdata):
        # Cross-check against SAS output (unstabilized weights).
        sas_w_sum = 1086.25
        # NOTE(review): reference values below look truncated — restore upstream.
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), (- 0.))
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead')
        ipt.treatment_model(model_denominator=model, stabilized=False, print_results=False)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(np.sum(ipt.iptw), sas_w_sum, rtol=0.0001)
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, rtol=0.0001)
    def test_match_sas_stabilized(self, sdata):
        # NOTE(review): empty right-hand side — SyntaxError; value lost in transit.
        sas_w_sum =
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), (- 0.))
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead')
        ipt.treatment_model(model_denominator=model, print_results=False)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(np.sum(ipt.iptw), sas_w_sum, rtol=0.0001)
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, rtol=0.0001)
    def test_match_sas_smr_e(self, sdata):
        # SMR weights standardized to the exposed population.
        sas_w_sum = 158.288404
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), (- 0.))
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead', standardize='exposed')
        ipt.treatment_model(model_denominator=model, stabilized=False, print_results=False)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(np.sum(ipt.iptw), sas_w_sum, rtol=0.0001)
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, rtol=0.0001)
    def test_match_sas_smr_u(self, sdata):
        # SMR weights standardized to the unexposed population.
        # NOTE(review): empty right-hand side — SyntaxError; value lost in transit.
        sas_w_sum =
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), (- 0.))
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead', standardize='unexposed')
        ipt.treatment_model(model_denominator=model, stabilized=False, print_results=False)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(np.sum(ipt.iptw), sas_w_sum, rtol=0.0001)
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, rtol=0.0001)
    def test_match_sas_smr_e_stabilized(self, sdata):
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), (- 0.))
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead', standardize='exposed')
        ipt.treatment_model(model_denominator=model, print_results=False)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, rtol=0.0001)
    def test_match_sas_smr_u_stabilized(self, sdata):
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), (- 0.))
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead', standardize='unexposed')
        ipt.treatment_model(model_denominator=model, print_results=False)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, rtol=0.0001)
    def test_standardized_differences(self, sdata):
        # Balance diagnostics: unweighted vs weighted standardized mean differences.
        ipt = IPTW(sdata, treatment='art', outcome='dead')
        ipt.treatment_model(model_denominator='male + age0 + cd40 + dvl0', print_results=False)
        ipt.marginal_structural_model('art')
        ipt.fit()
        smd = ipt.standardized_mean_differences()
        npt.assert_allclose(np.array(smd['smd_u']), np.array([(- 0.015684), 0.022311, (- 0.4867), (- 0.015729)]), rtol=0.0001)
        npt.assert_allclose(np.array(smd['smd_w']), np.array([(- 0.097789), (- 0.012395), (- 0.018591), 0.050719]), rtol=0.0001)
    def test_match_r_stddiff(self):
        # Cross-check SMDs against R for binary/continuous/discrete/categorical
        # covariates, including a missing value in `bin`.
        df = pd.DataFrame()
        df['y'] = [1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0]
        df['treat'] = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
        df['bin'] = [0, 1, 0, np.nan, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1]
        df['con'] = [0.1, 0.0, 1.0, 1.1, 2.2, 1.3, 0.1, 0.5, 0.9, 0.5, 0.3, 0.2, 0.7, 0.9, 1.4]
        df['dis'] = [0, 1, 3, 2, 1, 0, 0, 0, 0, 0, 1, 3, 2, 2, 1]
        df['cat'] = [1, 2, 3, 1, 1, 2, 3, 1, 3, 2, 1, 2, 3, 2, 1]
        ipt = IPTW(df, treatment='treat', outcome='y')
        ipt.treatment_model(model_denominator='bin + con + dis + C(cat)', print_results=False)
        ipt.marginal_structural_model('treat')
        ipt.fit()
        smd = ipt.standardized_mean_differences()
        npt.assert_allclose(np.array(smd['smd_u']), np.array([0.342997, 0.0, 0.06668, (- 0.513553)]), rtol=0.0001)
        npt.assert_allclose(np.array(smd['smd_w']), np.array([0.206072, (- 0.148404), 0.035683, 0.085775]), rtol=0.0001)
    def test_match_sas_gbound(self, sdata):
        # Symmetric truncation of the propensity scores at [0.1, 0.9].
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), 0.)
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead')
        ipt.treatment_model(model_denominator=model, print_results=False, bound=0.1)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, atol=0.0001, rtol=0.0001)
    def test_match_sas_gbound2(self, sdata):
        # Asymmetric truncation bounds.
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), 0.)
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead')
        ipt.treatment_model(model_denominator=model, print_results=False, bound=[0.2, 0.9])
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, atol=0.0001, rtol=0.0001)
    def test_match_sas_gbound3(self, sdata):
        # Extreme truncation at 0.5.
        sas_rd = (- 0.)
        sas_rd_ci = ((- 0.), 0.)
        model = 'male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0'
        ipt = IPTW(sdata, treatment='art', outcome='dead')
        ipt.treatment_model(model_denominator=model, print_results=False, bound=0.5)
        ipt.marginal_structural_model('art')
        ipt.fit()
        npt.assert_allclose(ipt.risk_difference['RD'][1], sas_rd, rtol=1e-05)
        npt.assert_allclose((ipt.risk_difference['95%LCL'][1], ipt.risk_difference['95%UCL'][1]), sas_rd_ci, atol=0.0001, rtol=0.0001)
    def test_iptw_w_censor(self, sdata):
        # IPTW combined with inverse-probability-of-missing weights (binary outcome).
        iptw = IPTW(sdata, treatment='art', outcome='dead')
        iptw.treatment_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        iptw.missing_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        iptw.marginal_structural_model('art')
        iptw.fit()
        npt.assert_allclose(iptw.risk_difference['RD'][1], (- 0.08092), rtol=1e-05)
        npt.assert_allclose((iptw.risk_difference['95%LCL'][1], iptw.risk_difference['95%UCL'][1]), ((- 0.15641), (- 0.00543)), atol=0.0001, rtol=0.0001)
    def test_iptw_w_censor2(self, cdata):
        # Same with a continuous outcome (average treatment effect).
        iptw = IPTW(cdata, treatment='art', outcome='cd4_wk45')
        iptw.treatment_model('male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        iptw.missing_model('art + male + age0 + age_rs1 + age_rs2 + cd40 + cd4_rs1 + cd4_rs2 + dvl0', print_results=False)
        iptw.marginal_structural_model('art')
        iptw.fit()
        npt.assert_allclose(iptw.average_treatment_effect['ATE'][1], 205.11238, rtol=1e-05)
        npt.assert_allclose((iptw.average_treatment_effect['95%LCL'][1], iptw.average_treatment_effect['95%UCL'][1]), (96.88535, 313.33941), atol=0.0001, rtol=0.0001)
class MoleculeDataset(Dataset):
    """A Dataset of MoleculeDatapoints with lazily built batched molecular
    graphs and optional normalization of features and targets."""

    def __init__(self, data: List[MoleculeDatapoint]):
        """Store the datapoints; graphs and scalers are created lazily."""
        self._data = data
        self._scaler = None       # StandardScaler for features, set by normalize_features()
        self._batch_graph = None  # cached List[BatchMolGraph], built on first batch_graph()
        self._random = Random()

    def smiles(self, flatten: bool = False) -> Union[List[str], List[List[str]]]:
        """SMILES strings per datapoint; flattened across molecules if requested."""
        if flatten:
            return [smiles for d in self._data for smiles in d.smiles]
        return [d.smiles for d in self._data]

    def mols(self, flatten: bool = False) -> Union[List[Chem.Mol], List[List[Chem.Mol]]]:
        """RDKit molecules per datapoint; flattened across molecules if requested."""
        if flatten:
            return [mol for d in self._data for mol in d.mol]
        return [d.mol for d in self._data]

    def number_of_molecules(self) -> int:
        """Molecules per datapoint, or None for an empty dataset."""
        return (self._data[0].number_of_molecules if (len(self._data) > 0) else None)

    def batch_graph(self) -> List[BatchMolGraph]:
        """Build (once) and return one BatchMolGraph per molecule position.

        NOTE(review): assumes a non-empty dataset — ``mol_graphss[0]`` raises
        IndexError otherwise; callers appear to guarantee this.
        """
        if (self._batch_graph is None):
            # (Removed a dead ``self._batch_graph = []`` assignment that was
            # immediately overwritten by the list built below.)
            mol_graphss = [[MolGraph(m, d.atom_features) for m in d.mol] for d in self._data]
            self._batch_graph = [BatchMolGraph([g[i] for g in mol_graphss]) for i in range(len(mol_graphss[0]))]
        return self._batch_graph

    def features(self) -> List[np.ndarray]:
        """Extra molecule-level features, or None when absent."""
        if ((len(self._data) == 0) or (self._data[0].features is None)):
            return None
        return [d.features for d in self._data]

    def atom_descriptors(self) -> List[np.ndarray]:
        """Per-atom descriptor arrays, or None when absent."""
        if ((len(self._data) == 0) or (self._data[0].atom_descriptors is None)):
            return None
        return [d.atom_descriptors for d in self._data]

    def targets(self) -> List[List[Optional[float]]]:
        """Target values for every datapoint (may contain None entries)."""
        return [d.targets for d in self._data]

    def num_tasks(self) -> int:
        """Number of prediction tasks, or None for an empty dataset."""
        return (self._data[0].num_tasks() if (len(self._data) > 0) else None)

    def features_size(self) -> int:
        """Length of the molecule-level feature vector, or None when absent."""
        return (len(self._data[0].features) if ((len(self._data) > 0) and (self._data[0].features is not None)) else None)

    def atom_descriptors_size(self) -> int:
        """Per-atom descriptor dimensionality, or None when absent."""
        return (len(self._data[0].atom_descriptors[0]) if ((len(self._data) > 0) and (self._data[0].atom_descriptors is not None)) else None)

    def atom_features_size(self) -> int:
        """Per-atom feature dimensionality, or None when absent."""
        return (len(self._data[0].atom_features[0]) if ((len(self._data) > 0) and (self._data[0].atom_features is not None)) else None)

    def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:
        """Scale features in place with ``scaler`` (or fit one on raw features).

        Returns the scaler used, or None when the dataset has no features.
        """
        if ((len(self._data) == 0) or (self._data[0].features is None)):
            return None
        if (scaler is not None):
            self._scaler = scaler
        elif (self._scaler is None):
            features = np.vstack([d.raw_features for d in self._data])
            self._scaler = StandardScaler(replace_nan_token=replace_nan_token)
            self._scaler.fit(features)
        for d in self._data:
            d.set_features(self._scaler.transform(d.raw_features.reshape(1, -1))[0])
        return self._scaler

    def normalize_targets(self) -> StandardScaler:
        """Fit a scaler on raw targets, apply it in place, and return it."""
        targets = [d.raw_targets for d in self._data]
        scaler = StandardScaler().fit(targets)
        scaled_targets = scaler.transform(targets).tolist()
        self.set_targets(scaled_targets)
        return scaler

    def scale_targets(self, scaler: StandardScaler):
        """Apply a pre-fitted ``scaler`` to raw targets in place."""
        targets = [d.raw_targets for d in self._data]
        scaled_targets = scaler.transform(targets).tolist()
        self.set_targets(scaled_targets)

    def set_targets(self, targets: List[List[Optional[float]]]) -> None:
        """Overwrite every datapoint's targets (lengths must match)."""
        assert (len(self._data) == len(targets))
        for i in range(len(self._data)):
            self._data[i].set_targets(targets[i])

    def reset_features_and_targets(self) -> None:
        """Restore every datapoint's raw (unscaled) features and targets."""
        for d in self._data:
            d.reset_features_and_targets()

    def __len__(self) -> int:
        return len(self._data)

    def __getitem__(self, idx: Union[int, slice]) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:
        return self._data[idx]
class Rumor_Data(Dataset):
    """Rumor-detection dataset holding pre-tokenised post text, attention
    masks, class labels and event labels as torch tensors."""

    def __init__(self, dataset):
        """Build tensors from a dict with parallel, equal-length sequences
        under the keys 'post_text', 'mask', 'label' and 'event_label'."""
        self.text = torch.from_numpy(np.array(dataset['post_text']))
        self.mask = torch.from_numpy(np.array(dataset['mask']))
        self.label = torch.from_numpy(np.array(dataset['label']))
        self.event_label = torch.from_numpy(np.array(dataset['event_label']))
        # Fixed typo in the log line: "labe" -> "label".
        print(('TEXT: %d, label: %d, Event: %d' % (len(self.text), len(self.label), len(self.event_label))))

    def __len__(self):
        """Number of samples (one label per sample)."""
        return len(self.label)

    def __getitem__(self, idx):
        """Return ((text, mask), label, event_label) for sample ``idx``."""
        return ((self.text[idx], self.mask[idx]), self.label[idx], self.event_label[idx])
def test_hourglass_ae_backbone():
    """HourglassAENet: invalid configs raise; valid configs emit one
    (1, 34, 128, 128) feature map per stack for a 512x512 input."""
    # Invalid: at least one stack is required.
    with pytest.raises(AssertionError):
        HourglassAENet(num_stacks=0)
    # Invalid: stage_channels length must be consistent with downsample_times.
    with pytest.raises(AssertionError):
        HourglassAENet(downsample_times=5, stage_channels=[256, 256, 384, 384, 384])
    # Valid configs — folded the two duplicated stanzas into one loop.
    for num_stacks in (1, 2):
        model = HourglassAENet(num_stacks=num_stacks)
        model.init_weights()
        model.train()
        imgs = torch.randn(1, 3, 512, 512)
        feat = model(imgs)
        assert len(feat) == num_stacks
        for out in feat:
            assert out.shape == torch.Size([1, 34, 128, 128])
def _add_ancillary_variables_attrs(data_arr: xr.DataArray) -> None:
    """Collapse the 'ancillary_variables' attribute into a space-separated
    string of the ancillary DataArrays' names, or drop the attribute
    entirely when there are none."""
    ancillary = data_arr.attrs.get('ancillary_variables', [])
    names = [dep.attrs['name'] for dep in ancillary]
    if not names:
        data_arr.attrs.pop('ancillary_variables', None)
    else:
        data_arr.attrs['ancillary_variables'] = ' '.join(names)
class UpBlock2D(nn.Module):
    """Decoder block: ``num_layers`` ResnetBlocks, each consuming one skip
    connection, optionally followed by a single learned 2x upsampler."""

    def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_upsample=True):
        super().__init__()
        blocks = []
        for layer_idx in range(num_layers):
            # Last layer's skip comes from the encoder block at the coarser
            # resolution (in_channels); earlier skips are out_channels wide.
            skip_ch = in_channels if layer_idx == num_layers - 1 else out_channels
            # First layer receives the previous decoder block's output.
            input_ch = prev_output_channel if layer_idx == 0 else out_channels
            blocks.append(ResnetBlock(in_channels=input_ch + skip_ch, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
        self.resnets = nn.ModuleList(blocks)
        self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) if add_upsample else None

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None):
        """Consume skips from the END of ``res_hidden_states_tuple``, one per
        resnet, then upsample if configured."""
        for resnet in self.resnets:
            skip = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = resnet(torch.cat([hidden_states, skip], dim=1), temb)
        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)
        return hidden_states
class TestMarketplace():
    """Exercise the Red Hat marketplace API wrappers with ``requests.request``
    mocked out.

    NOTE(review): the original source had bare string expressions
    ``('requests.request')`` where mock decorators belong; each test method
    takes a ``requests_mock`` argument, so these must be ``patch`` decorators.
    Imported here at class scope so the block is self-contained.
    """
    from unittest.mock import patch

    @patch('requests.request')
    def test_timeout_exception(self, requests_mock):
        """Every API helper degrades gracefully on an HTTP read timeout."""
        requests_mock.side_effect = requests.exceptions.ReadTimeout()
        user_api = RedHatUserApi(app_config)
        subscription_api = RedHatSubscriptionApi(app_config)
        customer_id = user_api.lookup_customer_id('')
        assert (customer_id is None)
        subscription_response = subscription_api.lookup_subscription(123456, 'sku')
        assert (subscription_response is None)
        subscription_sku = subscription_api.get_subscription_sku(123456)
        assert (subscription_sku is None)
        extended_subscription = subscription_api.extend_subscription(12345, 102623)
        assert (extended_subscription is None)
        # create_entitlement reports the HTTP status (408 Request Timeout).
        create_subscription_response = subscription_api.create_entitlement(12345, 'sku')
        assert (create_subscription_response == 408)

    @patch('requests.request')
    def test_user_lookup(self, requests_mock):
        """Customer id comes from the account-holder payload; an
        organization-only payload yields None."""
        user_api = RedHatUserApi(app_config)
        requests_mock.return_value.content = json.dumps(mocked_user_service_response)
        customer_id = user_api.lookup_customer_id('')
        assert (customer_id == '1234567')
        requests_mock.return_value.content = json.dumps(mocked_organization_only_response)
        customer_id = user_api.lookup_customer_id('')
        assert (customer_id is None)
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    ``share`` selects the incremental-decoding cache strategy:
      1 - self-attention: new key/value projections are appended to the
          running cache under keys 'k'/'v';
      2 - encoder-decoder attention: key/value projections are computed once
          and reused from 'c_k'/'c_v';
      otherwise - no caching, all projections recomputed each call.
    """
    def __init__(self, h, d_model, attn_p=0.1, static=False, share=3):
        super(MultiHeadAttention, self).__init__()
        self.h = h           # number of attention heads
        self.d = d_model     # model (embedding) dimension
        self.share = share
        assert ((d_model % h) == 0)
        self.d_head = (d_model // h)  # per-head dimension
        self.fc_query = Bottle(Linear(d_model, (h * self.d_head), bias=False))
        self.fc_key = Bottle(Linear(d_model, (h * self.d_head), bias=False))
        self.fc_value = Bottle(Linear(d_model, (h * self.d_head), bias=False))
        self.fc_concat = Bottle(Linear((h * self.d_head), d_model, bias=False))
        # NOTE(review): self.sm is never used in forward() (F.softmax is
        # called directly); kept to preserve the module's attribute set.
        self.sm = nn.Softmax(dim=(- 1))
        if static:
            self.attn_dropout = StaticDropout(attn_p)
        else:
            self.attn_dropout = nn.Dropout(attn_p)
    def forward(self, query, key, value, mask, incremental=False, incremental_cache=None):
        """Return (output, coverage).

        query/key/value are time-major (len, batch, d_model); ``coverage`` is
        the attention averaged over heads, shape (batch, len_query, len_key).
        """
        (len_query, b) = (query.size(0), query.size(1))
        if (self.share == 1):
            # Self-attention: project all three, then append to the running
            # key/value cache when decoding incrementally.
            proj_query = self.fc_query(query)
            proj_key = self.fc_key(key)
            proj_value = self.fc_value(value)
            if incremental:
                if (('k' in incremental_cache) and ('v' in incremental_cache)):
                    proj_key = torch.cat([incremental_cache['k'], proj_key], dim=0)
                    incremental_cache['k'] = proj_key
                    proj_value = torch.cat([incremental_cache['v'], proj_value], dim=0)
                    incremental_cache['v'] = proj_value
                    (len_key, b_) = (proj_key.size(0), proj_key.size(1))
                else:
                    # First incremental step: seed the cache.
                    incremental_cache['k'] = proj_key
                    incremental_cache['v'] = proj_value
        elif (self.share == 2):
            # Encoder-decoder attention: keys/values depend only on the
            # encoder output, so compute them once and reuse.
            proj_query = self.fc_query(query)
            if (incremental and (('c_k' in incremental_cache) and ('c_v' in incremental_cache))):
                proj_key = incremental_cache['c_k']
                proj_value = incremental_cache['c_v']
            else:
                proj_key = self.fc_key(key)
                proj_value = self.fc_value(value)
                if incremental:
                    incremental_cache['c_k'] = proj_key
                    incremental_cache['c_v'] = proj_value
        else:
            # No caching.
            proj_query = self.fc_query(query)
            proj_key = self.fc_key(key)
            proj_value = self.fc_value(value)
        (q, k, v) = (proj_query, proj_key, proj_value)
        (len_key, b_) = (k.size(0), k.size(1))
        # Reshape to (batch*heads, len, d_head) for batched matmul.
        q = q.contiguous().view(len_query, (b * self.h), self.d_head).transpose(0, 1)
        k = k.contiguous().view(len_key, (b * self.h), self.d_head).transpose(0, 1)
        v = v.contiguous().view(len_key, (b * self.h), self.d_head).transpose(0, 1)
        # Scale queries by 1/sqrt(d_head).
        q = (q * (self.d_head ** (- 0.5)))
        attns = torch.bmm(q, k.transpose(1, 2))
        attns = attns.view(b, self.h, len_query, len_key)
        if (mask is not None):
            # Broadcast the mask over the head dimension and block masked
            # positions with -inf before the softmax.
            mask_ = mask.unsqueeze((- 3))
            attns = attns.float().masked_fill_(mask_, (- float('inf'))).type_as(attns)
        # Softmax in float64 when double precision is globally enabled.
        dtype_ = (torch.float64 if onmt.constants.double_precision else torch.float32)
        attns = F.softmax(attns, dim=(- 1), dtype=dtype_).type_as(attns)
        # Head-averaged attention, returned as "coverage" before dropout.
        coverage = torch.mean(attns, dim=1)
        attns = self.attn_dropout(attns)
        attns = attns.view((b * self.h), len_query, len_key)
        out = torch.bmm(attns, v)
        # Back to (len_query, batch, d_model) and project out.
        out = out.transpose(0, 1).contiguous().view(len_query, b, self.d)
        out = self.fc_concat(out)
        return (out, coverage)
def waitForEvent(emitter: EventEmitter, eventName: str, predicate: Callable[([Any], bool)], timeout: float, loop: asyncio.AbstractEventLoop) -> Awaitable:
    """Return a future resolved with the first ``eventName`` event on
    ``emitter`` that satisfies ``predicate``, or rejected with TimeoutError
    after ``timeout`` milliseconds (a falsy timeout disables the timer).
    """
    promise = loop.create_future()

    def resolveCallback(target: Any) -> None:
        # Guard: the timeout may already have finished the future.
        if not promise.done():
            promise.set_result(target)

    def rejectCallback(exception: Exception) -> None:
        # Guard: a matching event may already have finished the future.
        if not promise.done():
            promise.set_exception(exception)

    async def timeoutTimer() -> None:
        await asyncio.sleep(timeout / 1000)
        # BUG FIX: previously the listener stayed registered after a timeout,
        # and a late matching event would call set_result on a finished
        # future, raising InvalidStateError. Clean up before rejecting.
        cleanup()
        rejectCallback(TimeoutError('Timeout exceeded while waiting for event'))

    def _listener(target: Any) -> None:
        if not predicate(target):
            return
        cleanup()
        resolveCallback(target)

    listener = addEventListener(emitter, eventName, _listener)
    if timeout:
        eventTimeout = loop.create_task(timeoutTimer())

    def cleanup() -> None:
        removeEventListeners([listener])
        # Only cancel a still-pending timer (cleanup may run from the timer
        # task itself once it has passed its final await).
        if timeout and not eventTimeout.done():
            eventTimeout.cancel()

    return promise
def bleu_score(input: Union[(str, Sequence[str])], target: Sequence[Union[(str, Sequence[str])]], n_gram: int=4, weights: Optional[torch.Tensor]=None, device: Optional[torch.device]=None) -> torch.Tensor:
    """Compute the BLEU score of ``input`` against the ``target`` references
    using up-to-``n_gram`` n-gram statistics and optional per-order weights."""
    stats = _bleu_score_update(input, target, n_gram, device)
    input_len, target_len, matches, possible = stats
    return _bleu_score_compute(input_len, target_len, matches, possible, n_gram, weights)
@pytest.mark.parametrize('manager, expected', zip(managers(), list(LEN.values())))
def test_get_compressed_output_size(manager, expected):
    """get_compressed_output_size on a freshly compressed buffer reports the
    size expected for each compression manager.

    NOTE(review): the decorator line had been mangled to a bare
    ``.parametrize(...)`` (a syntax error); restored as a pytest mark.
    """
    length = 10000
    dtype = cupy.uint8
    # One device element per `itemsize` bytes, filled with 0..n-1.
    data = cupy.array(np.arange(0, (length // cupy.dtype(dtype).type(0).itemsize), dtype=dtype))
    compressor_instance = manager()
    compressed = compressor_instance.compress(data)
    buffer_size = compressor_instance.get_compressed_output_size(compressed)
    assert_compression_size(buffer_size, expected)
class KaggleDataModel(DataModel):
    """DataModel over one or more Kaggle tables, serialisable both for an
    LLM prompt and for a React table widget."""

    def get_llm_side_data(self, serialize_method: str='tsv', num_visible_rows: int=3) -> Any:
        """Serialise every raw table with ``serialize_df`` and join the
        results with newlines for inclusion in an LLM prompt."""
        formatted_tables = []
        for raw_path in self.raw_data_path:
            table_data = self.raw_data[raw_path]
            table_name = self.raw_data_name[raw_path]
            formatted_tables.append(serialize_df(table_data, table_name, raw_path, serialize_method, num_visible_rows))
        return '\n'.join(formatted_tables)

    @staticmethod
    def to_react_table(table: pd.DataFrame) -> str:
        """JSON payload ({'columns': [...], 'data': [...]}) for a React table.

        BUG FIX: declared @staticmethod — the function takes no ``self`` yet
        is invoked as ``self.to_react_table(...)`` below, which previously
        passed the instance as ``table`` and raised TypeError.
        """
        columns = [{'accessorKey': col, 'header': col} for col in table.columns.tolist()]
        data = table.fillna('').to_dict(orient='records')
        return json.dumps({'columns': columns, 'data': data})

    def get_human_side_data(self) -> Any:
        """Return a JSON object mapping table name -> React-table payload."""
        react_tables = {}
        for table_path in self.raw_data_path:
            table_name = self.raw_data_name[table_path]
            react_tables[table_name] = self.to_react_table(self.raw_data[table_path])
        return json.dumps(react_tables)
def is_valid_balanceproof_signature(balance_proof: BalanceProofSignedState, sender_address: Address) -> SuccessOrError:
    """Verify that ``balance_proof`` carries a valid signature from
    ``sender_address`` over the packed balance-proof data."""
    canonical = CanonicalIdentifier(chain_identifier=balance_proof.chain_id, token_network_address=balance_proof.token_network_address, channel_identifier=balance_proof.channel_identifier)
    balance_hash = hash_balance_data(balance_proof.transferred_amount, balance_proof.locked_amount, balance_proof.locksroot)
    signed_data = pack_balance_proof(nonce=balance_proof.nonce, balance_hash=balance_hash, additional_hash=balance_proof.message_hash, canonical_identifier=canonical)
    return is_valid_signature(data=signed_data, signature=balance_proof.signature, sender_address=sender_address)
def test_validate_tpm_conditional_independence():
    """A conditionally dependent TPM fails both the explicit independence
    check and validate(), unless the check is explicitly disabled."""
    rows = [[1, 0.0, 0.0, 0], [0, 0.5, 0.5, 0], [0, 0.5, 0.5, 0], [0, 0.0, 0.0, 1]]
    tpm = ExplicitTPM(np.array(rows))
    with pytest.raises(exceptions.ConditionallyDependentError):
        tpm.conditionally_independent()
    with pytest.raises(exceptions.ConditionallyDependentError):
        tpm.validate()
    # Skipping the independence check must let validation pass.
    tpm.validate(check_independence=False)
def _get_n_and_p(bitwidth: tf.Variable, use_symmetric_encoding: tf.Variable) -> Tuple[(tf.Variable, tf.Variable)]:
    """Return the (n, p) quantization grid limits for ``bitwidth``.

    Symmetric encoding:  n = -(2^(bw-1)) + 1,  p = 2^(bw-1) - 1
    Asymmetric encoding: n = 0,                p = 2^bw - 1
    """
    bitwidth = tf.cast(bitwidth, tf.float32)
    two = tf.cast(tf.constant(2), tf.float32)
    two_pow_bw = tf.cast(tf.pow(two, bitwidth), tf.float32)
    two_pow_bw_minus_1 = tf.cast(tf.pow(two, (bitwidth - 1)), tf.float32)
    minus_one = tf.cast(tf.constant(-1), tf.float32)
    one = tf.cast(tf.constant(1), tf.float32)

    def symmetric_n() -> tf.Variable:
        return tf.add(tf.multiply(minus_one, two_pow_bw_minus_1), one)

    def symmetric_p() -> tf.Variable:
        return tf.subtract(two_pow_bw_minus_1, one)

    def asymmetric_n() -> tf.Variable:
        return tf.cast(0, tf.float32)

    def asymmetric_p() -> tf.Variable:
        return tf.cast((two_pow_bw - 1), tf.float32)

    # tf.cond requires callables, so each limit is a tiny nullary function.
    n = tf.cond(use_symmetric_encoding, symmetric_n, asymmetric_n)
    p = tf.cond(use_symmetric_encoding, symmetric_p, asymmetric_p)
    return (n, p)
class Cityscapes(VisionDataset):
    """Cityscapes dataset (images plus instance/semantic/polygon/color
    targets), with optional remapping of the 35 raw label ids to a compact
    20-class train-id space (0 = void/ignore, 1..19 = evaluation classes)."""

    CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id', 'has_instances', 'ignore_in_eval', 'color'])
    classes = [
        CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)),
        CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)),
        CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)),
        CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)),
        CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)),
        CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)),
        CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)),
        CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)),
        CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)),
        CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)),
        CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)),
        CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)),
        CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)),
        CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)),
        CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)),
        CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)),
        CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)),
        CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)),
        CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)),
        CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)),
        CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)),
        CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)),
        CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)),
        CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)),
        CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)),
        CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)),
        CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)),
        CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)),
        CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)),
        CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)),
        CityscapesClass('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)),
    ]
    # Raw label ids and their compact train-id counterparts (parallel tuples
    # consumed by the module-level `remap` helper).
    full_classes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, -1)
    new_classes = (0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 5, 0, 0, 0, 6, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 17, 18, 19, 0)

    def __init__(self, root, split='train', mode='fine', target_type='instance', transform=None, target_transform=None, transforms=None, remap=True):
        """Index images and targets under ``root``; extract the official zip
        archives when the directories are missing but the zips are present.

        remap: when True (default), __getitem__ converts raw label ids to
        the compact train-id space via ``full_classes``/``new_classes``.
        """
        super(Cityscapes, self).__init__(root, transforms, transform, target_transform)
        self.mode = ('gtFine' if (mode == 'fine') else 'gtCoarse')
        self.images_dir = os.path.join(self.root, 'leftImg8bit', split)
        self.targets_dir = os.path.join(self.root, self.mode, split)
        self.target_type = target_type
        self.split = split
        self.images = []
        self.targets = []
        self.remap = remap
        verify_str_arg(mode, 'mode', ('fine', 'coarse'))
        if (mode == 'fine'):
            valid_modes = ('train', 'test', 'val')
        else:
            valid_modes = ('train', 'train_extra', 'val')
        msg = "Unknown value '{}' for argument split if mode is '{}'. Valid values are {{{}}}."
        msg = msg.format(split, mode, iterable_to_str(valid_modes))
        verify_str_arg(split, 'split', valid_modes, msg)
        # Normalise target_type to a list of validated type names.
        if (not isinstance(target_type, list)):
            self.target_type = [target_type]
        [verify_str_arg(value, 'target_type', ('instance', 'semantic', 'polygon', 'color')) for value in self.target_type]
        if (not os.path.isdir(self.images_dir)):
            # Directories absent: try to extract the official zip archives.
            if (split == 'train_extra'):
                image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainextra.zip'))
            else:
                image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainvaltest.zip'))
            if (self.mode == 'gtFine'):
                target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '_trainvaltest.zip'))
            elif (self.mode == 'gtCoarse'):
                target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '.zip'))
            if (os.path.isfile(image_dir_zip) and os.path.isfile(target_dir_zip)):
                extract_archive(from_path=image_dir_zip, to_path=self.root)
                extract_archive(from_path=target_dir_zip, to_path=self.root)
            else:
                raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the specified "split" and "mode" are inside the "root" directory')
        # Build parallel lists of image paths and per-image target paths.
        for city in os.listdir(self.images_dir):
            img_dir = os.path.join(self.images_dir, city)
            target_dir = os.path.join(self.targets_dir, city)
            for file_name in os.listdir(img_dir):
                target_types = []
                for t in self.target_type:
                    target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0], self._get_target_suffix(self.mode, t))
                    target_types.append(os.path.join(target_dir, target_name))
                self.images.append(os.path.join(img_dir, file_name))
                self.targets.append(target_types)

    def __getitem__(self, index):
        """Return (image, target); target is a tuple when several
        target_types were requested."""
        image = Image.open(self.images[index]).convert('RGB')
        targets = []
        for (i, t) in enumerate(self.target_type):
            if (t == 'polygon'):
                target = self._load_json(self.targets[index][i])
            else:
                target = Image.open(self.targets[index][i])
            targets.append(target)
        target = (tuple(targets) if (len(targets) > 1) else targets[0])
        # BUG FIX: honor the `remap` flag. Previously remapping was applied
        # unconditionally, so the constructor's remap=False was ignored.
        if self.remap:
            target = remap(target, self.full_classes, self.new_classes)
        if (self.transforms is not None):
            (image, target) = self.transforms(image, target)
        return (image, target)

    def __len__(self):
        """Number of indexed images."""
        return len(self.images)

    def extra_repr(self):
        """Summary lines for the torchvision repr."""
        lines = ['Split: {split}', 'Mode: {mode}', 'Type: {target_type}']
        return '\n'.join(lines).format(**self.__dict__)

    def _load_json(self, path):
        """Load a polygon annotation file."""
        with open(path, 'r') as file:
            data = json.load(file)
        return data

    def _get_target_suffix(self, mode, target_type):
        """Filename suffix for a given annotation mode and target type."""
        if (target_type == 'instance'):
            return '{}_instanceIds.png'.format(mode)
        elif (target_type == 'semantic'):
            return '{}_labelIds.png'.format(mode)
        elif (target_type == 'color'):
            return '{}_color.png'.format(mode)
        else:
            return '{}_polygons.json'.format(mode)
@pytest.mark.django_db
def test_get_conference_voucher_with_invalid_code(graphql_client, conference, mocker, requests_mock):
    """A 404 from the Pretix voucher endpoint surfaces as a null voucher.

    NOTE(review): the decorator had been mangled to a bare ``_db`` and the
    mocked URL literal was destroyed in the source; the URL below is a
    placeholder (requests_mock only needs it to match the resolver's request)
    — restore the real Pretix endpoint before merging.
    """
    requests_mock.get('https://pretix/api/organizers/events/vouchers', status_code=404)
    response = graphql_client.query('query($code: String!, $voucherCode: String!) {\n conference(code: $code) {\n voucher(code: $voucherCode) {\n id\n }\n }\n }', variables={'code': conference.code, 'voucherCode': 'test'})
    assert (response['data']['conference']['voucher'] is None)
# NOTE(review): removed non-code residue accidentally pasted into the file
# (dataset-viewer boilerplate: "Subsets and Splits / No community queries
# yet / The top public SQL queries..."), which broke Python parsing.