code stringlengths 281 23.7M |
|---|
class Value(object):
    """A named scalar setting that notifies an attached watcher on change."""

    def __init__(self, name, initial, **kwargs):
        self.name = name
        self.watch = False  # no watcher attached yet, so set() stays local
        self.set(initial)
        self.info = {'type': 'Value'}
        # A profiled value is implicitly persistent as well.
        if kwargs.get('profiled'):
            self.info['profiled'] = True
            self.info['persistent'] = True
        if kwargs.get('persistent'):
            self.info['persistent'] = True

    def update(self, value):
        """Store *value* only when it differs from the current one."""
        if self.value != value:
            self.set(value)

    def get_msg(self):
        """Render the current value in wire format (quoted str, true/false, or str())."""
        current = self.value
        if isinstance(current, str):
            return '"' + current + '"'
        if isinstance(current, bool):
            return 'true' if current else 'false'
        return str(current)

    def set(self, value):
        """Assign *value* and push a notification to any active watcher."""
        self.value = value
        if not self.watch:
            return
        if self.watch.period == 0:
            # Immediate watch: send the update right away.
            self.client.send(self.name + '=' + self.get_msg() + '\n')
        elif self.pwatch:
            # Periodic watch: re-arm once the watch's scheduled time has passed.
            now = time.monotonic()
            if now >= self.watch.time:
                self.watch.time = now
                self.client.values.insert_watch(self.watch)
                self.pwatch = False
class ContactBase(models.Model):
    """Abstract base model for a RapidSMS contact (a person with 0+ connections)."""

    name = models.CharField(max_length=100, blank=True)
    created_on = models.DateTimeField(auto_now_add=True)
    modified_on = models.DateTimeField(auto_now=True)
    language = models.CharField(max_length=6, blank=True, help_text='The language which this contact prefers to communicate in, as a W3C language tag. If this field is left blank, RapidSMS will default to the value in LANGUAGE_CODE.')

    class Meta:
        abstract = True
        app_label = 'rapidsms'

    def __str__(self):
        """Display name, or 'Anonymous' when no name is set."""
        return (self.name or 'Anonymous')

    def __repr__(self):
        return ('<%s: %s>' % (type(self).__name__, self))

    def is_anonymous(self):
        """True when this contact has no name."""
        return (not self.name)

    def default_connection(self):
        """Return this contact's first connection, or None if it has none.

        Uses ``.first()`` instead of the previous ``count() > 0`` +
        ``all()[0]`` pair, which issued two queries and could race with
        concurrent deletes.
        """
        return self.connection_set.first()
_torch
_tf
class DetermineFrameworkTest(TestCase):
    """Tests for ``FeaturesManager.determine_framework``.

    Covers the three resolution paths: an explicitly provided framework,
    inference from a local checkpoint directory, and falling back to the
    framework available in the environment.

    NOTE(review): the two stray ``_torch`` / ``_tf`` lines just above this
    class look like stripped ``@require_torch`` / ``@require_tf``
    decorators -- confirm against the upstream source.
    """

    def setUp(self):
        # Small hub model identifier and expected framework codes shared by all tests.
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'

    def _setup_pt_ckpt(self, save_dir):
        """Write a PyTorch checkpoint for the test model into *save_dir*."""
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        """Write a TensorFlow checkpoint (converted from PT) into *save_dir*."""
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        """An explicit framework argument wins, even when a checkpoint exists."""
        mock_framework = 'mock_framework'
        # Hub model id + explicit framework.
        result = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(result, mock_framework)
        # Local PyTorch checkpoint + explicit framework.
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            result = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(result, mock_framework)
        # Local TensorFlow checkpoint + explicit framework.
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            result = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(result, mock_framework)

    def test_checkpoint_provided(self):
        """Without an explicit framework, infer it from the checkpoint files."""
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            result = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(result, self.framework_pt)
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            result = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(result, self.framework_tf)
        # An empty directory holds no recognizable checkpoint.
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                result = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        """Fall back to whichever framework is importable; PT wins ties."""
        # Only PyTorch available.
        mock_tf_available = MagicMock(return_value=False)
        with patch('transformers.onnx.features.is_tf_available', mock_tf_available):
            result = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(result, self.framework_pt)
        # Only TensorFlow available.
        mock_torch_available = MagicMock(return_value=False)
        with patch('transformers.onnx.features.is_torch_available', mock_torch_available):
            result = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(result, self.framework_tf)
        # Both available: PyTorch is preferred.
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch('transformers.onnx.features.is_tf_available', mock_tf_available), patch('transformers.onnx.features.is_torch_available', mock_torch_available):
            result = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(result, self.framework_pt)
        # Neither available: no framework can be determined.
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch('transformers.onnx.features.is_tf_available', mock_tf_available), patch('transformers.onnx.features.is_torch_available', mock_torch_available):
            with self.assertRaises(EnvironmentError):
                result = FeaturesManager.determine_framework(self.test_model)
def get_dataloader(dataset='coco', img_size=128):
    """Build a deterministic single-image dataloader for evaluation.

    Args:
        dataset: dataset name, 'coco' or 'vg'.
        img_size: square image size. Now also honored for 'vg', which
            previously hard-coded (128, 128) and silently ignored this
            parameter (default behavior is unchanged).

    Returns:
        torch DataLoader with batch_size=1 and shuffling disabled so the
        evaluation order is reproducible.

    Raises:
        ValueError: for an unknown dataset name (previously the raw name
            string was handed straight to DataLoader).
    """
    if dataset == 'coco':
        dataset = CocoSceneGraphDataset(
            image_dir='./datasets/coco/images/val2017/',
            instances_json='./datasets/coco/annotations/instances_val2017.json',
            stuff_json='./datasets/coco/annotations/stuff_val2017.json',
            stuff_only=True,
            image_size=(img_size, img_size),
            left_right_flip=False)
    elif dataset == 'vg':
        dataset = VgSceneGraphDataset(
            vocab_json='./data/tmp/vocab.json',
            h5_path='./data/tmp/preprocess_vg/val.h5',
            image_dir='./datasets/vg/',
            image_size=(img_size, img_size),
            left_right_flip=False,
            max_objects=30)
    else:
        raise ValueError("dataset must be 'coco' or 'vg', got {!r}".format(dataset))
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, drop_last=True, shuffle=False, num_workers=0)
    return dataloader
class CTransport(Transport):
    """Transport wrapper that deduplicates repeated messages via MD5 hashes.

    A sender transmits ``'N' + payload`` the first time and ``'H' + hexdigest``
    for repeats; the receiver caches decoded payloads by hash and replays them.

    NOTE(review): ``rcache`` is a class attribute, shared by every instance;
    ``self.scache`` is referenced but not defined here (presumably provided by
    ``Transport`` or a subclass) -- confirm upstream.
    """
    rcache = {}

    def hash(self, msg):
        """Return the hex MD5 digest of *msg* (coerced to bytes)."""
        return md5_new(ppc.b_(msg)).hexdigest()

    def csend(self, msg):
        """Send *msg*, replacing the payload with its hash if already sent once."""
        msg = ppc.b_(msg)
        hash1 = self.hash(msg)
        if hash1 in self.scache:
            self.send(ppc.b_('H' + hash1))
        else:
            self.send(ppc.b_('N') + msg)
            self.scache[hash1] = True

    def creceive(self, preprocess=None):
        """Receive a possibly hash-compressed message and return the decoded payload.

        BUG FIX: the cache write previously ran on the 'H' path as well,
        overwriting the correctly cached payload with the raw ``'H'+hash``
        frame. The cache is now only written when a full 'N' message arrives;
        an 'H' frame simply replays the cached entry.
        """
        msg = ppc.b_(self.receive())
        if msg[:1] == ppc.b_('H'):
            # Hash-only frame: the payload must already be in the cache.
            hash1 = ppc.str_(msg[1:])
        else:
            # Full frame: strip the 'N' marker, decode, and cache the result.
            msg = msg[1:]
            hash1 = self.hash(msg)
            if preprocess is None:
                preprocess = (lambda x: x)
            self.rcache[hash1] = preprocess(msg)
        return self.rcache[hash1]
def parse_args(args=None):
    """Parse command-line arguments for ONNX/TensorRT model evaluation.

    Args:
        args: optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward-compatible generalization, mainly useful for testing).

    Returns:
        argparse.Namespace with model_config, model_file, model_type,
        backend, eval and device.
    """
    parser = argparse.ArgumentParser(description='MMOCR test (and eval) a onnx or tensorrt model.')
    parser.add_argument('model_config', type=str, help='Config file.')
    parser.add_argument('model_file', type=str, help='Input file name for evaluation.')
    parser.add_argument('model_type', type=str, help='Detection or recognition model to deploy.', choices=['recog', 'det'])
    parser.add_argument('backend', type=str, help='Which backend to test, TensorRT or ONNXRuntime.', choices=['TensorRT', 'ONNXRuntime'])
    # Help text fixed: missing spaces around the examples ('e.g.,"bbox"',
    # 'forPASCAL VOC') made the --help output garbled.
    parser.add_argument('--eval', type=str, nargs='+', help='The evaluation metrics, which depends on the dataset, e.g., "bbox", "seg", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC.')
    parser.add_argument('--device', default='cuda:0', help='Device used for inference.')
    return parser.parse_args(args)
def create_resnet20_spec(config):
    """Build the fixed ResNet-20-style cell spec and fill stack hyper-parameters.

    Mutates *config* in place (three stacks of three modules, 16-filter stem)
    and returns the ModelSpec for the 4-node cell.
    """
    # Upper-triangular adjacency: input->conv1, input->output, conv1->conv2,
    # conv2->output.
    adjacency = np.array([
        [0, 1, 0, 1],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
        [0, 0, 0, 0],
    ])
    node_ops = ['input', 'conv3x3-bn-relu', 'conv3x3-bn-relu', 'output']
    spec = model_spec.ModelSpec(adjacency, node_ops)
    config['num_stacks'] = 3
    config['num_modules_per_stack'] = 3
    config['stem_filter_size'] = 16
    return spec
class MnistParser():
    """Feeds single-input / single-label MNIST batches from a TFRecord iterator."""

    def __init__(self, data_inputs=None, validation_inputs=None, batch_size=10):
        """Configure input/label tensor names and the batch size.

        Raises:
            ValueError: if more than one data or validation input is given.
        """
        if not data_inputs:
            data_inputs = ['data']
        if len(data_inputs) > 1:
            raise ValueError('Only one data input supported for mnist')
        self._data_inputs = data_inputs
        if not validation_inputs:
            validation_inputs = ['labels']
        if len(validation_inputs) > 1:
            raise ValueError('Only one validation input supported for mnist')
        self._validation_inputs = validation_inputs
        self._batch_size = batch_size

    @staticmethod
    def parse(serialized_example):
        """Decode one serialized TFRecord example into (image, one-hot label).

        Declared ``@staticmethod``: it was previously defined without
        ``self`` and without the decorator, so calling it on an instance
        raised a TypeError.
        """
        dim = 28
        features = tf.compat.v1.parse_single_example(serialized_example, features={'label': tf.compat.v1.FixedLenFeature([], tf.int64), 'image_raw': tf.compat.v1.FixedLenFeature([], tf.string)})
        image = tf.compat.v1.decode_raw(features['image_raw'], tf.uint8)
        image.set_shape([(dim * dim)])
        # Normalize raw bytes to [0, 1] floats.
        image = (tf.cast(image, tf.float32) / 255)
        label = tf.cast(features['label'], tf.int32)
        labels = tf.one_hot(indices=label, depth=10)
        return (image, labels)

    def get_batch(self, iterator):
        """Pull one batch from *iterator* and return it as numpy arrays keyed by input name."""
        (data, labels) = iterator.get_next()
        with tf.compat.v1.Session(graph=data.graph) as sess:
            (np_images, np_labels) = sess.run([data, labels])
            return {self._data_inputs[0]: np_images, self._validation_inputs[0]: np_labels}

    def get_batch_size(self):
        return self._batch_size

    def get_data_inputs(self):
        return self._data_inputs

    def get_validation_inputs(self):
        return self._validation_inputs
def _reduction_op_flop_jit(inputs, outputs, reduce_flops=1, finalize_flops=0):
    """Estimate FLOPs for a reduction op.

    Each input element costs *reduce_flops*; each output element additionally
    costs *finalize_flops* (minus the reduce cost already counted for it).
    Only the first input/output tensor determines the element counts.
    """
    input_shapes = [get_shape(t) for t in inputs]
    output_shapes = [get_shape(t) for t in outputs]
    in_count = prod(input_shapes[0])
    out_count = prod(output_shapes[0])
    return in_count * reduce_flops + out_count * (finalize_flops - reduce_flops)
def get_class_weights():
    """Build per-task loss weights from the annotation CSV.

    Classification heads get inverse-frequency class weights; regression
    heads get a uniform weight of 1.
    """
    df_all = pd.read_csv(data_path + 'annotations.csv')
    weights = {}
    for task in ('red_light', 'hazard_stop', 'speed_sign'):
        weights[task] = torch.Tensor(calc_class_weight(df_all[task]))
    for task in ('relative_angle', 'center_distance', 'veh_distance'):
        weights[task] = torch.Tensor([1])
    return weights
def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Save checkpoints per the configured epoch/update intervals, then prune.

    Only the distributed master writes. Maintains checkpoint{epoch}.pt,
    checkpoint_{epoch}_{updates}.pt, checkpoint_best.pt and
    checkpoint_last.pt; old interval/epoch checkpoints beyond the configured
    keep counts are removed afterwards.
    """
    if args.no_save or not distributed_utils.is_master(args):
        return
    write_timer = StopwatchMeter()
    write_timer.start()
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()
    # Map checkpoint filename -> whether it should be written this call.
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
        end_of_epoch and not args.no_epoch_checkpoints and epoch % args.save_interval == 0
    )
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
        not end_of_epoch and args.save_interval_updates > 0 and updates % args.save_interval_updates == 0
    )
    checkpoint_conds['checkpoint_best.pt'] = (
        val_loss is not None
        and (not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
    )
    checkpoint_conds['checkpoint_last.pt'] = True
    # Best validation loss is tracked across calls on the function object.
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if val_loss is not None:
        save_checkpoint.best = min(val_loss, prev_best)
    extra_state = {'train_iterator': epoch_itr.state_dict(), 'val_loss': val_loss}
    if hasattr(save_checkpoint, 'best'):
        extra_state.update({'best': save_checkpoint.best})
    checkpoints = [os.path.join(args.save_dir, fn) for (fn, cond) in checkpoint_conds.items() if cond]
    if len(checkpoints) > 0:
        for cp in checkpoints:
            trainer.save_checkpoint(cp, extra_state)
        write_timer.stop()
        # BUG FIX: this log line previously ran unconditionally after the
        # pruning below had reassigned ``checkpoints``, so it raised
        # IndexError when nothing was saved and could report a pruned path
        # instead of the saved one.
        print('| saved checkpoint {} (epoch {} {} updates) (writing took {} seconds)'.format(checkpoints[0], epoch, updates, write_timer.sum))
    # Prune stale --save-interval-updates checkpoints.
    if not end_of_epoch and args.keep_interval_updates > 0:
        old_checkpoints = utils.checkpoint_paths(args.save_dir, pattern='checkpoint_\\d+_(\\d+)\\.pt')
        for old_chk in old_checkpoints[args.keep_interval_updates:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
    # Keep only the newest --keep-last-epochs epoch checkpoints.
    if args.keep_last_epochs > 0:
        old_checkpoints = utils.checkpoint_paths(args.save_dir, pattern='checkpoint(\\d+)\\.pt')
        for old_chk in old_checkpoints[args.keep_last_epochs:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
class JointParameterized(Parameterized):
    """Aggregates the parameters of several Parameterized components."""

    def __init__(self, components):
        super(JointParameterized, self).__init__()
        self.components = components

    def get_params_internal(self, **tags):
        """Return the de-duplicated union of all component parameters.

        Sorted by hash so the ordering is deterministic for a given set.
        """
        unique_params = set()
        for component in self.components:
            unique_params.update(component.get_params_internal(**tags))
        return sorted(unique_params, key=hash)
def get_observation_photo_metadata(observation_id, access_token):
    """Fetch metadata for every photo attached to an observation."""
    print(f'Fetching observation {observation_id}')
    observation = get_observation(observation_id)
    # Build one photo-info URL per photo id on the observation.
    photo_urls = [
        f'{PHOTO_INFO_BASE_URL}/{photo["id"]}'
        for photo in observation.get('photos', [])
    ]
    print(f'{len(photo_urls)} photo URL(s) found')
    return [get_photo_metadata(url, access_token) for url in photo_urls]
class HardDiskAnnFileBackend():
    """Load annotation files from local disk, either as plain text or LMDB."""

    def __init__(self, file_format='txt'):
        """Store the file format after validating it.

        Raises ``ValueError`` for unsupported formats. (Previously an
        ``assert`` was used, which is silently stripped when Python runs
        with -O; note this changes the exception type from AssertionError.)
        """
        if file_format not in ('txt', 'lmdb'):
            raise ValueError(
                "unsupported file_format {!r}; expected 'txt' or 'lmdb'".format(file_format))
        self.file_format = file_format

    def __call__(self, ann_file):
        """Return the parsed annotations for *ann_file* in the configured format."""
        if self.file_format == 'lmdb':
            return LmdbAnnFileBackend(ann_file)
        return list_from_file(ann_file)
def scm_find_files(path: _t.PathT, scm_files: set[str], scm_dirs: set[str], force_all_files: bool=False) -> list[str]:
    """Walk *path* and return the files tracked by the SCM (or all files).

    *scm_files* / *scm_dirs* hold normcased realpaths of tracked files and
    directories. Symlinks that are not themselves tracked are skipped, a
    tracked symlinked directory inside the tree is returned as a single
    entry, and directory cycles (via ``followlinks=True``) are broken with
    a ``seen`` set. Returned paths are joined onto the original *path*.
    """
    realpath = os.path.normcase(os.path.realpath(path))
    seen: set[str] = set()
    res: list[str] = []
    for (dirpath, dirnames, filenames) in os.walk(realpath, followlinks=True):
        # Compare canonical paths so symlinks and case-insensitive FS match.
        realdirpath = os.path.normcase(os.path.realpath(dirpath))

        def _link_not_in_scm(n: str, realdirpath: str=realdirpath) -> bool:
            # True for a symlink entry that the SCM does not track.
            # realdirpath is bound as a default to avoid late-binding issues.
            fn = os.path.join(realdirpath, os.path.normcase(n))
            return (os.path.islink(fn) and (fn not in scm_files))
        if ((not force_all_files) and (realdirpath not in scm_dirs)):
            # Untracked directory: prune the whole subtree.
            dirnames[:] = []
            continue
        if (os.path.islink(dirpath) and (not os.path.relpath(realdirpath, realpath).startswith(os.pardir))):
            # A symlinked dir pointing inside the tree: record the link itself
            # and do not descend (its target is walked separately).
            res.append(os.path.join(path, os.path.relpath(dirpath, path)))
            dirnames[:] = []
            continue
        if (realdirpath in seen):
            # Directory already visited via another link: break the cycle.
            dirnames[:] = []
            continue
        # Drop untracked symlinked subdirectories before descending.
        dirnames[:] = [dn for dn in dirnames if (force_all_files or (not _link_not_in_scm(dn)))]
        for filename in filenames:
            if ((not force_all_files) and _link_not_in_scm(filename)):
                continue
            fullfilename = os.path.join(dirpath, filename)
            is_tracked = (os.path.normcase(os.path.realpath(fullfilename)) in scm_files)
            if (force_all_files or is_tracked):
                # Report relative to the walk root, joined onto the input path.
                res.append(os.path.join(path, os.path.relpath(fullfilename, realpath)))
        seen.add(realdirpath)
    return res
.linux
_locale
def test_downloads_with_ascii_locale(request, server, tmp_path, quteproc_new):
    """Downloads should work with an ASCII-only C locale (issue #908).

    NOTE(review): the URL literal on the ``url = `` line was truncated in
    this copy ("url = '"); it is reconstructed here from the expected
    messages below (a non-ASCII 'ä' in the filename becomes '?' under
    LC_ALL=C) -- confirm against the upstream qutebrowser test.
    """
    args = (['--temp-basedir'] + _base_args(request.config))
    quteproc_new.start(args, env={'LC_ALL': 'C'})
    quteproc_new.set_setting('downloads.location.directory', str(tmp_path))
    quteproc_new.set_setting('downloads.location.prompt', 'false')
    url = 'http://localhost:{port}/data/downloads/ä-issue908.bin'.format(port=server.port)
    # First download without a prompt; the 'ä' is replaced by '?'.
    quteproc_new.send_cmd(':download {}'.format(url))
    quteproc_new.wait_for(category='downloads', message='Download ?-issue908.bin finished')
    # Second download with the prompt enabled, opened via Python.
    quteproc_new.set_setting('downloads.location.prompt', 'true')
    quteproc_new.send_cmd(':download {}'.format(url))
    quteproc_new.send_cmd(':prompt-open-download "{}" -c pass'.format(sys.executable))
    quteproc_new.wait_for(category='downloads', message='Download a-issue908.bin finished')
    quteproc_new.wait_for(category='misc', message='Opening * with [*python*]')
    # Only the first (saved) download is on disk; the second was opened.
    assert (len(list(tmp_path.iterdir())) == 1)
    assert (tmp_path / '?-issue908.bin').exists()
class EventDetector(nn.Module):
    """CNN+LSTM event detector: MobileNetV2 features per frame, LSTM over time,
    linear head producing 9 per-frame event logits.
    """

    def __init__(self, pretrain, width_mult, lstm_layers, lstm_hidden, bidirectional=True, dropout=True):
        """Build the network.

        pretrain: load MobileNetV2 weights from 'mobilenet_v2.pth.tar'
            (the file must exist in the working directory).
        width_mult: MobileNetV2 width multiplier; widens the CNN output
            (and hence the LSTM input) when > 1.0.
        """
        super(EventDetector, self).__init__()
        self.width_mult = width_mult
        self.lstm_layers = lstm_layers
        self.lstm_hidden = lstm_hidden
        self.bidirectional = bidirectional
        self.dropout = dropout
        net = MobileNetV2(width_mult=width_mult)
        # NOTE(review): weights are loaded unconditionally; only applied when
        # pretrain is truthy.
        state_dict_mobilenet = torch.load('mobilenet_v2.pth.tar')
        if pretrain:
            net.load_state_dict(state_dict_mobilenet)
        # Keep the first 19 feature blocks of MobileNetV2 as the frame encoder.
        self.cnn = nn.Sequential(*list(net.children())[0][:19])
        # LSTM input size follows the CNN channel count (scaled by width_mult
        # only when it exceeds 1.0, matching MobileNetV2's last-channel rule).
        self.rnn = nn.LSTM(int(((1280 * width_mult) if (width_mult > 1.0) else 1280)), self.lstm_hidden, self.lstm_layers, batch_first=True, bidirectional=bidirectional)
        if self.bidirectional:
            # Two directions are concatenated, doubling the head input.
            self.lin = nn.Linear((2 * self.lstm_hidden), 9)
        else:
            self.lin = nn.Linear(self.lstm_hidden, 9)
        if self.dropout:
            self.drop = nn.Dropout(0.5)

    def init_hidden(self, batch_size):
        """Return fresh zero (h0, c0) states on the GPU (requires CUDA)."""
        if self.bidirectional:
            return (Variable(torch.zeros((2 * self.lstm_layers), batch_size, self.lstm_hidden).cuda(), requires_grad=True), Variable(torch.zeros((2 * self.lstm_layers), batch_size, self.lstm_hidden).cuda(), requires_grad=True))
        else:
            return (Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden).cuda(), requires_grad=True), Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden).cuda(), requires_grad=True))

    def forward(self, x, lengths=None):
        """Run the detector on a clip batch.

        x: tensor shaped (batch, timesteps, C, H, W); frames are folded into
        the batch dimension for the CNN, then unfolded for the LSTM.
        Returns logits shaped (batch * timesteps, 9).
        """
        (batch_size, timesteps, C, H, W) = x.size()
        self.hidden = self.init_hidden(batch_size)
        c_in = x.view((batch_size * timesteps), C, H, W)
        c_out = self.cnn(c_in)
        # Global average pool over the two spatial dimensions.
        c_out = c_out.mean(3).mean(2)
        if self.dropout:
            c_out = self.drop(c_out)
        r_in = c_out.view(batch_size, timesteps, (- 1))
        (r_out, states) = self.rnn(r_in, self.hidden)
        out = self.lin(r_out)
        out = out.view((batch_size * timesteps), 9)
        return out
(autouse=True)
# NOTE(review): this is almost certainly a pytest fixture upstream; its
# decorator (likely ``@pytest.fixture(autouse=True)``) appears to have been
# stripped -- the stray ``(autouse=True)`` line above is its residue.
# Confirm against the original source.
def _foo_module(mock_module):
    """Register a fake ``foo.py`` module defining ``MyValidator``, a wrapper
    around jsonschema's Draft7Validator that additionally yields a custom
    ValidationError for any event whose title contains 'Occult'."""
    mock_module('foo.py', 'import jsonschema\n\nclass MyValidator:\n    def __init__(self, schema, *args, **kwargs):\n        self.schema = schema\n        self.real_validator = jsonschema.validators.Draft7Validator(\n            schema, *args, **kwargs\n        )\n\n    def iter_errors(self, data, *args, **kwargs):\n        yield from self.real_validator.iter_errors(data, *args, **kwargs)\n        for event in data["events"]:\n            if "Occult" in event["title"]:\n                yield jsonschema.exceptions.ValidationError(\n                    "Error! Occult event detected! Run!",\n                    validator=None,\n                    validator_value=None,\n                    instance=event,\n                    schema=self.schema,\n                )\n')
class PreSEAttBlock(nn.Module):
    """Pre-activation squeeze-and-excitation attention block.

    BN + ReLU, global average pool, then a two-layer 1x1-conv bottleneck
    (channels reduced by *reduction*) ending in a sigmoid, producing
    per-channel attention weights in (0, 1).
    """

    def __init__(self, in_channels, out_channels, reduction=16):
        super(PreSEAttBlock, self).__init__()
        squeezed_channels = out_channels // reduction
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(in_channels=in_channels, out_channels=squeezed_channels, bias=True)
        self.conv2 = conv1x1(in_channels=squeezed_channels, out_channels=out_channels, bias=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return channel attention weights for *x*."""
        w = self.relu(self.bn(x))
        w = self.pool(w)
        w = self.relu(self.conv1(w))
        w = self.sigmoid(self.conv2(w))
        return w
def _compute_intersection(w1, w2):
col_off = max(w1.col_off, w2.col_off)
row_off = max(w1.row_off, w2.row_off)
width = (min((w1.col_off + w1.width), (w2.col_off + w2.width)) - col_off)
height = (min((w1.row_off + w1.height), (w2.row_off + w2.height)) - row_off)
return (col_off, row_off, width, height) |
def test_device_from_uuid_and_location_returns_unsupported():
    """An unrecognized device UUID should yield an UnsupportedDevice.

    NOTE(review): the setup-URL literal was truncated in this copy
    ("... 'uuid:Unsupported-1_0-SERIALNUMBER', ' debug=True)"); a
    placeholder WeMo setup.xml URL is used here -- confirm against the
    upstream pywemo test.
    """
    unsupported = mock.create_autospec(discovery.UnsupportedDevice)
    with mock.patch('pywemo.discovery.UnsupportedDevice', return_value=unsupported):
        device = discovery.device_from_uuid_and_location(
            'uuid:Unsupported-1_0-SERIALNUMBER',
            'http://192.168.1.100:49158/setup.xml',
            debug=True,
        )
        assert device == unsupported
def test_discovery_responder_notify(mock_socket, mock_interface_addresses, mock_get_callback_address):
    """The responder's NOTIFY must be multicast with callback/NLS/NTS filled in."""
    responder = ssdp.DiscoveryResponder(callback_port=MOCK_CALLBACK_PORT)
    responder.send_notify('ssdp:alive')
    expected = ssdp.SSDP_NOTIFY % {
        'callback': MOCK_CALLBACK_ADDRESS,
        'nls': responder._nls_uuid,
        'nts': 'ssdp:alive',
    }
    # Sent to the standard SSDP multicast group/port.
    mock_socket.sendto.assert_called_with(expected.encode('utf-8'), ('239.255.255.250', 1900))
def lisp_to_sparql(lisp_program: str):
    """Compile an s-expression (lisp-style) logical form into a SPARQL query.

    Supports JOIN / AND / comparisons (le, lt, ge, gt) / TC (time
    constraint) / ARGMIN / ARGMAX / COUNT over Freebase-style '<ns:...>'
    relations. Returns the full query text as one newline-joined string.

    NOTE(review): the '-08:00' timezone suffix and the 2015-08-10 'NOW'
    date follow the dataset conventions assumed by the caller -- confirm
    if this is reused elsewhere.
    """
    clauses = []
    order_clauses = []
    # Entities mentioned in the program; excluded from the answer variable
    # when a superlative is present.
    entities = set()
    # Union-find parent map: variable index -> representative index.
    identical_variables_r = {}
    expression = lisp_to_nested_expression(lisp_program)
    superlative = False
    # For ARGMAX/ARGMIN over a nested JOIN chain, flatten the relation path
    # into a flat argument list first.
    if (expression[0] in ['ARGMAX', 'ARGMIN']):
        superlative = True
        if isinstance(expression[2], list):

            def retrieve_relations(exp: list):
                # Collect the relation sequence from a nested JOIN chain,
                # keeping (R relation) inversions as sub-lists.
                rtn = []
                for element in exp:
                    if (element == 'JOIN'):
                        continue
                    elif isinstance(element, str):
                        rtn.append(element)
                    elif (isinstance(element, list) and (element[0] == 'R')):
                        rtn.append(element)
                    elif (isinstance(element, list) and (element[0] == 'JOIN')):
                        rtn.extend(retrieve_relations(element))
                return rtn
            relations = retrieve_relations(expression[2])
            expression = expression[:2]
            expression.extend(relations)
    # Linearize nested expressions into numbered sub-programs; '#k' arguments
    # refer to the result variable of sub-program k.
    sub_programs = _linearize_lisp_expression(expression, [0])
    # The last sub-program binds the answer variable.
    question_var = (len(sub_programs) - 1)
    count = False

    def get_root(var: int):
        # Follow the union-find chain to the representative variable.
        while (var in identical_variables_r):
            var = identical_variables_r[var]
        return var
    # Emit triple/filter clauses for each sub-program.
    for (i, subp) in enumerate(sub_programs):
        i = str(i)
        if (subp[0] == 'JOIN'):
            # subp[1] is the relation (a list means (R rel), i.e. inverted).
            if isinstance(subp[1], list):
                if (subp[2][:2] in ['m.', 'g.']):
                    # Entity subject with inverted relation.
                    clauses.append((((((('<ns:' + subp[2]) + '> <ns:') + subp[1][1]) + '> ?x') + i) + ' .'))
                    entities.add(subp[2])
                elif (subp[2][0] == '#'):
                    # Reference to another sub-program's variable.
                    clauses.append((((((('?x' + subp[2][1:]) + ' <ns:') + subp[1][1]) + '> ?x') + i) + ' .'))
                else:
                    # Literal argument; re-quote typed literals.
                    if subp[2].__contains__('^^'):
                        data_type = subp[2].split('^^')[1].split('#')[1]
                        if (data_type not in ['integer', 'float', 'dateTime']):
                            subp[2] = f""""{(subp[2].split('^^')[0] + '-08:00')}"^^<{subp[2].split('^^')[1]}>"""
                        else:
                            subp[2] = f""""{subp[2].split('^^')[0]}"^^<{subp[2].split('^^')[1]}>"""
                    clauses.append((((((subp[2] + ' <ns:') + subp[1][1]) + '> ?x') + i) + ' .'))
            elif (subp[2][:2] in ['m.', 'g.']):
                # Entity object with forward relation.
                clauses.append((((((('?x' + i) + ' <ns:') + subp[1]) + '> <ns:') + subp[2]) + '> .'))
                entities.add(subp[2])
            elif (subp[2][0] == '#'):
                clauses.append((((((('?x' + i) + ' <ns:') + subp[1]) + '> ?x') + subp[2][1:]) + ' .'))
            else:
                # Literal object; re-quote typed literals.
                if subp[2].__contains__('^^'):
                    data_type = subp[2].split('^^')[1].split('#')[1]
                    if (data_type not in ['integer', 'float', 'dateTime']):
                        subp[2] = f""""{(subp[2].split('^^')[0] + '-08:00')}"^^<{subp[2].split('^^')[1]}>"""
                    else:
                        subp[2] = f""""{subp[2].split('^^')[0]}"^^<{subp[2].split('^^')[1]}>"""
                clauses.append((((((('?x' + i) + ' <ns:') + subp[1]) + '> ') + subp[2]) + ' .'))
        elif (subp[0] == 'AND'):
            # Merge this variable with subp[2]'s variable; optionally also
            # merge with subp[1]'s, else subp[1] is a type constraint.
            var1 = int(subp[2][1:])
            rooti = get_root(int(i))
            root1 = get_root(var1)
            if (rooti > root1):
                identical_variables_r[rooti] = root1
            else:
                identical_variables_r[root1] = rooti
                root1 = rooti
            if (subp[1][0] == '#'):
                var2 = int(subp[1][1:])
                root2 = get_root(var2)
                if (root1 > root2):
                    identical_variables_r[root1] = root2
                else:
                    identical_variables_r[root2] = root1
            else:
                clauses.append((((('?x' + i) + ' <ns:type.object.type> <ns:') + subp[1]) + '> .'))
        elif (subp[0] in ['le', 'lt', 'ge', 'gt']):
            # Numeric/date comparison: bind ?y{i} and filter it.
            clauses.append((((((('?x' + i) + ' <ns:') + subp[1]) + '> ?y') + i) + ' .'))
            if (subp[0] == 'le'):
                op = '<='
            elif (subp[0] == 'lt'):
                op = '<'
            elif (subp[0] == 'ge'):
                op = '>='
            else:
                op = '>'
            if subp[2].__contains__('^^'):
                data_type = subp[2].split('^^')[1].split('#')[1]
                if (data_type not in ['integer', 'float', 'dateTime']):
                    subp[2] = f""""{(subp[2].split('^^')[0] + '-08:00')}"^^<{subp[2].split('^^')[1]}>"""
                else:
                    subp[2] = f""""{subp[2].split('^^')[0]}"^^<{subp[2].split('^^')[1]}>"""
            clauses.append(f'FILTER (?y{i} {op} {subp[2]})')
        elif (subp[0] == 'TC'):
            # Time constraint: the '..from' relation must start on/before the
            # target date and the matching '..to' relation must end on/after.
            var = int(subp[1][1:])
            rooti = get_root(int(i))
            root_var = get_root(var)
            if (rooti > root_var):
                identical_variables_r[rooti] = root_var
            else:
                identical_variables_r[root_var] = rooti
            year = subp[3]
            if (year == 'NOW'):
                from_para = '"2015-08-10"^^xsd:dateTime'
                to_para = '"2015-08-10"^^xsd:dateTime'
            else:
                from_para = f'"{year}-12-31"^^xsd:dateTime'
                to_para = f'"{year}-01-01"^^xsd:dateTime'
            clauses.append(f'FILTER(NOT EXISTS {{?x{i} <ns:{subp[2]}> ?sk0}} || ')
            clauses.append(f'EXISTS {{?x{i} <ns:{subp[2]}> ?sk1 . ')
            clauses.append(f'FILTER(xsd:datetime(?sk1) <= {from_para}) }})')
            if (subp[2][(- 4):] == 'from'):
                clauses.append(f"FILTER(NOT EXISTS {{?x{i} <ns:{(subp[2][:(- 4)] + 'to')}> ?sk2}} || ")
                clauses.append(f"EXISTS {{?x{i} <ns:{(subp[2][:(- 4)] + 'to')}> ?sk3 . ")
            else:
                # Relation ends with 'from_date' -> pair it with 'to_date'.
                clauses.append(f"FILTER(NOT EXISTS {{?x{i} <ns:{(subp[2][:(- 9)] + 'to_date')}> ?sk2}} || ")
                clauses.append(f"EXISTS {{?x{i} <ns:{(subp[2][:(- 9)] + 'to_date')}> ?sk3 . ")
            clauses.append(f'FILTER(xsd:datetime(?sk3) >= {to_para}) }})')
        elif (subp[0] in ['ARGMIN', 'ARGMAX']):
            # Superlative: bind ?sk0 via the relation chain, order by it.
            superlative = True
            if (subp[1][0] == '#'):
                var = int(subp[1][1:])
                rooti = get_root(int(i))
                root_var = get_root(var)
                if (rooti > root_var):
                    identical_variables_r[rooti] = root_var
                else:
                    identical_variables_r[root_var] = rooti
            else:
                clauses.append(f'?x{i} <ns:type.object.type> <ns:{subp[1]}> .')
            if (len(subp) == 3):
                clauses.append(f'?x{i} <ns:{subp[2]}> ?sk0 .')
            elif (len(subp) > 3):
                # Multi-hop relation path via chain variables ?c{j}.
                for (j, relation) in enumerate(subp[2:(- 1)]):
                    if (j == 0):
                        var0 = f'x{i}'
                    else:
                        var0 = f'c{(j - 1)}'
                    var1 = f'c{j}'
                    if (isinstance(relation, list) and (relation[0] == 'R')):
                        clauses.append(f'?{var1} <ns:{relation[1]}> ?{var0} .')
                    else:
                        clauses.append(f'?{var0} <ns:{relation}> ?{var1} .')
                clauses.append(f'?c{j} <ns:{subp[(- 1)]}> ?sk0 .')
            if (subp[0] == 'ARGMIN'):
                order_clauses.append('ORDER BY ?sk0')
            elif (subp[0] == 'ARGMAX'):
                order_clauses.append('ORDER BY DESC(?sk0)')
            order_clauses.append('LIMIT 1')
        elif (subp[0] == 'COUNT'):
            # COUNT shares its variable with its argument's variable.
            var = int(subp[1][1:])
            root_var = get_root(var)
            identical_variables_r[int(i)] = root_var
            count = True
    # Rewrite every merged variable to its representative.
    for i in range(len(clauses)):
        for k in identical_variables_r:
            clauses[i] = clauses[i].replace(f'?x{k} ', f'?x{get_root(k)} ')
    question_var = get_root(question_var)
    # The answer variable becomes plain ?x.
    for i in range(len(clauses)):
        clauses[i] = clauses[i].replace(f'?x{question_var} ', f'?x ')
    if superlative:
        # Keep a copy of the body for the inner (ordering) sub-query, and
        # exclude the program's own entities from the answer.
        arg_clauses = clauses[:]
        for entity in entities:
            clauses.append(f'FILTER (?x != <ns:{entity}>)')
    # Assemble the final SELECT; superlatives nest an ordered sub-query.
    clauses.insert(0, f"FILTER (!isLiteral(?x) || lang(?x) = '' || langMatches(lang(?x), 'en'))")
    clauses.insert(0, 'WHERE {')
    if count:
        clauses.insert(0, f'SELECT COUNT DISTINCT ?x')
    elif superlative:
        clauses.insert(0, '{SELECT ?sk0')
        clauses = (arg_clauses + clauses)
        clauses.insert(0, 'WHERE {')
        clauses.insert(0, f'SELECT DISTINCT ?x')
    else:
        clauses.insert(0, f'SELECT DISTINCT ?x')
    clauses.append('}')
    clauses.extend(order_clauses)
    if superlative:
        clauses.append('}')
        clauses.append('}')
    return '\n'.join(clauses)
class KJTListAwaitable(Awaitable[KJTList]):
    """Awaitable bundling several KeyedJaggedTensor awaitables into a KJTList."""

    def __init__(self, awaitables: List[Awaitable[KeyedJaggedTensor]], ctx: C) -> None:
        super().__init__()
        self.awaitables = awaitables
        self.ctx = ctx

    def _wait_impl(self) -> KJTList:
        """Wait on every sub-awaitable in order, then sync the sharding context."""
        results = []
        for awaitable in self.awaitables:
            results.append(awaitable.wait())
        _set_sharding_context_post_a2a(results, self.ctx)
        return KJTList(results)
def compile_listings():
    """Scan DIR_LISTINGS for per-exchange listing files.

    Returns:
        dict mapping exchange name (file stem) to file path. Previously the
        function implicitly returned None when no files were found (the
        ``return`` sat inside ``if listing_files:``); it now always returns
        the dict, which is still falsy when empty.
    """
    listing_files = {}
    if os.path.isdir(DIR_LISTINGS):
        for entry in os.listdir(DIR_LISTINGS):
            ex_name = os.path.splitext(entry)[0]
            entry_path = os.path.join(DIR_LISTINGS, entry)
            if os.path.isfile(entry_path):
                listing_files[ex_name] = entry_path
            else:
                # Sub-directories (or anything else) are skipped with a warning.
                print('Invalid path for {} at {}'.format(ex_name, entry_path))
    if listing_files:
        print('Found listing files for {} exchanges: {}'.format(len(listing_files), ', '.join(list(listing_files.keys()))))
    return listing_files
class QuickCheck():
    """UDP client for a PTW QUICKCHECK device: connect, query, and parse
    MEASGET/MEASCNT responses into a pandas DataFrame.
    """

    def __init__(self, ip):
        # Device address; the QUICKCHECK protocol uses UDP port 8123.
        self.ip = ip
        self.port = 8123
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.settimeout(3)
        self.MSG = b''           # encoded outgoing frame (CRLF-terminated)
        self.raw_MSG = ''        # outgoing command as plain text
        self.measurements = pd.DataFrame()
        self.data = ''           # last decoded response (CRLF stripped)
        self.raw_data = b''      # last raw response bytes
        self.connected = False

    def connect(self):
        """Probe the device with 'SER'; mark connected if it echoes back."""
        print('UDP target IP:', self.ip)
        print('UDP target port:', self.port)
        self.send_quickcheck('SER')
        if ('SER' in self.data):
            self.connected = True
            print('Connected to Quickcheck')
            print(self.data)

    def close(self):
        """Close and drop the socket; further use requires re-creation."""
        self.sock.close()
        del self.sock
        self.connected = False

    def _prepare_qcheck(self):
        # Frame the command: text + CR + LF.
        self.MSG = ((self.raw_MSG.encode() + codecs.decode('0d', 'hex')) + codecs.decode('0a', 'hex'))

    def _socket_send(self):
        """Send the prepared frame and block for one (<=4096 byte) reply."""
        self.data = ''
        self.sock.sendto(self.MSG, (self.ip, self.port))
        (self.raw_data, _) = self.sock.recvfrom(4096)

    def send_quickcheck(self, message):
        """Send *message*, retrying up to 3 times on UDP timeouts.

        On success, ``self.data`` holds the decoded reply; after exhausting
        retries it is left empty.
        """
        self.raw_MSG = message
        self._prepare_qcheck()
        max_retries = 3
        n_retry = 0
        while True:
            try:
                self._socket_send()
                data = self.raw_data.decode(encoding='utf-8')
                self.data = data.strip('\r\n')
                break
            except socket.timeout:
                if (n_retry == max_retries):
                    print('\n Connection Error - Reached max retries\n Quickcheck device unreachable, please check your settings')
                    self.data = ''
                    break
                print('Connection Timeout')
                n_retry += 1
                print('Retrying connection {}/{}'.format(n_retry, max_retries))

    def _parse_measurements(self):
        """Parse ``self.data`` into a flat dict.

        MEASGET replies are decomposed with regexes into MD_* (metadata),
        MV_* (measured values), AV_* (analysis values per quantity),
        WORK_* and TASK_* fields; MEASCNT/PTW/SER/KEY replies are passed
        through nearly as-is.
        """
        data_split = self.data.split(';')
        m = {}
        if (data_split[0] == 'MEASGET'):
            # MD=[...]: measurement id, date and time.
            MD = re.findall('MD=\\[(.*?)\\]', self.data)[0]
            m['MD_ID'] = int(re.findall('ID=(.*?);', MD)[0])
            meas_date = re.findall('Date=(.*?);', MD)[0]
            m['MD_Date'] = datetime.datetime.strptime(meas_date, '%Y-%m-%d').date()
            meas_time = re.findall('Time=(.*?)$', MD)[0]
            m['MD_Time'] = datetime.datetime.strptime(meas_time, '%H:%M:%S').time()
            m['MD_DateTime'] = datetime.datetime.combine(m['MD_Date'], m['MD_Time'])
            # MV=[...]: raw measured values, all parsed as floats.
            str_val = re.findall('MV=\\[(.*?)\\]', self.data)[0]
            regex_map = {'MV_CAX': 'CAX=(.*?);', 'MV_G10': 'G10=(.*?);', 'MV_L10': 'L10=(.*?);', 'MV_T10': 'T10=(.*?);', 'MV_R10': 'R10=(.*?);', 'MV_G20': 'G20=(.*?);', 'MV_L20': 'L20=(.*?);', 'MV_T20': 'T20=(.*?);', 'MV_R20': 'R20=(.*?);', 'MV_E1': 'E1=(.*?);', 'MV_E2': 'E2=(.*?);', 'MV_E3': 'E3=(.*?);', 'MV_E4': 'E4=(.*?);', 'MV_Temp': 'Temp=(.*?);', 'MV_Press': 'Press=(.*?);', 'MV_CAXRate': 'CAXRate=(.*?);', 'MV_ExpTime': 'ExpTime=(.*?)$'}
            for (key, pattern) in regex_map.items():
                m[key] = float(re.findall(pattern, str_val)[0])
            # AV=[...]: per-quantity analysis blocks (min/max/target/...).
            AV = re.findall('AV=\\[(.*?)\\]\\]', self.data)[0]
            AV = (AV + ']')
            for s in ('CAX', 'FLAT', 'SYMGT', 'SYMLR', 'BQF', 'We'):
                str_val = re.findall((s + '=\\[(.*?)\\]'), AV)[0]
                m[(('AV_' + s) + '_Min')] = float(re.findall('Min=(.*?);', str_val)[0])
                m[(('AV_' + s) + '_Max')] = float(re.findall('Max=(.*?);', str_val)[0])
                m[(('AV_' + s) + '_Target')] = float(re.findall('Target=(.*?);', str_val)[0])
                m[(('AV_' + s) + '_Norm')] = float(re.findall('Norm=(.*?);', str_val)[0])
                m[(('AV_' + s) + '_Value')] = float(re.findall('Value=(.*?);', str_val)[0])
                m[(('AV_' + s) + '_Valid')] = int(re.findall('Valid=(.*?)$', str_val)[0])
            # WORK=[...]: worklist entry.
            str_val = re.findall('WORK=\\[(.*?)\\]', self.data)[0]
            m['WORK_ID'] = int(re.findall('ID=(.*?);', str_val)[0])
            m['WORK_Name'] = re.findall('Name=(.*?)$', str_val)[0]
            # TASK=[...];MV: task configuration.
            # NOTE(review): 'TASK_SSD' is filled from the 'SDD=' field --
            # confirm whether that spelling mismatch is intentional.
            str_val = re.findall('TASK=\\[(.*?)\\];MV', self.data)[0]
            m['TASK_ID'] = int(re.findall('ID=(.*?);', str_val)[0])
            m['TASK_TUnit'] = re.findall('TUnit=(.*?);', str_val)[0]
            m['TASK_En'] = int(re.findall('En=(.*?);', str_val)[0])
            m['TASK_Mod'] = re.findall('Mod=(.*?);', str_val)[0]
            m['TASK_Fs'] = re.findall('Fs=(.*?);', str_val)[0]
            m['TASK_SSD'] = int(re.findall('SDD=(.*?);', str_val)[0])
            m['TASK_Ga'] = int(re.findall('Ga=(.*?);', str_val)[0])
            m['TASK_We'] = int(re.findall('We=(.*?);', str_val)[0])
            m['TASK_MU'] = int(re.findall('MU=(.*?);', str_val)[0])
            m['TASK_My'] = float(re.findall('My=(.*?);', str_val)[0])
            m['TASK_Info'] = re.findall('Info=(.*?)$', str_val)[0]
            # Prot=[...]: nested protocol block inside TASK.
            str_val = re.findall('Prot=\\[(.*?)\\];', str_val)[0]
            m['TASK_Prot_Name'] = re.findall('Name=(.*?);', str_val)[0]
            m['TASK_Prot_Flat'] = int(re.findall('Flat=(.*?);', str_val)[0])
            m['TASK_Prot_Sym'] = int(re.findall('Sym=(.*?)$', str_val)[0])
        elif (data_split[0] == 'MEASCNT'):
            m[data_split[0]] = int(data_split[1:][0])
        elif (data_split[0] in ('PTW', 'SER', 'KEY')):
            m[data_split[0]] = data_split[1:]
        return m

    def get_measurements(self):
        """Download every stored measurement into ``self.measurements``.

        Queries MEASCNT for the count, then fetches each index with
        MEASGET, re-requesting until the reply echoes the command.

        NOTE(review): if the MEASCNT reply never arrives, ``n_meas`` is
        unbound and this raises NameError; also the loop variable ``m``
        reuses (and shadows) the parsed-dict name above -- confirm intent.

        Raises:
            ValueError: when called before a successful connect().
        """
        if (not self.connected):
            raise ValueError('Quickcheck device not connected')
        self.send_quickcheck('MEASCNT')
        if ('MEASCNT' not in self.data):
            # One extra attempt in case the first reply was unrelated.
            self.send_quickcheck('MEASCNT')
        m = self._parse_measurements()
        if ('MEASCNT' in m):
            n_meas = m['MEASCNT']
        print('Receiving Quickcheck measurements')
        meas_list = []
        for m in tqdm.tqdm(range(n_meas)):
            control = False
            while (not control):
                # Repeat until the device echoes the MEASGET command.
                self.send_quickcheck(('MEASGET;INDEX-MEAS=' + ('%d' % (m,))))
                control = (self.raw_MSG in self.data)
            meas = self._parse_measurements()
            meas_list.append(meas)
        self.measurements = pd.DataFrame(meas_list)
class _MixinSvgPosition():
    """Mixin adding x/y attribute accessors for SVG elements.

    NOTE(review): in this copy the decorator lines were mangled into bare
    statements (``_attribute_decorator(...)`` calls whose results were
    discarded, and ``_x.setter`` / ``_y.setter`` attribute accesses).
    Restored here as ``@``-decorators, assuming ``_attribute_decorator``
    returns a property-like descriptor -- confirm the exact decorator names
    against the original source.
    """

    @_attribute_decorator('WidgetSpecific', 'Coordinate for Svg element.', float, {'possible_values': '', 'min': (- 65635.0), 'max': 65635.0, 'default': 1.0, 'step': 0.1})
    def attr_x(self):
        """The element's 'x' attribute (string), defaulting to '0'."""
        return self.attributes.get('x', '0')

    @attr_x.setter
    def attr_x(self, value):
        self.attributes['x'] = str(value)

    @_attribute_decorator('WidgetSpecific', 'Coordinate for Svg element.', float, {'possible_values': '', 'min': (- 65635.0), 'max': 65635.0, 'default': 1.0, 'step': 0.1})
    def attr_y(self):
        """The element's 'y' attribute (string), defaulting to '0'."""
        return self.attributes.get('y', '0')

    @attr_y.setter
    def attr_y(self, value):
        self.attributes['y'] = str(value)

    def set_position(self, x, y):
        """Convenience setter for both coordinates at once."""
        self.attr_x = x
        self.attr_y = y
class TestCaseRetry(TestCase):
    """Interop test: the server sends a Retry and the client completes the
    handshake using the Retry token.
    """

    # name/abbreviation/desc were defined without ``self`` and without
    # ``@staticmethod``, so calling them on an instance raised TypeError;
    # declaring them static fixes instance calls and keeps class calls working.
    @staticmethod
    def name():
        return 'retry'

    @staticmethod
    def abbreviation():
        return 'S'

    @staticmethod
    def desc():
        return 'Server sends a Retry, and a subsequent connection using the Retry token completes successfully.'

    def get_paths(self):
        """Serve a single 10 KB random file for the client to fetch."""
        self._files = [self._generate_random_file((10 * KB))]
        return self._files

    def _check_trace(self) -> bool:
        """Validate the client trace.

        Collects the Retry tokens the server sent, then looks for a client
        Initial that (a) carries one of those tokens and (b) does not reuse
        a packet number from before the Retry.
        """
        tr = self._client_trace()
        tokens = []
        retries = tr.get_retry(Direction.FROM_SERVER)
        for p in retries:
            if not hasattr(p, 'retry_token'):
                logging.info("Retry packet doesn't have a retry_token")
                logging.info(p)
                return False
            tokens += [p.retry_token.replace(':', '')]
        if len(tokens) == 0:
            logging.info("Didn't find any Retry packets.")
            return False
        # Highest packet number among token-less (pre-Retry) Initials.
        highest_pn_before_retry = (- 1)
        for p in tr.get_initial(Direction.FROM_CLIENT):
            pn = int(p.packet_number)
            if p.token_length == '0':
                highest_pn_before_retry = max(highest_pn_before_retry, pn)
                continue
            if pn <= highest_pn_before_retry:
                logging.debug('Client reset the packet number. Check failed for PN %d', pn)
                return False
            token = p.token.replace(':', '')
            if token in tokens:
                logging.debug('Check of Retry succeeded. Token used: %s', token)
                return True
        logging.info("Didn't find any Initial packet using a Retry token.")
        return False

    def check(self) -> TestResult:
        """Overall verdict: one handshake, files transferred, trace checks pass."""
        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info('Expected exactly 1 handshake. Got: %d', num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        if not self._check_trace():
            return TestResult.FAILED
        return TestResult.SUCCEEDED
class TestGenerator(TestNameCheckVisitorBase):
    """Tests for inference of generator yield/send/return types."""

    @_passes()
    def test_generator_return(self):
        from typing import Generator

        def gen(cond) -> Generator[int, str, float]:
            x = yield 1
            assert_is_value(x, TypedValue(str))
            yield 'x'
            if cond:
                return 3.0
            else:
                return 'hello'

        def capybara() -> Generator[int, int, int]:
            # ``yield from`` evaluates to the inner generator's return type.
            x = yield from gen(True)
            assert_is_value(x, TypedValue(float))
            return 3

    @_passes()
    def test_iterable_return(self):
        from typing import Iterable

        def gen(cond) -> Iterable[int]:
            # Iterable[int] carries no send type, so ``x`` is None.
            x = yield 1
            assert_is_value(x, KnownValue(None))
            yield 'x'
            if cond:
                return
            else:
                return 3

        def caller() -> Iterable[int]:
            x = yield from gen(True)
            assert_is_value(x, AnyValue(AnySource.generic_argument))
class DeleteLate(ScrimsButton):
    """Button that toggles automatic deletion of late/extra registration
    messages for the current scrim record."""

    def __init__(self, ctx: Context, letter: str):
        super().__init__(emoji=ri(letter))
        self.ctx = ctx

    async def callback(self, interaction: Interaction):
        # Acknowledge the interaction first so Discord doesn't time out.
        await interaction.response.defer()
        record = self.view.record
        record.autodelete_extras = not record.autodelete_extras
        state = 'be' if record.autodelete_extras else 'not be'
        await self.ctx.success(
            f"Late/Extra registration messages will **{state}** deleted automatically.", 3
        )
        await self.view.refresh_view()
        # Propagate the toggled setting to every scrim of this record.
        await record.confirm_all_scrims(self.ctx, autodelete_extras=record.autodelete_extras)
class Migration(migrations.Migration):
    """Run the ``forwards`` data migration (reverse step is a no-op) and
    update help text / null-ability of ``ServerConfig.db_value``."""

    dependencies = [('server', '0001_initial')]
    operations = [migrations.RunPython(forwards, migrations.RunPython.noop), migrations.AlterField(model_name='serverconfig', name='db_value', field=evennia.utils.picklefield.PickledObjectField(help_text='The data returned when the config value is accessed. Must be written as a Python literal if editing through the admin interface. Attribute values which are not Python literals cannot be edited through the admin interface.', null=True, verbose_name='value'))]
def _text_size(font, text):
    """Return (width, height) of *text* for *font*.

    Pillow 10 removed ``ImageFont.getsize``; fall back to ``getbbox`` there.
    """
    if hasattr(font, 'getsize'):
        return font.getsize(text)
    left, top, right, bottom = font.getbbox(text)
    return (right - left, bottom - top)

def draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color='red', thickness=4, display_str_list=(), use_normalized_coordinates=True):
    """Draw a bounding box (and optional label strings) onto a PIL image in place.

    Args:
        image: PIL.Image, modified in place.
        ymin, xmin, ymax, xmax: box corners; fractions of the image size when
            ``use_normalized_coordinates`` is True, otherwise absolute pixels.
        color: outline and label-background color.
        thickness: line width of the box outline.
        display_str_list: label strings stacked above the box, bottom-up.
        use_normalized_coordinates: interpret coordinates as [0, 1] fractions.
    """
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    if use_normalized_coordinates:
        left, right, top, bottom = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)
    else:
        left, right, top, bottom = (xmin, xmax, ymin, ymax)
    draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color)
    try:
        font = ImageFont.truetype('arial.ttf', 24)
    except IOError:
        font = ImageFont.load_default()
    # Labels are drawn from the box top upward, last string closest to the box.
    text_bottom = top
    for display_str in display_str_list[::-1]:
        text_width, text_height = _text_size(font, display_str)
        margin = np.ceil(0.05 * text_height)
        draw.rectangle([(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)], fill=color)
        draw.text((left + margin, text_bottom - text_height - margin), display_str, fill='black', font=font)
        text_bottom -= text_height - 2 * margin
@admin.register(Flight)
class FlightAdmin(RemoveDeleteMixin, FlightMixin, SimpleHistoryAdmin):
    """Admin for Flight, with an action that drafts a Stripe invoice."""

    model = Flight
    form = FlightAdminForm
    save_as = True
    actions = ['action_create_draft_invoice']
    inlines = (AdvertisementsInline, InvoiceInline)
    list_display = ('name', 'slug', 'campaign', 'live', 'start_date', 'end_date', 'sold_clicks', 'sold_impressions', 'cpc', 'cpm', 'value_remaining', 'clicks_needed_this_interval', 'views_needed_this_interval', 'priority_multiplier', 'total_clicks', 'total_views', 'num_ads', 'ctr', 'ecpm')
    list_editable = ('live',)
    list_filter = ('live', 'campaign__campaign_type', CPCCPMFilter, 'campaign__advertiser')
    list_select_related = ('campaign', 'campaign__advertiser')
    raw_id_fields = ('campaign', 'invoices')
    readonly_fields = ('value_remaining', 'projected_total_value', 'total_clicks', 'total_views', 'clicks_today', 'views_today', 'clicks_needed_this_interval', 'views_needed_this_interval', 'weighted_clicks_needed_this_interval', 'modified', 'created')
    prepopulated_fields = {'slug': ('name',)}
    search_fields = ('name', 'slug', 'campaign__name', 'campaign__slug')

    @admin.action(description=_('Create a draft invoice for selected flights'))
    def action_create_draft_invoice(self, request, queryset):
        """Create one draft Stripe invoice covering all selected flights.

        All selected flights must belong to a single advertiser that has a
        Stripe customer attached; otherwise an error message is shown.
        """
        if not settings.STRIPE_ENABLED:
            messages.add_message(request, messages.ERROR, _('Stripe is not configured. Please set the envvar STRIPE_SECRET_KEY.'))
            return
        flights = list(queryset.select_related('campaign', 'campaign__advertiser'))
        if not flights:
            return
        if len({f.campaign.advertiser_id for f in flights}) > 1:
            messages.add_message(request, messages.ERROR, _('All selected flights must be from a single advertiser.'))
            return
        earliest_start_date = min([f.start_date for f in flights])
        latest_end_date = max([f.end_date for f in flights])
        advertiser = [f.campaign.advertiser for f in flights][0]
        if not advertiser.djstripe_customer:
            messages.add_message(request, messages.ERROR, _('No Stripe customer ID for {}'.format(advertiser)))
            return
        invoice_discount = None
        total_cost = 0
        for flight in flights:
            message_components = ['Advertising', flight.name]
            unit_amount = 0
            quantity = 1
            if flight.cpc:
                message_components.append('per click')
                unit_amount = flight.cpc * 100  # dollars -> cents per click
                quantity = flight.sold_clicks
            elif flight.cpm:
                # $CPM (per 1000 impressions) -> cents per single impression.
                unit_amount = flight.cpm / 10
                message_components.append('${:.2f} CPM'.format(flight.cpm))
                quantity = flight.sold_impressions
            if flight.discount:
                invoice_discount = flight.discount
            total_cost += unit_amount * quantity
            stripe.InvoiceItem.create(customer=advertiser.djstripe_customer.id, description=' - '.join(message_components), quantity=quantity, unit_amount_decimal=unit_amount, currency='USD', metadata={'Advertiser': advertiser.slug[:30], 'Flight': flight.slug[:30], 'Flight Start': flight.start_date.strftime('%Y-%m-%d'), 'Flight End': flight.end_date.strftime('%Y-%m-%d')})
        description = 'Thanks for your business!'
        if invoice_discount:
            description = f'Includes {invoice_discount} discount. ' + description
        inv = stripe.Invoice.create(customer=advertiser.djstripe_customer.id, auto_advance=False, collection_method='send_invoice', description=description, custom_fields=[{'name': 'Advertiser', 'value': advertiser.slug[:30]}, {'name': 'Estimated Start', 'value': earliest_start_date.strftime('%Y-%m-%d')}, {'name': 'Estimated End', 'value': latest_end_date.strftime('%Y-%m-%d')}], days_until_due=30)
        invoice = Invoice.sync_from_stripe_data(inv)
        if invoice.pk:
            for flight in flights:
                flight.invoices.add(invoice)
        messages.add_message(request, messages.SUCCESS, _('New Stripe invoice for {}: {}'.format(advertiser, invoice.get_stripe_dashboard_url())))

    def get_queryset(self, request):
        """Annotate each flight with its distinct ad count (``num_ads``)."""
        queryset = super().get_queryset(request)
        queryset = queryset.annotate(num_ads=models.Count('advertisements', distinct=True))
        return queryset
def gmetric_write(NAME, VAL, TYPE, UNITS, SLOPE, TMAX, DMAX, GROUP):
    """Build the (metadata, value) XDR packet pair for a Ganglia gmetric.

    Returns a tuple ``(meta_buffer, value_buffer)`` ready to send to gmond.
    """
    HOSTNAME = 'test'
    SPOOF = 0

    # Metadata packet (message id 128): describes the metric.
    meta = Packer()
    meta.pack_int(128)
    meta.pack_string(HOSTNAME)
    meta.pack_string(NAME)
    meta.pack_int(SPOOF)
    meta.pack_string(TYPE)
    meta.pack_string(NAME)  # the wire format repeats the metric name
    meta.pack_string(UNITS)
    meta.pack_int(slope_str2int[SLOPE])
    meta.pack_uint(int(TMAX))
    meta.pack_uint(int(DMAX))
    if GROUP == '':
        meta.pack_int(0)  # no extra metadata elements
    else:
        meta.pack_int(1)  # one extra element: the GROUP key/value pair
        meta.pack_string('GROUP')
        meta.pack_string(GROUP)

    # Value packet (message id 128 + 5): carries the actual reading.
    data = Packer()
    data.pack_int(128 + 5)
    data.pack_string(HOSTNAME)
    data.pack_string(NAME)
    data.pack_int(SPOOF)
    data.pack_string('%s')  # printf-style format of the value
    data.pack_string(str(VAL))

    return (meta.get_buffer(), data.get_buffer())
def analyze_egg(egg_dir, stubs):
    """Determine whether an unpacked egg is zip-safe.

    An explicit marker file in EGG-INFO wins outright; otherwise every
    compiled module in the egg is scanned for zip-unsafe constructs.
    """
    # Explicit zip-safe / not-zip-safe marker files take precedence.
    for flag, marker in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', marker)):
            return flag
    if not can_scan():
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for filename in files:
            if filename.endswith(('.py', '.pyw')):
                # Plain sources are fine; only compiled files are scanned.
                continue
            if filename.endswith(('.pyc', '.pyo')):
                # Keep scanning after a failure so every offender is reported.
                safe = scan_module(egg_dir, base, filename, stubs) and safe
    return safe
class normalizer(object):
    """Reinhard-style stain normalizer.

    Matches each channel of an input image to the per-channel mean/std
    statistics of a previously fitted target image.
    """

    def __init__(self):
        # Statistics of the fitted target; populated by fit().
        self.target_means = None
        self.target_stds = None

    def fit(self, target):
        """Record per-channel mean/std of the brightness-standardized target."""
        target = ut.standardize_brightness(target)
        self.target_means, self.target_stds = get_mean_std(target)

    def transform(self, I):
        """Map each channel of ``I`` onto the target's statistics."""
        I = ut.standardize_brightness(I)
        channels = lab_split(I)
        means, stds = get_mean_std(I)
        normed = [
            (chan - mean) * (t_std / std) + t_mean
            for chan, mean, std, t_mean, t_std in zip(
                channels, means, stds, self.target_means, self.target_stds
            )
        ]
        return merge_back(*normed)
def create_new_paste(contents: Union[str, bytes]) -> str:
    """Upload *contents* to the bpa.st pastebin and return a URL to view it.

    Never raises: on network failure or an unparseable response a
    human-readable ``'bad response: ...'`` string is returned instead.
    """
    import re
    from urllib.request import urlopen
    from urllib.parse import urlencode

    params = {'code': contents, 'lexer': 'text', 'expiry': '1week'}
    # Restored: this URL literal was truncated/corrupted in the source.
    url = 'https://bpa.st'
    try:
        response: str = urlopen(url, data=urlencode(params).encode('ascii')).read().decode('utf-8')
    except OSError as exc_info:  # urllib errors are subclasses of OSError
        return 'bad response: %s' % exc_info
    m = re.search('href="/raw/(\\w+)"', response)
    if m:
        return f'{url}/show/{m.group(1)}'
    else:
        return ("bad response: invalid format ('" + response) + "')"
class Migration(migrations.Migration):
    """Align ObjectDB field definitions (help texts, verbose names, null and
    on_delete behavior) with the current model state; no data is changed."""

    dependencies = [('objects', '0009_remove_objectdb_db_player')]
    operations = [migrations.AlterField(model_name='objectdb', name='db_account', field=models.ForeignKey(help_text='an Account connected to this object, if any.', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='account')), migrations.AlterField(model_name='objectdb', name='db_attributes', field=models.ManyToManyField(help_text='attributes on this object. An attribute can hold any pickle-able python object (see docs for special cases).', to='typeclasses.Attribute')), migrations.AlterField(model_name='objectdb', name='db_cmdset_storage', field=models.CharField(blank=True, help_text='optional python path to a cmdset class.', max_length=255, null=True, verbose_name='cmdset')), migrations.AlterField(model_name='objectdb', name='db_date_created', field=models.DateTimeField(auto_now_add=True, verbose_name='creation date')), migrations.AlterField(model_name='objectdb', name='db_destination', field=models.ForeignKey(blank=True, help_text='a destination, used only by exit objects.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='destinations_set', to='objects.ObjectDB', verbose_name='destination')), migrations.AlterField(model_name='objectdb', name='db_home', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='homes_set', to='objects.ObjectDB', verbose_name='home location')), migrations.AlterField(model_name='objectdb', name='db_key', field=models.CharField(db_index=True, max_length=255, verbose_name='key')), migrations.AlterField(model_name='objectdb', name='db_location', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='locations_set', to='objects.ObjectDB', verbose_name='game location')), migrations.AlterField(model_name='objectdb', name='db_lock_storage', field=models.TextField(blank=True, help_text="locks limit access to an entity. 
A lock is defined as a 'lock string' on the form 'type:lockfunctions', defining what functionality is locked and how to determine access. Not defining a lock means no access is granted.", verbose_name='locks')), migrations.AlterField(model_name='objectdb', name='db_sessid', field=models.CharField(help_text='csv list of session ids of connected Account, if any.', max_length=32, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z', 32), code='invalid', message='Enter only digits separated by commas.')], verbose_name='session id')), migrations.AlterField(model_name='objectdb', name='db_tags', field=models.ManyToManyField(help_text='tags on this object. Tags are simple string markers to identify, group and alias objects.', to='typeclasses.Tag')), migrations.AlterField(model_name='objectdb', name='db_typeclass_path', field=models.CharField(help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.", max_length=255, null=True, verbose_name='typeclass'))]
class Migration(migrations.Migration):
    """Update help texts and on_delete behavior of Task relations
    (conditions, start/end date attributes); schema data is unchanged."""

    dependencies = [('tasks', '0030_available')]
    operations = [migrations.AlterField(model_name='task', name='conditions', field=models.ManyToManyField(blank=True, help_text='The list of conditions evaluated for this task.', related_name='tasks', to='conditions.Condition', verbose_name='Conditions')), migrations.AlterField(model_name='task', name='end_attribute', field=models.ForeignKey(blank=True, help_text='The attribute that is setting the end date for this task (optional, if no end date attribute is given, the start date attribute sets also the end date).', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks_as_end', to='domain.Attribute', verbose_name='End date attribute')), migrations.AlterField(model_name='task', name='start_attribute', field=models.ForeignKey(blank=True, help_text='The attribute that is setting the start date for this task.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks_as_start', to='domain.Attribute', verbose_name='Start date attribute'))]
@_layer('gineconv')
class GINEConvGraphGymLayer(nn.Module):
    """GraphGym wrapper around ``GINEConv`` (GIN with edge features),
    registered under the name ``'gineconv'``."""

    def __init__(self, layer_config: LayerConfig, **kwargs):
        super().__init__()
        # Two-layer MLP used as the GIN update function.
        gin_nn = nn.Sequential(
            Linear_pyg(layer_config.dim_in, layer_config.dim_out),
            nn.ReLU(),
            Linear_pyg(layer_config.dim_out, layer_config.dim_out),
        )
        self.model = pyg_nn.GINEConv(gin_nn)

    def forward(self, batch):
        # Updates node features in place on the batch and returns it.
        batch.x = self.model(batch.x, batch.edge_index, batch.edge_attr)
        return batch
def has(cls):
    """Return True if *cls* (or its generic origin) is an attrs class.

    When only the generic origin carries ``__attrs_attrs__``, the attribute
    is cached onto *cls* itself for faster subsequent lookups.
    """
    if getattr(cls, '__attrs_attrs__', None) is not None:
        return True
    origin = get_generic_base(cls)
    if origin is None:
        return False
    origin_attrs = getattr(origin, '__attrs_attrs__', None)
    if origin_attrs is None:
        return False
    # Cache the origin's attrs metadata on the subscripted class.
    cls.__attrs_attrs__ = origin_attrs
    return True
class Effect7098(BaseEffect):
    """Structure combat-rig effect (id 7098): boosts hull HP/capacitor and
    targeting, reduces point-defense capacitor need, and zeroes out the
    bonuses of competing rig groups.

    NOTE(review): pyfa effect handlers are written without ``self`` and are
    invoked as plain functions by the framework -- confirm against BaseEffect.
    """

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Flat HP / capacitor bonuses from the conversion rig.
        for attr in ('hp', 'armorHP', 'shieldCapacity', 'capacitorCapacity'):
            fit.ship.boostItemAttr(attr, src.getModifiedItemAttr('conversionRigHPCapBonus'), **kwargs)
        # Targeting bonuses (stacking-penalized).
        fit.ship.boostItemAttr('maxTargetRange', src.getModifiedItemAttr('structureRigMaxTargetRangeBonus'), stackingPenalties=True, **kwargs)
        fit.ship.boostItemAttr('scanResolution', src.getModifiedItemAttr('structureRigScanResBonus'), stackingPenalties=True, **kwargs)
        fit.ship.increaseItemAttr('maxLockedTargets', src.getModifiedItemAttr('structureRigMaxTargetBonus'), **kwargs)
        # Point-defense module bonuses.
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Structure Area Denial Module')), 'capacitorNeed', src.getModifiedItemAttr('structureRigPDCapUseBonus'), **kwargs)
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Structure Area Denial Module')), 'empFieldRange', src.getModifiedItemAttr('structureRigPDRangeBonus'), **kwargs)
        # Force the bonuses of the competing rig groups to zero so they do
        # not stack with this rig.
        for attr in ('structureRigMaxTargetBonus', 'structureRigMaxTargetRangeBonus', 'structureRigScanResBonus'):
            fit.modules.filteredItemForce((lambda mod: (mod.item.group.name == 'Structure Combat Rig L - Max Targets and Sensor Boosting')), attr, src.getModifiedItemAttr('constantZero'), **kwargs)
        for attr in ('structureRigPDCapUseBonus', 'structureRigPDRangeBonus'):
            fit.modules.filteredItemForce((lambda mod: (mod.item.group.name == 'Structure Combat Rig L - Point Defense Battery Application and Projection')), attr, src.getModifiedItemAttr('constantZero'), **kwargs)
class Names(unittest.TestCase):
    """Tests for DNS name length handling and name-conflict resolution."""

    def test_long_name(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion('this.is.a.very.long.name.with.lots.of.parts.in.it.local.', const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_exceedingly_long_name(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        name = ('%slocal.' % ('part.' * 1000))
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_extra_exceedingly_long_name(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        name = ('%slocal.' % ('part.' * 4000))
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_exceedingly_long_name_part(self):
        # A single label longer than 63 bytes must be rejected.
        name = ('%s.local.' % ('a' * 1000))
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        self.assertRaises(r.NamePartTooLongException, generated.packets)

    def test_same_name(self):
        # The same question may appear twice in one outgoing message.
        name = 'paired.local.'
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_verify_name_change_with_lots_of_names(self):
        zc = Zeroconf(interfaces=['127.0.0.1'])
        type_ = '_my-service._tcp.local.'
        name = 'a wonderful service'
        server_count = 300
        self.generate_many_hosts(zc, type_, name, server_count)
        self.verify_name_change(zc, type_, name, server_count)
        zc.close()

    def test_large_packet_exception_log_handling(self):
        zc = Zeroconf(interfaces=['127.0.0.1'])
        with patch('zeroconf._logger.log.warning') as mocked_log_warn, patch('zeroconf._logger.log.debug') as mocked_log_debug:
            # Oversized packet to a high-level destination: no warning expected.
            out = r.DNSOutgoing((const._FLAGS_QR_RESPONSE | const._FLAGS_AA))
            out.data.append((b'\x00' * 10000))
            call_counts = (mocked_log_warn.call_count, mocked_log_debug.call_count)
            zc.send(out)
            assert (mocked_log_warn.call_count == call_counts[0])
            zc.send(out)
            assert (mocked_log_warn.call_count == call_counts[0])
            # Sending to the mDNS multicast address should log at debug level.
            call_counts = (mocked_log_warn.call_count, mocked_log_debug.call_count)
            zc.send(out, const._MDNS_ADDR, const._MDNS_PORT)
            zc.send(out, const._MDNS_ADDR, const._MDNS_PORT)
            time.sleep(0.3)
            r.log.debug('warn %d debug %d was %s', mocked_log_warn.call_count, mocked_log_debug.call_count, call_counts)
            assert (mocked_log_debug.call_count > call_counts[0])
        zc.close()

    def verify_name_change(self, zc, type_, name, number_hosts):
        """Register conflicting services and check the renamed instance."""
        desc = {'path': '/~paulsm/'}
        info_service = ServiceInfo(type_, f'{name}.{type_}', 80, 0, 0, desc, 'ash-2.local.', addresses=[socket.inet_aton('10.0.1.2')])
        self.assertRaises(r.NonUniqueNameException, zc.register_service, info_service)
        zc.register_service(info_service, cooperating_responders=True)
        info_service2 = ServiceInfo(type_, f'{name}.{type_}', 80, 0, 0, desc, 'ash-2.local.', addresses=[socket.inet_aton('10.0.1.2')])
        zc.register_service(info_service2, allow_name_change=True)
        assert (info_service2.name.split('.')[0] == ('%s-%d' % (name, (number_hosts + 1))))

    def generate_many_hosts(self, zc, type_, name, number_hosts):
        """Inject `number_hosts` fake responders (rounded up to a block)."""
        block_size = 25
        number_hosts = (int((((number_hosts - 1) / block_size) + 1)) * block_size)
        out = r.DNSOutgoing((const._FLAGS_QR_RESPONSE | const._FLAGS_AA))
        for i in range(1, (number_hosts + 1)):
            next_name = (name if (i == 1) else ('%s-%d' % (name, i)))
            self.generate_host(out, next_name, type_)
        _inject_responses(zc, [r.DNSIncoming(packet) for packet in out.packets()])

    # Restored @staticmethod: without it, self.generate_host(out, name, type_)
    # above passes the instance as an extra argument and raises TypeError.
    @staticmethod
    def generate_host(out, host_name, type_):
        name = '.'.join((host_name, type_))
        out.add_answer_at_time(r.DNSPointer(type_, const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, name), 0)
        out.add_answer_at_time(r.DNSService(type_, const._TYPE_SRV, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_HOST_TTL, 0, 0, 80, name), 0)
def random_search(scores_info_export_path, num_trials, report_oracle_bleu=False):
    """Random search over rescoring feature weights and length penalty.

    Loads pickled per-example hypothesis scores, optionally reports oracle
    BLEU, then samples ``num_trials`` Dirichlet weight vectors and length
    penalties, keeping the best-scoring combination.

    Returns:
        Tuple of (best_weights, best_length_penalty, best_score).
    """
    with open(scores_info_export_path, 'rb') as f:
        scores_info = pickle.load(f)
    dummy_task = DummyTask()
    if report_oracle_bleu:
        # Upper bound: pick the hypothesis with the best smoothed BLEU per example.
        oracle_scorer = bleu.Scorer(bleu.BleuConfig(pad=vocab_constants.PAD_ID, eos=vocab_constants.EOS_ID, unk=vocab_constants.UNK_ID))
        for example in scores_info:
            smoothed_bleu = []
            for hypo in example['hypos']:
                eval_score = smoothed_sentence_bleu(dummy_task, torch.IntTensor(example['target_tokens']), torch.IntTensor(hypo))
                smoothed_bleu.append(eval_score)
            best_hypo_ind = np.argmax(smoothed_bleu)
            example['best_hypo_ind'] = best_hypo_ind
            oracle_scorer.add(torch.IntTensor(example['target_tokens']), torch.IntTensor(example['hypos'][best_hypo_ind]))
        print('oracle BLEU: ', oracle_scorer.score())
    num_features = scores_info[0]['scores'].shape[1]
    assert all((example['scores'].shape[1] == num_features) for example in scores_info), 'All examples must have the same number of scores!'
    # Baseline: weight only the first feature, length penalty of 1.
    feature_weights = np.zeros(num_features)
    feature_weights[0] = 1
    score = evaluate_weights(scores_info, feature_weights, length_penalty=1)
    print('base BLEU: ', score)
    best_score = score
    best_weights = feature_weights
    # Bug fix: the baseline above was evaluated with length_penalty=1, so
    # that is the value to return when no trial beats it (was 0).
    best_length_penalty = 1
    nonzero_features = identify_nonzero_features(scores_info)
    for i in range(num_trials):
        feature_weights = np.zeros(num_features)
        random_weights = np.random.dirichlet(np.ones(nonzero_features.size))
        feature_weights[nonzero_features] = random_weights
        length_penalty = 1.5 * np.random.random()
        score = evaluate_weights(scores_info, feature_weights, length_penalty)
        if score > best_score:
            best_score = score
            best_weights = feature_weights
            best_length_penalty = length_penalty
        print(f'''
[{i}] best: {best_score}''', end='', flush=True)
    print()
    print('best weights: ', best_weights)
    # Bug fix: previously printed the *last sampled* length penalty.
    print('best length penalty: ', best_length_penalty)
    return (best_weights, best_length_penalty, best_score)
class TestPortaraDataProviderDaily(TestCase):
    """Tests for PortaraDataProvider with daily futures data fixtures."""

    # Restored @classmethod: unittest invokes cls.setUpClass() with no
    # arguments, so a plain function here raises TypeError.
    @classmethod
    def setUpClass(cls) -> None:
        cls.start_date = str_to_date('2021-05-18')
        cls.end_date = str_to_date('2021-06-28')
        cls.number_of_data_bars = 29
        cls.fields = PriceField.ohlcv()
        cls.ticker = PortaraTicker('AB', SecurityType.FUTURE, 1)
        cls.tickers = [PortaraTicker('AB', SecurityType.FUTURE, 1), PortaraTicker('ABCD', SecurityType.FUTURE, 7)]
        cls.future_ticker = PortaraFutureTicker('', 'AB{}', 1, 1)
        cls.future_tickers = [PortaraFutureTicker('', 'AB{}', 1, 1), PortaraFutureTicker('', 'ABCD{}', 1, 7)]
        cls.futures_path = str(((Path(__file__).parent / Path('input_data')) / Path('Futures')))

    def get_data_provider(self, tickers, fields) -> PortaraDataProvider:
        """Build a daily-frequency provider for the test date range."""
        return PortaraDataProvider(self.futures_path, tickers, fields, self.start_date, self.end_date, Frequency.DAILY)

    def test_get_price_single_ticker_many_fields_many_dates(self):
        data_provider = self.get_data_provider(self.ticker, self.fields)
        prices = data_provider.get_price(self.ticker, self.fields, self.start_date, self.end_date)
        self.assertEqual(type(prices), PricesDataFrame)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(self.fields)))
        self.assertEqual(Frequency.infer_freq(prices.index), Frequency.DAILY)

    def test_get_price_single_ticker_many_fields_single_date(self):
        date = str_to_date('2021-06-11')
        data_provider = self.get_data_provider(self.ticker, self.fields)
        prices = data_provider.get_price(self.ticker, self.fields, date, date)
        self.assertEqual(type(prices), PricesSeries)
        self.assertEqual(prices.shape, (len(self.fields),))

    def test_get_price_single_ticker_single_field_many_dates(self):
        data_provider = self.get_data_provider(self.ticker, PriceField.Close)
        prices = data_provider.get_price(self.ticker, PriceField.Close, self.start_date, self.end_date)
        self.assertEqual(type(prices), PricesSeries)
        self.assertEqual(prices.shape, (self.number_of_data_bars,))

    def test_get_price_single_ticker_single_field_single_date(self):
        date = str_to_date('2021-06-11')
        data_provider = self.get_data_provider(self.ticker, PriceField.Close)
        prices = data_provider.get_price(self.ticker, PriceField.Close, date, date)
        self.assertEqual(prices, 61656)

    def test_get_price_many_tickers_many_fields_many_dates(self):
        data_provider = self.get_data_provider(self.tickers, self.fields)
        prices = data_provider.get_price(self.tickers, self.fields, self.start_date, self.end_date)
        self.assertEqual(type(prices), QFDataArray)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(self.tickers), len(self.fields)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.dates.values)), Frequency.DAILY)

    def test_get_price_many_tickers_single_field_many_dates(self):
        data_provider = self.get_data_provider(self.tickers, PriceField.Close)
        prices = data_provider.get_price(self.tickers, PriceField.Close, self.start_date, self.end_date)
        self.assertEqual(type(prices), PricesDataFrame)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(self.tickers)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.index)), Frequency.DAILY)

    def test_get_price_many_tickers_many_fields_single_date(self):
        date = str_to_date('2021-06-11')
        data_provider = self.get_data_provider(self.tickers, self.fields)
        prices = data_provider.get_price(self.tickers, self.fields, date, date)
        self.assertEqual(type(prices), PricesDataFrame)
        self.assertEqual(prices.shape, (len(self.tickers), len(self.fields)))

    def test_get_price_many_tickers_single_field_single_date(self):
        date = str_to_date('2021-06-11')
        data_provider = self.get_data_provider(self.tickers, PriceField.Close)
        prices = data_provider.get_price(self.tickers, PriceField.Close, date, date)
        self.assertEqual(type(prices), PricesSeries)
        self.assertEqual(prices.shape, (len(self.tickers),))

    def test_get_price_single_future_ticker_many_fields(self):
        data_provider = self.get_data_provider(self.future_ticker, self.fields)
        tickers_to_check = [PortaraTicker('AB2021M', SecurityType.FUTURE, 1), PortaraTicker('AB2021U', SecurityType.FUTURE, 1)]
        prices = data_provider.get_price(tickers_to_check, self.fields, self.start_date, self.end_date)
        self.assertEqual(type(prices), QFDataArray)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(tickers_to_check), len(self.fields)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.dates.values)), Frequency.DAILY)
        self.assertCountEqual(prices.tickers.values, tickers_to_check)

    def test_get_price_many_future_tickers_many_fields(self):
        data_provider = self.get_data_provider(self.future_tickers, self.fields)
        tickers_to_check = [PortaraTicker('AB2021M', SecurityType.FUTURE, 1), PortaraTicker('AB2021U', SecurityType.FUTURE, 1), PortaraTicker('ABCD2021M', SecurityType.FUTURE, 7), PortaraTicker('ABCD2021N', SecurityType.FUTURE, 7), PortaraTicker('ABCD2021Q', SecurityType.FUTURE, 7)]
        prices = data_provider.get_price(tickers_to_check, self.fields, self.start_date, self.end_date)
        self.assertEqual(type(prices), QFDataArray)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(tickers_to_check), len(self.fields)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.dates.values)), Frequency.DAILY)
        self.assertCountEqual(prices.tickers.values, tickers_to_check)

    def test_get_fut_chain_single_future_ticker(self):
        data_provider = self.get_data_provider(self.future_ticker, self.fields)
        fut_chain = data_provider.get_futures_chain_tickers(self.future_ticker, ExpirationDateField.LastTradeableDate)
        self.assertTrue(fut_chain)
        self.assertEqual(type(fut_chain), dict)
        self.assertEqual(type(fut_chain[self.future_ticker]), QFDataFrame)
        self.assertEqual(fut_chain[self.future_ticker].shape, (4, 1))

    def test_get_fut_chain_many_future_tickers(self):
        data_provider = self.get_data_provider(self.future_tickers, self.fields)
        fut_chain = data_provider.get_futures_chain_tickers(self.future_tickers, ExpirationDateField.LastTradeableDate)
        self.assertTrue(fut_chain)
        self.assertEqual(type(fut_chain), dict)
        self.assertEqual(type(fut_chain[self.future_tickers[0]]), QFDataFrame)
        self.assertEqual(fut_chain[self.future_tickers[0]].shape, (4, 1))
        self.assertEqual(fut_chain[self.future_tickers[1]].shape, (3, 1))
class Effect4058(BaseEffect):
    """Projected environment effect (id 4058): multiplies the explosive
    damage of rocket charges by the beacon's small-weapon damage multiplier.

    NOTE(review): pyfa effect handlers are written without ``self`` and are
    invoked as plain functions by the framework -- confirm against BaseEffect.
    """

    runTime = 'early'
    type = ('projected', 'passive')

    def handler(fit, beacon, context, projectionRange, **kwargs):
        # Stacking-penalized post-multiplication on all Rockets-skill charges.
        fit.modules.filteredChargeMultiply((lambda mod: mod.charge.requiresSkill('Rockets')), 'explosiveDamage', beacon.getModifiedItemAttr('smallWeaponDamageMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs)
class BasicBlock3d(nn.Module):
    """3D ResNet basic block: two 3D conv layers with a residual connection.

    When ``inflate`` is True, both convs use full 3x3x3 kernels (temporal
    receptive field); otherwise 1x3x3 (spatial only). Optionally wraps the
    output in a non-local module and supports gradient checkpointing.
    """

    # Basic blocks do not expand the channel count (unlike bottlenecks).
    expansion = 1

    def __init__(self, inplanes, planes, spatial_stride=1, temporal_stride=1, dilation=1, downsample=None, style='pytorch', inflate=True, non_local=False, non_local_cfg=dict(), conv_cfg=dict(typename='Conv3d'), norm_cfg=dict(typename='BN3d'), act_cfg=dict(typename='ReLU'), with_cp=False, **kwargs):
        super().__init__()
        assert (style in ['pytorch', 'caffe'])
        # Only 'inflate_style' is tolerated as an extra kwarg (API symmetry
        # with the bottleneck block); it is unused here.
        assert set(kwargs).issubset(['inflate_style'])
        self.inplanes = inplanes
        self.planes = planes
        self.spatial_stride = spatial_stride
        self.temporal_stride = temporal_stride
        self.dilation = dilation
        self.style = style
        self.inflate = inflate
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # with_cp: trade compute for memory via torch.utils.checkpoint.
        self.with_cp = with_cp
        self.non_local = non_local
        self.non_local_cfg = non_local_cfg
        # All striding happens in the first conv; the second keeps stride 1.
        self.conv1_stride_s = spatial_stride
        self.conv2_stride_s = 1
        self.conv1_stride_t = temporal_stride
        self.conv2_stride_t = 1
        if self.inflate:
            # Inflated: temporal kernel size 3 on both convs.
            conv1_kernel_size = (3, 3, 3)
            conv1_padding = (1, dilation, dilation)
            conv2_kernel_size = (3, 3, 3)
            conv2_padding = (1, 1, 1)
        else:
            # 2D-style: temporal kernel size 1 (no temporal mixing).
            conv1_kernel_size = (1, 3, 3)
            conv1_padding = (0, dilation, dilation)
            conv2_kernel_size = (1, 3, 3)
            conv2_padding = (0, 1, 1)
        self.conv1 = ConvModule(inplanes, planes, conv1_kernel_size, stride=(self.conv1_stride_t, self.conv1_stride_s, self.conv1_stride_s), padding=conv1_padding, dilation=(1, dilation, dilation), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # Second conv has no activation: ReLU is applied after the residual add.
        self.conv2 = ConvModule(planes, (planes * self.expansion), conv2_kernel_size, stride=(self.conv2_stride_t, self.conv2_stride_s, self.conv2_stride_s), padding=conv2_padding, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=None)
        self.downsample = downsample
        self.relu = build_activation_layer(self.act_cfg)
        if self.non_local:
            self.non_local_module = build_non_local_module(self.non_local_cfg, default_args=dict(in_channels=self.conv2.norm.num_features))

    def forward(self, x):
        """Residual forward pass; checkpointed when with_cp and grad enabled."""
        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.conv2(out)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out = (out + identity)
            return out
        if (self.with_cp and x.requires_grad):
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        if self.non_local:
            out = self.non_local_module(out)
        return out
def main():
    """PyBullet demo: drop a pile of YCB objects in front of a Panda arm,
    then repeatedly move above the pile, attempt a grasp, lift, and release.

    Runs forever; intended to be watched in the GUI. ``--pause`` waits for
    the 'n' key before starting.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--pause', action='store_true', help='pause')
    args = parser.parse_args()
    pybullet_planning.connect()
    pybullet_planning.add_data_path()
    p.setGravity(0, 0, (- 9.8))
    # Build the scene with rendering locked for speed.
    with pybullet_planning.LockRenderer():
        p.loadURDF('plane.urdf')
        ri = reorientbot.pybullet.PandaRobotInterface()
        p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=(- 45), cameraPitch=(- 45), cameraTargetPosition=(0.5, 0, 0))
        # Pre-recorded pile: object classes + poses.
        data = np.load('assets/pile_001.npz')
        class_ids = data['class_ids']
        positions = data['positions']
        quaternions = data['quaternions']
        for (class_id, position, quaternion) in zip(class_ids, positions, quaternions):
            visual_file = reorientbot.datasets.ycb.get_visual_file(class_id=class_id)
            collision_file = reorientbot.pybullet.get_collision_file(visual_file)
            c_obj_to_world = reorientbot.geometry.Coordinate(position, quaternion)
            c_obj_to_world.translate([0.5, 0, 0], wrt='world')
            # NOTE(review): the collision mesh is also used as the visual
            # mesh here — presumably intentional (cheaper rendering); confirm.
            reorientbot.pybullet.create_mesh_body(visual_file=collision_file, collision_file=collision_file, position=c_obj_to_world.position, quaternion=c_obj_to_world.quaternion, mass=0.1, rgba_color=list((imgviz.label_colormap()[class_id] / 255)))
    if args.pause:
        print("Please press 'n' to start")
        while True:
            if (ord('n') in p.getKeyboardEvents()):
                break
    # Fixed overhead camera pose above the pile (for visualization only).
    c_camera_to_world = reorientbot.geometry.Coordinate()
    c_camera_to_world.rotate([0, 0, np.deg2rad((- 90))])
    c_camera_to_world.rotate([np.deg2rad((- 180)), 0, 0])
    c_camera_to_world.translate([0.5, 0, 0.7], wrt='world')
    fovy = np.deg2rad(60)
    height = 480
    width = 640
    pybullet_planning.draw_pose(c_camera_to_world.pose)
    reorientbot.pybullet.draw_camera(fovy, height=height, width=width, pose=c_camera_to_world.pose)
    np.random.seed(0)
    while True:
        # Sample a random end-effector pose near the camera viewpoint.
        c_ee_to_world = c_camera_to_world.copy()
        c_ee_to_world.translate(np.random.uniform([(- 0.2), (- 0.2), 0.4], [0.2, 0.2, 0.4]))
        j = ri.solve_ik(c_ee_to_world.pose, rotation_axis=True)
        for _ in ri.movej(j):
            p.stepSimulation()
            time.sleep((1 / 240))
        for i in ri.grasp():
            p.stepSimulation()
            time.sleep((1 / 240))
            if (i > (5 * 240)):
                # ~5 simulated seconds at 240 Hz without making contact.
                print('Warning: timeout while trying to grasp')
                break
        reorientbot.pybullet.step_and_sleep(1)
        if ri.gripper.check_grasp():
            # Lift the grasped object 0.5 m straight up, then drop it.
            c = reorientbot.geometry.Coordinate(*pybullet_planning.get_link_pose(ri.robot, ri.ee))
            c.translate([0, 0, 0.5], wrt='world')
            j = ri.solve_ik(c.pose, rotation_axis='z')
            if (j is False):
                # Relax the orientation constraint if z-constrained IK fails.
                j = ri.solve_ik(c.pose, rotation_axis=None)
            for _ in ri.movej(j):
                p.stepSimulation()
                time.sleep((1 / 240))
            reorientbot.pybullet.step_and_sleep(3)
            ri.ungrasp()
        # Return home before the next attempt.
        for _ in ri.movej(ri.homej):
            p.stepSimulation()
            time.sleep((1 / 240))
    # Keep the simulation alive (never reached while the loop above runs).
    while True:
        p.stepSimulation()
        time.sleep((1 / 240))
def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
    """perf-script callback invoked for AUX-area trace decoding errors.

    ``ts`` is a timestamp in nanoseconds; it is shown as seconds.nanoseconds.
    ``*x`` swallows extra fields passed by newer perf versions. ``cpumode``
    is accepted for signature compatibility but not printed.
    """
    try:
        # Original source dropped the divisor: ts is nanoseconds, so split it
        # into whole seconds and the nanosecond remainder for %9u.%09u.
        (secs, nsecs) = divmod(ts, 1000000000)
        print(('%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x' % ('Trace error', pid, tid, cpu, secs, nsecs, typ, code, msg, ip)))
    except broken_pipe_exception:
        # Downstream consumer (e.g. `head`) went away: silence stdout and exit.
        sys.stdout = open(os.devnull, 'w')
        sys.exit(1)
def summaryCSS(title, center=True):
    """Return the opening HTML of a summary page (doctype, head, inline CSS,
    and the opening <body> tag).

    title  -- text placed in the <title> element
    center -- when True, table cells (td) are center-aligned
    """
    td_alignment = 'text-align:center;' if center else ''
    fragments = (
        '<!DOCTYPE html>\n<html>\n<head>\n\t<meta content="text/html; charset=UTF-8">\n\t<title>',
        title,
        '</title>\n\t<style type=\'text/css\'>\n\t\t.stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\t\ttable {width:100%;border-collapse: collapse;border:1px solid;}\n\t\tth {border: 1px solid black;background:#222;color:white;}\n\t\ttd {font: 14px "Times New Roman";',
        td_alignment,
        '}\n\t\ttr.head td {border: 1px solid black;background:#aaa;}\n\t\ttr.alt {background-color:#ddd;}\n\t\ttr.notice {color:red;}\n\t\t.minval {background-color:#BBFFBB;}\n\t\t.medval {background-color:#BBBBFF;}\n\t\t.maxval {background-color:#FFBBBB;}\n\t\t.head a {color:#000;text-decoration: none;}\n\t</style>\n</head>\n<body>\n',
    )
    return ''.join(fragments)
class MouseEvent(QtGui.QMouseEvent):
    """Picklable wrapper around QMouseEvent.

    Qt events cannot normally be pickled; this subclass captures the event
    state as plain values (via serialize_mouse_enum) so an event can be
    reconstructed on the other side of a pickle boundary.
    """

    @staticmethod
    def get_state(obj, picklable=False):
        """Extract (type, localPos, globalPos, button, buttons, modifiers).

        Must be a staticmethod: it is invoked both as ``self.get_state(rhs)``
        (obj = the source event) and as ``self.get_state(self, picklable=True)``.
        Without @staticmethod the bound call would pass ``self`` as ``obj``
        and ``rhs`` as ``picklable``, and __getstate__ would raise TypeError.
        Handles Qt5/Qt6 API differences (position vs localPos, etc.).
        """
        typ = obj.type()
        if isinstance(typ, int):
            # Some bindings return a plain int; normalize to the enum type.
            typ = QtCore.QEvent.Type(typ)
        lpos = (obj.position() if hasattr(obj, 'position') else obj.localPos())
        gpos = (obj.globalPosition() if hasattr(obj, 'globalPosition') else obj.screenPos())
        (btn, btns, mods) = (obj.button(), obj.buttons(), obj.modifiers())
        if picklable:
            # Convert Qt enums to plain values that survive pickling.
            (typ, btn, btns, mods) = serialize_mouse_enum(typ, btn, btns, mods)
        return (typ, lpos, gpos, btn, btns, mods)

    def __init__(self, rhs):
        """Copy-construct from an existing QMouseEvent *rhs*."""
        super().__init__(*self.get_state(rhs))

    def __getstate__(self):
        return self.get_state(self, picklable=True)

    def __setstate__(self, state):
        """Rebuild Qt enum types from their serialized plain values."""
        (typ, lpos, gpos, btn, btns, mods) = state
        typ = QtCore.QEvent.Type(typ)
        btn = QtCore.Qt.MouseButton(btn)
        if (not isinstance(btns, enum.Enum)):
            btns = QtCore.Qt.MouseButtons(btns)
        if (not isinstance(mods, enum.Enum)):
            mods = QtCore.Qt.KeyboardModifiers(mods)
        super().__init__(typ, lpos, gpos, btn, btns, mods)
class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests for the PyTorch example scripts.

    Each test assembles a CLI argument list over tiny fixture datasets,
    patches ``sys.argv``, calls the example module's ``main()``, and asserts
    loose metric thresholds (or merely that a model checkpoint was written).
    NOTE(review): the ``run_*`` names are modules imported elsewhere in the
    test harness — not visible in this chunk.
    """
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
        '''.split()
        # Exercise mixed precision when both CUDA and apex are available.
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_clm.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
        '''.split()
        # Skip under multi-GPU: data-parallel sharding makes the tiny run flaky.
        if (torch.cuda.device_count() > 1):
            return
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        with patch.object(sys, 'argv', testargs):
            run_clm.main()
            result = get_results(tmp_dir)
            self.assertLess(result['perplexity'], 100)
    def test_run_clm_config_overrides(self):
        """--config_overrides must be reflected in the logged model config."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_clm.py
            --model_type gpt2
            --tokenizer_name gpt2
            --train_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --config_overrides n_embd=10,n_head=2
        '''.split()
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        logger = run_clm.logger
        with patch.object(sys, 'argv', testargs):
            with CaptureLogger(logger) as cl:
                run_clm.main()
        self.assertIn('"n_embd": 10', cl.out)
        self.assertIn('"n_head": 2', cl.out)
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --prediction_loss_only
            --num_train_epochs=1
        '''.split()
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        with patch.object(sys, 'argv', testargs):
            run_mlm.main()
            result = get_results(tmp_dir)
            self.assertLess(result['perplexity'], 42)
    def test_run_ner(self):
        # With multiple GPUs the effective batch is larger, so train longer.
        epochs = (7 if (get_gpu_count() > 1) else 2)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
        '''.split()
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        with patch.object(sys, 'argv', testargs):
            run_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertLess(result['eval_loss'], 0.5)
    def test_run_squad(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        '''.split()
        with patch.object(sys, 'argv', testargs):
            run_squad.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)
    def test_run_squad_seq2seq(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_seq2seq_qa.py
            --model_name_or_path t5-small
            --context_column context
            --question_column question
            --answer_column answers
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        '''.split()
        with patch.object(sys, 'argv', testargs):
            run_squad_seq2seq.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)
    def test_run_swag(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_swag.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=20
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        '''.split()
        with patch.object(sys, 'argv', testargs):
            run_swag.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.8)
    def test_generation(self):
        """run_generation returns the generated sequences rather than metrics."""
        testargs = ['run_generation.py', '--prompt=Hello', '--length=10', '--seed=42']
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        (model_type, model_name) = ('--model_type=gpt2', '--model_name_or_path=sshleifer/tiny-gpt2')
        with patch.object(sys, 'argv', (testargs + [model_type, model_name])):
            result = run_generation.main()
            self.assertGreaterEqual(len(result[0]), 10)
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        '''.split()
        with patch.object(sys, 'argv', testargs):
            run_summarization.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_rouge1'], 10)
            self.assertGreaterEqual(result['eval_rouge2'], 2)
            self.assertGreaterEqual(result['eval_rougeL'], 7)
            self.assertGreaterEqual(result['eval_rougeLsum'], 7)
    def test_run_translation(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_translation.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            --source_lang en_XX
            --target_lang ro_RO
        '''.split()
        with patch.object(sys, 'argv', testargs):
            run_translation.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_bleu'], 30)
    def test_run_image_classification(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_image_classification.py
            --output_dir {tmp_dir}
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --dataloader_num_workers 16
            --metric_for_best_model accuracy
            --max_steps 10
            --train_val_split 0.1
            --seed 42
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_image_classification.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.8)
    def test_run_speech_recognition_ctc(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_speech_recognition_ctc.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-wav2vec2
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --eval_split_name validation
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --preprocessing_num_workers 16
            --max_steps 10
            --seed 42
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_speech_recognition_ctc.main()
            result = get_results(tmp_dir)
            # Sanity check only: eval loss should be below train loss.
            self.assertLess(result['eval_loss'], result['train_loss'])
    def test_run_speech_recognition_seq2seq(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_speech_recognition_seq2seq.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-speech-encoder-decoder
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --eval_split_name validation
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 4
            --remove_unused_columns False
            --overwrite_output_dir True
            --preprocessing_num_workers 16
            --max_steps 10
            --seed 42
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_speech_recognition_seq2seq.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_loss'], result['train_loss'])
    def test_run_audio_classification(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_audio_classification.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-wav2vec2
            --dataset_name anton-l/superb_demo
            --dataset_config_name ks
            --train_split_name test
            --eval_split_name test
            --audio_column_name audio
            --label_column_name label
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --num_train_epochs 10
            --max_steps 50
            --seed 42
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_audio_classification.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_loss'], result['train_loss'])
    def test_run_wav2vec2_pretraining(self):
        """Pretraining scripts produce no metrics; just check a model loads."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_wav2vec2_pretraining_no_trainer.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-wav2vec2
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_names clean
            --dataset_split_names validation
            --learning_rate 1e-4
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --preprocessing_num_workers 16
            --max_train_steps 2
            --validation_split_percentage 5
            --seed 42
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_wav2vec2_pretraining_no_trainer.main()
            model = Wav2Vec2ForPreTraining.from_pretrained(tmp_dir)
            self.assertIsNotNone(model)
    def test_run_vit_mae_pretraining(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_mae.py
            --output_dir {tmp_dir}
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --dataloader_num_workers 16
            --metric_for_best_model accuracy
            --max_steps 10
            --train_val_split 0.1
            --seed 42
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_mae.main()
            model = ViTMAEForPreTraining.from_pretrained(tmp_dir)
            self.assertIsNotNone(model)
    def test_run_semantic_segmentation(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_semantic_segmentation.py
            --output_dir {tmp_dir}
            --dataset_name huggingface/semantic-segmentation-test-sample
            --do_train
            --do_eval
            --remove_unused_columns False
            --overwrite_output_dir True
            --max_steps 10
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --seed 32
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_semantic_segmentation.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_overall_accuracy'], 0.1)
def _restore_curry(cls, func, args, kwargs, userdict, is_decorated):
if isinstance(func, str):
(modname, qualname) = func.rsplit(':', 1)
obj = import_module(modname)
for attr in qualname.split('.'):
obj = getattr(obj, attr)
if is_decorated:
return obj
func = obj.func
obj = cls(func, *args, **(kwargs or {}))
obj.__dict__.update(userdict)
return obj |
class FixedLengthProcessionSpeed(ProcessingSpeedColumn):
    """Progress column that renders the task speed left-padded to the widest
    value seen so far, so the column width never jitters."""

    def __init__(self, style: Union[(str, Any)]):
        super().__init__(style)
        # Widest speed string rendered so far; starts at len('0.00').
        self.max_length = len('0.00')

    def render(self, task) -> RenderableType:
        """Render ``task.speed`` as '<speed>it/s' (0.00 when unknown)."""
        if task.speed is None:
            speed_text = '0.00'
        else:
            speed_text = f'{task.speed:>.2f}'
        self.max_length = max(len(speed_text), self.max_length)
        padded = speed_text.rjust(self.max_length)
        return Text(f'{padded}it/s', style=self.style, justify='center')
def load_model():
    """Build the ResNet generator selected by the global ``opt`` namespace,
    wrap it in DataParallel, and load weights from ``opt.model_in``.

    Returns the ready-to-use generator.
    Raises ValueError for an unknown ``opt.model`` or when ``opt.ngpu`` is
    not positive (GPU execution is required).
    """
    # Import lazily so only the requested architecture's module is loaded.
    if opt.model == 'resnet_32':
        from gen_models.resnet_32 import ResNetGenerator
    elif opt.model == 'resnet_64':
        from gen_models.resnet_64 import ResNetGenerator
    elif opt.model == 'resnet_128':
        from gen_models.resnet_small import ResNetGenerator
    else:
        raise ValueError(f'Unknown model name: {opt.model}')
    gen = ResNetGenerator(ch=opt.ngf, dim_z=opt.nz, bottom_width=opt.start_width, n_classes=opt.nclass)
    if opt.ngpu > 0:
        gen = gen.cuda()
        gen = torch.nn.DataParallel(gen, device_ids=range(opt.ngpu))
    else:
        raise ValueError('Must run on gpus, ngpu > 0')
    gen.load_state_dict(torch.load(opt.model_in))
    return gen
class Throughput(Metric[float]):
    """Running throughput metric: total items processed divided by total
    elapsed wall-clock seconds.

    NOTE(review): the bare ``_mode()`` calls between methods look like
    stripped decorator lines (e.g. ``@torch.inference_mode()``) — confirm
    against the original source; as written they execute once at class-body
    evaluation time.
    """
    def __init__(self: TThroughput, *, device: Optional[torch.device]=None) -> None:
        super().__init__(device=device)
        # Register accumulators as metric state so they participate in
        # serialization and merging.
        self._add_state('num_total', 0.0)
        self._add_state('elapsed_time_sec', 0.0)
    _mode()
    def update(self: TThroughput, num_processed: int, elapsed_time_sec: float) -> TThroughput:
        """Accumulate a batch: *num_processed* items over *elapsed_time_sec*.

        Raises ValueError for negative counts or non-positive durations.
        Returns self to allow chaining.
        """
        if (num_processed < 0):
            raise ValueError(f'Expected num_processed to be a non-negative number, but received {num_processed}.')
        if (elapsed_time_sec <= 0):
            raise ValueError(f'Expected elapsed_time_sec to be a positive number, but received {elapsed_time_sec}.')
        self.elapsed_time_sec += elapsed_time_sec
        self.num_total += num_processed
        return self
    _mode()
    def compute(self: TThroughput) -> float:
        """Return items/second, or 0.0 (with a warning) before any update."""
        if (not self.elapsed_time_sec):
            _logger.warning('No calls to update() have been made - returning 0.0')
            return 0.0
        return (self.num_total / self.elapsed_time_sec)
    _mode()
    def merge_state(self: TThroughput, metrics: Iterable[TThroughput]) -> TThroughput:
        """Merge parallel workers: sum counts, but take the max elapsed time
        (workers are assumed to have been running concurrently)."""
        for metric in metrics:
            self.num_total += metric.num_total
            self.elapsed_time_sec = max(self.elapsed_time_sec, metric.elapsed_time_sec)
        return self
class Pizza(ABC):
    """Abstract base pizza: concrete subclasses supply name, dough, sauce
    and toppings; the preparation workflow lives here."""
    name: str
    dough: str
    sauce: str
    toppings: List[str]

    def prepare(self) -> None:
        """Announce each preparation step on stdout."""
        print(f'Prepare {self.name}')
        print('Tossing dough...')
        print('Adding sauce...')
        print('Adding toppings: ')
        for item in self.toppings:
            print(f'   {item}')

    def bake(self) -> None:
        print('Bake for 25 minutes at 350')

    def cut(self) -> None:
        print('Cut the pizza into diagonal slices')

    def box(self) -> None:
        print('Place pizza in official PizzaStore box')

    def getName(self) -> str:
        """Return this pizza's display name."""
        return self.name

    def toString(self) -> str:
        """Render a multi-line description: header, dough, sauce, toppings."""
        display: StringBuffer = StringBuffer()
        display.append(f'---- {self.name} ----\n')
        display.append(f'{self.dough}\n')
        display.append(f'{self.sauce}\n')
        for item in self.toppings:
            display.append(f'{item}\n')
        return display.toString()
class TestHarnessSimple(Component):
    """Minimal PyMTL test harness: a message source wired directly to a sink."""

    def construct(s, MsgType, SrcType, SinkType, src_msgs, sink_msgs):
        """Instantiate the source/sink with their message lists and connect
        the source's send port to the sink's recv port."""
        s.src = SrcType(MsgType, src_msgs)
        s.sink = SinkType(MsgType, sink_msgs)
        connect(s.src.send, s.sink.recv)

    def done(s):
        """Simulation is finished once both endpoints report done."""
        return s.src.done() and s.sink.done()

    def line_trace(s):
        """One-line trace: '<src trace> > <sink trace>'."""
        return f'{s.src.line_trace()} > {s.sink.line_trace()}'
class ApproveSponsorshipApplicationUseCaseTests(TestCase):
    """Tests for ApproveSponsorshipApplicationUseCase: approving a sponsorship
    must update its status/fields, create a contract, and fan out
    notifications."""
    def setUp(self):
        # Two mock notification channels to verify fan-out.
        self.notifications = [Mock(), Mock()]
        self.use_case = use_cases.ApproveSponsorshipApplicationUseCase(self.notifications)
        self.user = baker.make(settings.AUTH_USER_MODEL)
        self.sponsorship = baker.make(Sponsorship, _fill_optional='sponsor')
        self.package = baker.make('sponsors.SponsorshipPackage')
        today = date.today()
        self.data = {'sponsorship_fee': 100, 'package': self.package, 'start_date': today, 'end_date': (today + timedelta(days=10))}
    def test_update_sponsorship_as_approved_and_create_contract(self):
        self.use_case.execute(self.sponsorship, **self.data)
        self.sponsorship.refresh_from_db()
        today = timezone.now().date()
        self.assertEqual(self.sponsorship.approved_on, today)
        self.assertEqual(self.sponsorship.status, Sponsorship.APPROVED)
        # Approval must have created and saved a contract.
        self.assertTrue(self.sponsorship.contract.pk)
        self.assertTrue(self.sponsorship.start_date)
        self.assertTrue(self.sponsorship.end_date)
        self.assertEqual(self.sponsorship.sponsorship_fee, 100)
        self.assertEqual(self.sponsorship.package, self.package)
        self.assertEqual(self.sponsorship.level_name, self.package.name)
        self.assertFalse(self.sponsorship.renewal)
    def test_update_renewal_sponsorship_as_approved_and_create_contract(self):
        # Same flow, but flagged as a renewal.
        self.data.update({'renewal': True})
        self.use_case.execute(self.sponsorship, **self.data)
        self.sponsorship.refresh_from_db()
        today = timezone.now().date()
        self.assertEqual(self.sponsorship.approved_on, today)
        self.assertEqual(self.sponsorship.status, Sponsorship.APPROVED)
        self.assertTrue(self.sponsorship.contract.pk)
        self.assertTrue(self.sponsorship.start_date)
        self.assertTrue(self.sponsorship.end_date)
        self.assertEqual(self.sponsorship.sponsorship_fee, 100)
        self.assertEqual(self.sponsorship.package, self.package)
        self.assertEqual(self.sponsorship.level_name, self.package.name)
        self.assertEqual(self.sponsorship.renewal, True)
    def test_send_notifications_using_sponsorship(self):
        # Every configured channel gets exactly one notify() call.
        self.use_case.execute(self.sponsorship, **self.data)
        for n in self.notifications:
            n.notify.assert_called_once_with(request=None, sponsorship=self.sponsorship, contract=self.sponsorship.contract)
    def test_build_use_case_with_default_notificationss(self):
        # The build() factory wires the default logger notification.
        uc = use_cases.ApproveSponsorshipApplicationUseCase.build()
        self.assertEqual(len(uc.notifications), 1)
        self.assertIsInstance(uc.notifications[0], SponsorshipApprovalLogger)
def _validate_coincident(triangulator):
    """Decorator: wrap a raw *triangulator* with geometry validation,
    coincident-point handling, optional kernel weighting, and adjacency-table
    reordering.

    NOTE(review): the bare ``(triangulator)`` expression below looks like a
    stripped decorator line (e.g. ``@wraps(triangulator)``) — confirm against
    the original source; as written it is a no-op.
    """
    (triangulator)
    def tri_with_validation(coordinates, ids=None, coincident='raise', kernel=None, bandwidth=None, seed=None, **kwargs):
        """Run the wrapped triangulator and return (focal, neighbor, weight)
        arrays ordered by *ids*.

        coincident: 'raise' (error on duplicate locations), 'jitter'
        (perturb duplicates), or 'clique' (triangulate unique locations and
        re-expand duplicates into fully-connected cliques).
        """
        (coordinates, ids, geoms) = _validate_geometry_input(coordinates, ids=ids, valid_geometry_types=_VALID_GEOMETRY_TYPES)
        # Detect duplicate point locations up front; triangulations are
        # undefined when distinct observations share a location.
        (n_coincident, coincident_lut) = _build_coincidence_lookup(geoms)
        if (n_coincident > 0):
            if (coincident == 'raise'):
                raise ValueError(f"There are {len(coincident_lut)} unique locations in the dataset, but {len(geoms)} observations. This means there are multiple points in the same location, which is undefined for this graph type. To address this issue, consider setting `coincident='clique'` or consult the documentation about coincident points.")
            elif (coincident == 'jitter'):
                # Perturb duplicates slightly so every location is unique.
                (coordinates, geoms) = _jitter_geoms(coordinates, geoms, seed=seed)
            elif (coincident == 'clique'):
                # Keep the full inputs aside; triangulate unique locations
                # only, then re-induce the cliques after weighting.
                (input_coordinates, input_ids, input_geoms) = (coordinates, ids, geoms)
                (coordinates, ids, geoms) = _validate_geometry_input(coincident_lut.geometry, ids=coincident_lut.index, valid_geometry_types=_VALID_GEOMETRY_TYPES)
            else:
                raise ValueError(f"Recieved option coincident='{coincident}', but only options 'raise','clique','jitter' are suppported.")
        (heads_ix, tails_ix) = triangulator(coordinates, **kwargs)
        (heads, tails) = (ids[heads_ix], ids[tails_ix])
        if (kernel is None):
            # Unweighted graph: unit weights for every edge.
            weights = numpy.ones(heads_ix.shape, dtype=numpy.int8)
        else:
            # Kernel-weighted graph: weight each edge by its length.
            distances = _vec_euclidean_distances(coordinates[heads_ix], coordinates[tails_ix]).squeeze()
            sparse_d = sparse.csc_array((distances, (heads_ix, tails_ix)))
            if (bandwidth == 'auto'):
                bandwidth = _optimize_bandwidth(sparse_d, kernel)
            (_, _, weights) = _kernel(sparse_d, metric='precomputed', kernel=kernel, bandwidth=bandwidth, taper=False)
        adjtable = pandas.DataFrame.from_dict({'focal': heads, 'neighbor': tails, 'weight': weights})
        if ((n_coincident > 0) & (coincident == 'clique')):
            # Re-expand each unique location into the clique of its
            # coincident observations; intra-clique edges get fill_value
            # (kernel weight at distance zero when a kernel is in use).
            if (kernel is None):
                fill_value = 1
            else:
                fill_value = _kernel_functions[kernel](numpy.array([0]), bandwidth).item()
            adjtable = _induce_cliques(adjtable, coincident_lut, fill_value=fill_value)
            (coordinates, ids, geoms) = (input_coordinates, input_ids, input_geoms)
            (heads, tails, weights) = adjtable.values.T
        adjtable = _reorder_adjtable_by_ids(adjtable, ids)
        return (adjtable.focal.values, adjtable.neighbor.values, adjtable.weight.values)
    return tri_with_validation
def socket_level_mapping(t: int, archtype: QL_ARCH) -> str:
    """Resolve socket-level constant *t* to its symbolic name for the given
    architecture (x86 and x86-64 share a table, as do ARM and ARM64)."""
    level_tables = {
        QL_ARCH.X86: linux_x86_socket_level,
        QL_ARCH.X8664: linux_x86_socket_level,
        QL_ARCH.ARM: linux_arm_socket_level,
        QL_ARCH.ARM64: linux_arm_socket_level,
        QL_ARCH.MIPS: linux_mips_socket_level,
    }
    return _constant_mapping(t, level_tables[archtype])
def get_current_dir():
    """Return the directory the file chooser should start in.

    Reads the last-used directory from config; if it is missing, undecodable,
    or no longer exists, falls back to the nearest existing ancestor and
    finally to the user's home directory.
    """
    raw = config.getbytes('memory', 'chooser_dir', b'')
    try:
        path = bytes2fsn(raw, 'utf-8') or None
    except ValueError:
        # Stored bytes could not be decoded; ignore them.
        path = None
    if path is not None:
        # Walk up to the closest directory that still exists.
        path = find_nearest_dir(path)
    return path if path is not None else get_home_dir()
def main():
    """CLI entry point: release an OpenNMT-py checkpoint for inference.

    'pytorch' format strips the optimizer state and re-saves the checkpoint;
    'ctranslate2' converts it with the CTranslate2 converter (optionally
    quantized).
    """
    parser = argparse.ArgumentParser(description='Release an OpenNMT-py model for inference')
    parser.add_argument('--model', '-m', help='The model path', required=True)
    parser.add_argument('--output', '-o', help='The output path', required=True)
    parser.add_argument('--format', choices=['pytorch', 'ctranslate2'], default='pytorch', help='The format of the released model')
    parser.add_argument('--quantization', '-q', choices=['int8', 'int16'], default=None, help='Quantization type for CT2 model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.model)
    if args.format == 'pytorch':
        # Dropping the optimizer state is what shrinks the released file.
        checkpoint['optim'] = None
        torch.save(checkpoint, args.output)
    elif args.format == 'ctranslate2':
        model_spec = get_ctranslate2_model_spec(checkpoint['opt'])
        if model_spec is None:
            # NOTE(review): this message appears to be missing its URL
            # ("Go to <...> for more information") — kept as-is.
            raise ValueError('This model is not supported by CTranslate2. Go to for more information on supported models.')
        # Imported lazily so the pytorch path works without ctranslate2.
        import ctranslate2
        converter = ctranslate2.converters.OpenNMTPyConverter(args.model)
        converter.convert(args.output, model_spec, force=True, quantization=args.quantization)
# NOTE(review): the three bare option tuples and the `_commands.command(...)`
# line below look like stripped click decorators (`@click.option(...)` /
# `@<group>_commands.command(...)`). As written they are not valid standalone
# Python (keyword arguments inside a tuple display) — confirm and restore the
# decorators against the original source.
('--user', '-u', default='reanahub', help='DockerHub user name [reanahub]')
('--tag', '-t', default='latest', help='Image tag [latest]')
('--component', '-c', multiple=True, default=['CLUSTER'], help='Which components? [name|CLUSTER|DEMO]')
_commands.command(name='docker-pull')
def docker_pull(user, tag, component):
    """Pull Docker images for the selected components.

    Components with a prefetch list pull those images; dockerised components
    pull '<user>/<component>:<tag>'; anything else is skipped with a message.
    """
    components = select_components(component)
    for component in components:
        if (component in DOCKER_PREFETCH_IMAGES):
            # Known third-party images for this component.
            for image in DOCKER_PREFETCH_IMAGES[component]:
                cmd = 'docker pull {0}'.format(image)
                run_command(cmd, component)
        elif is_component_dockerised(component):
            cmd = 'docker pull {0}/{1}:{2}'.format(user, component, tag)
            run_command(cmd, component)
        else:
            msg = 'Ignoring this component that does not contain a Dockerfile.'
            display_message(msg, component)
def filter(example, uniques, args):
    """Return True when *example* passes every dataset-quality gate.

    Gates: must be unique (check_uniques), not auto-generated, with line
    length/mean and alphabetic fraction within the thresholds in *args*.
    Note: intentionally shadows the builtin `filter` to match the caller's
    existing interface.
    """
    if not check_uniques(example, uniques):
        return False
    if example['autogenerated']:
        return False
    if example['line_max'] > args.line_max:
        return False
    if example['line_mean'] > args.line_mean:
        return False
    if example['alpha_frac'] < args.alpha_frac:
        return False
    return True
def test_adjust_max():
    """Marking a range past the maximum should clamp it to the maximum
    (presumably: [5, 12) against max 10 leaves 5 remaining)."""
    tracker = CompletedKeys(10)
    assert tracker.num_remaining == 10
    assert len(tracker._slabs) == 0
    assert tracker.is_available(9)
    tracker.mark_completed(5, 12)
    assert len(tracker._slabs) == 0
    assert tracker.num_remaining == 5
    assert not tracker.is_available(9)
    assert tracker.is_available(4)
def bmm_flop_jit(inputs, outputs):
    """FLOP counter hook for batched matrix multiplication (bmm).

    Both operands must be rank-3; the count is the product of the first
    operand's dimensions and the second operand's last dimension.
    Returns a Counter keyed by 'bmm'.
    """
    shapes = [get_shape(tensor) for tensor in inputs]
    assert len(shapes[0]) == 3
    assert len(shapes[1]) == 3
    batch, rows, inner = shapes[0]
    cols = shapes[1][2]
    return Counter({'bmm': batch * rows * inner * cols})
# NOTE(review): the bare `.slow` line below looks like a stripped
# `@pytest.mark.slow` decorator — confirm against the original test module.
.slow
def test_hamiltonian_taking_arguments():
    """brmesolve with a string-argument Hamiltonian ([[H, 'ii']] with ii=1)
    must match the plain-Hamiltonian result on a Jaynes–Cummings-style
    cavity+qubit system."""
    N = 10
    w0 = ((1.0 * 2) * np.pi)
    g = ((0.75 * 2) * np.pi)
    kappa = 0.05
    # Cavity and qubit operators on the composite Hilbert space.
    a = qutip.tensor(qutip.destroy(N), qutip.qeye(2))
    sp = qutip.tensor(qutip.qeye(N), qutip.sigmap())
    psi0 = qutip.tensor(qutip.basis(N, 1), qutip.basis(2, 0))
    psi0 = qutip.ket2dm(psi0)
    times = np.linspace(0, (((5 * 2) * np.pi) / g), 1000)
    # Spectral coupling: flat rate kappa for positive frequencies.
    a_ops = [[(a + a.dag()), '{kappa}*(w > 0)'.format(kappa=kappa)]]
    e_ops = [(a.dag() * a), (sp.dag() * sp)]
    H = ((((w0 * a.dag()) * a) + ((w0 * sp.dag()) * sp)) + ((g * (a + a.dag())) * (sp + sp.dag())))
    args = {'ii': 1}
    no_args = brmesolve(H, psi0, times, a_ops, e_ops)
    # Note: `args` is rebound from the dict to the solver result here.
    args = brmesolve([[H, 'ii']], psi0, times, a_ops, e_ops, args=args)
    for (arg, no_arg) in zip(args.expect, no_args.expect):
        np.testing.assert_allclose(arg, no_arg, atol=1e-10)
def filter_latest_pkgs(pkgs):
    """Collapse *pkgs* to the newest entry per normalized package name.

    Returns a view of package objects, one per normalized name, keeping the
    one with the highest parsed_version (first seen wins on ties).
    """
    latest_by_name = {}
    for pkg in pkgs:
        key = normalize_pkgname(pkg.pkgname)
        current = latest_by_name.get(key)
        if current is None or pkg.parsed_version > current.parsed_version:
            latest_by_name[key] = pkg
    return latest_by_name.values()
class LegalClause(OrderedModel):
    """A reusable piece of legal text that can be attached to contracts.

    Ordering (the ``order`` field) comes from OrderedModel.
    """
    # Friendly identifier used by staff; not shown in contracts.
    internal_name = models.CharField(max_length=1024, verbose_name='Internal Name', help_text='Friendly name used internally by PSF to reference this clause', blank=False)
    # The actual clause text inserted into the contract.
    clause = models.TextField(verbose_name='Clause', help_text='Legal clause text to be added to contract', blank=False)
    notes = models.TextField(verbose_name='Notes', help_text='PSF staff notes', blank=True, default='')
    def __str__(self):
        return f'Clause: {self.internal_name}'
    def clone(self):
        """Create and return a saved copy of this clause, preserving its
        ordering position."""
        return LegalClause.objects.create(internal_name=self.internal_name, clause=self.clause, notes=self.notes, order=self.order)
    class Meta(OrderedModel.Meta):
        pass
# NOTE(review): the two leading `.parametrize(...)` lines look like stripped
# `@pytest.mark.parametrize` decorators — as written they are not valid
# standalone Python; confirm and restore against the original test module.
.parametrize('username,password', users)
.parametrize('membership_id', memberships)
def test_delete(db, client, username, password, membership_id):
    """DELETE on the membership detail endpoint: authenticated users get
    405 (method not allowed), anonymous requests get 401."""
    client.login(username=username, password=password)
    url = reverse(urlnames['detail'], args=[membership_id])
    response = client.delete(url)
    if password:
        assert (response.status_code == 405)
    else:
        assert (response.status_code == 401)
def reorder_train_deterministic(dataset):
    """Deterministically interleave STL-10 'train+unlabeled' samples.

    Produces an ordering where each of the 5000 labeled training images is
    followed by a fixed group of 20 unlabeled images, and reorders both
    ``dataset.data`` and ``dataset.labels`` in place accordingly.
    Returns the mutated dataset.
    """
    assert isinstance(dataset, torchvision.datasets.STL10)
    assert (dataset.split == 'train+unlabeled')
    assert (dataset.data.shape == (105000, 3, 96, 96))
    ids = []
    # `range`, not Python 2's `xrange`: the original raised NameError on
    # Python 3 (this file uses f-strings elsewhere, i.e. Python 3.6+).
    for i in range(5000):
        ids.append(i)
        # The 20 unlabeled images assigned to labeled image i.
        ids += range((5000 + (i * 20)), (5000 + ((i + 1) * 20)))
    dataset.data = dataset.data[ids]
    assert (dataset.data.shape == (105000, 3, 96, 96))
    dataset.labels = dataset.labels[ids]
    assert (dataset.labels.shape == (105000,))
    return dataset
def test_simulate_genotype_call_dataset__phased(tmp_path):
    """The phased flag must set call_genotype_phased to all-True or all-False."""
    for phased in (True, False):
        ds = simulate_genotype_call_dataset(n_variant=10, n_sample=10, phased=phased)
        assert 'call_genotype_phased' in ds
        flags = ds['call_genotype_phased']
        if phased:
            assert np.all(flags)
        else:
            assert not np.any(flags)
class MainWindow(QMainWindow):
    """Top-level window that republishes close and gesture events as Qt signals."""
    signal_close = Signal()
    signal_gesture = Signal(QtCore.QEvent)
    def __init__(self, parent: Optional[QWidget]=None, title: Optional[str]=None, size: Optional[Tuple[(int, int)]]=None) -> None:
        """Optionally set the window title and initial size (width, height)."""
        QMainWindow.__init__(self, parent=parent)
        if title is not None:
            self.setWindowTitle(title)
        if size is not None:
            self.resize(*size)
    def event(self, event: QtCore.QEvent) -> bool:
        # Publish gesture events on signal_gesture and report them handled;
        # defer every other event to the base implementation.
        if event.type() != QtCore.QEvent.Gesture:
            return super().event(event)
        self.signal_gesture.emit(event)
        return True
    def closeEvent(self, event: QtCore.QEvent) -> None:
        """Announce the close via signal_close, then accept the event."""
        self.signal_close.emit()
        event.accept()
def convert_pascal_berkeley_augmented_mat_annotations_to_png(pascal_berkeley_augmented_root):
    """Convert Berkeley-augmented PASCAL class annotations from .mat to .png.

    For every .mat file under dataset/cls, extracts the 'GTcls' segmentation
    array and writes it as a .png with the same basename under
    dataset/cls_png. If the output directory already exists the whole
    conversion is skipped.
    """
    import scipy.io
    def load_segmentation(mat_path, key='GTcls'):
        # Load options mirror how the Berkeley files were written; the
        # segmentation array lives on the loaded struct.
        data = scipy.io.loadmat(mat_path, mat_dtype=True, squeeze_me=True, struct_as_record=False)
        return data[key].Segmentation
    mat_extension = '.mat'
    png_extension = '.png'
    mat_dir = os.path.join(pascal_berkeley_augmented_root, 'dataset/cls')
    png_dir = os.path.join(pascal_berkeley_augmented_root, 'dataset/cls_png')
    if os.path.exists(png_dir):
        # Assume a previous run already produced the PNGs.
        return
    os.makedirs(png_dir)
    for mat_name in os.listdir(mat_dir):
        base_name = mat_name[:-len(mat_extension)]
        mat_path = os.path.join(mat_dir, mat_name)
        png_path = os.path.join(png_dir, base_name) + png_extension
        segmentation = load_segmentation(mat_path)
        io.imsave(png_path, segmentation)
class VGG(nn.Module):
    """VGG-style classification head over a supplied feature extractor.

    `features` is any module whose output flattens to 512*7*7 elements per
    sample; the classifier runs two 4096-wide fully-connected layers with
    heavy (p=0.9) dropout, then `fc_action` maps to `num_classes` logits.
    """
    def __init__(self, features, num_classes=11):
        super(VGG, self).__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(p=0.9),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(p=0.9),
        )
        self.fc_action = nn.Linear(4096, num_classes)
        self._initialize_weights()
    def forward(self, x):
        """Extract features, flatten per sample, classify."""
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        hidden = self.classifier(flat)
        return self.fc_action(hidden)
    def _initialize_weights(self):
        # Convs: zero-mean normal scaled by fan-out; batch-norm: unit scale,
        # zero shift; linears: small (std 0.01) normal with zero bias.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()
class TestResolver():
    """Tests for develop._resolve_setup_path's relative-path computation."""
    def test_resolve_setup_path_cwd(self):
        # All arguments '.' resolve to '.'.
        assert (develop._resolve_setup_path('.', '.', '.') == '.')
    def test_resolve_setup_path_one_dir(self):
        # One directory deep resolves to one level up ('../').
        assert (develop._resolve_setup_path('pkgs', '.', 'pkgs') == '../')
    def test_resolve_setup_path_one_dir_trailing_slash(self):
        # A trailing slash must not change the result.
        assert (develop._resolve_setup_path('pkgs/', '.', 'pkgs') == '../')
class TestCauchy(BaseTestDistributionRandom):
    """Checks pm.Cauchy's parameter forwarding and its draws against SciPy's
    'cauchy' (alpha maps to loc, beta to scale)."""
    pymc_dist = pm.Cauchy
    # Parameters passed to the PyMC distribution under test.
    pymc_dist_params = {'alpha': 2.0, 'beta': 5.0}
    # Parameters the underlying RV op is expected to receive.
    expected_rv_op_params = {'alpha': 2.0, 'beta': 5.0}
    # Equivalent parameterization for the SciPy reference distribution.
    reference_dist_params = {'loc': 2.0, 'scale': 5.0}
    reference_dist = seeded_scipy_distribution_builder('cauchy')
    # Checks executed by the BaseTestDistributionRandom harness.
    checks_to_run = ['check_pymc_params_match_rv_op', 'check_pymc_draws_match_reference']
class FifoTransactionManager(ModbusTransactionManager):
    """Transaction manager that returns transactions strictly first-in-first-out.

    The `tid` parameters are accepted for interface compatibility; retrieval
    and deletion always operate on the front of the queue.
    """
    def __init__(self, client, **kwargs):
        super().__init__(client, **kwargs)
        self.transactions = []
    def __iter__(self):
        """Iterate the queued transactions, oldest first."""
        return iter(self.transactions)
    def addTransaction(self, request, tid=None):
        """Append a request to the queue, logging its transaction id."""
        if tid is None:
            tid = request.transaction_id
        Log.debug('Adding transaction {}', tid)
        self.transactions.append(request)
    def getTransaction(self, tid):
        """Pop and return the oldest transaction, or None when empty."""
        if not self.transactions:
            return None
        return self.transactions.pop(0)
    def delTransaction(self, tid):
        """Discard the oldest transaction, if any."""
        Log.debug('Deleting transaction {}', tid)
        if self.transactions:
            self.transactions.pop(0)
class Visualizer():
    """Wraps a TensorFlow summary logger plus a plain-text training log file."""
    def __init__(self, opt, name='train'):
        self.logger = tf_logger.Logger(os.path.join(opt.log_dir, name))
        self.log_name = os.path.join(opt.log_dir, 'tf_visualizer_log.txt')
        # Stamp the start of this session in the text log.
        with open(self.log_name, 'a') as log_file:
            now = time.strftime('%c')
            log_file.write((' Training Loss (%s) \n' % now))
    def log_images(self, visuals, step):
        """Write each labelled image in `visuals` as an image summary."""
        for label, image_numpy in visuals.items():
            self.logger.image_summary(label, [image_numpy], step)
    def log_scalars(self, scalars, step):
        """Write each labelled value in `scalars` as a scalar summary."""
        for label, val in scalars.items():
            self.logger.scalar_summary(label, val, step)
    def plot_current_points(self, points, disp_offset=10):
        """No-op placeholder kept for interface compatibility."""
        pass
    def print_current_scalars(self, epoch, i, scalars):
        """Print this iteration's scalars and append them to the text log."""
        parts = [('(epoch: %d, iters: %d) ' % (epoch, i))]
        parts.extend(('%s: %.3f ' % (k, v)) for (k, v) in scalars.items())
        message = ''.join(parts)
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))
def gen_pickle(split='val', root='ScanNet'):
    """Serialize ScanNet scenes of `split` into scannet_<split>_rgb21c_pointid.pickle.

    Reads scene ids from scannetv2_<split>.txt, loads each scene's XYZ+RGB
    point cloud (and, except for the test split, its per-point labels with
    unannotated points removed via remove_unano), remaps labels through
    gen_label_map(), and dumps four parallel lists: per-scene point arrays,
    label arrays, original point ids, and original point counts.
    """
    if (split == 'test'):
        root = (root + '/scans_test')
    else:
        root = (root + '/scans')
    file_list = ('scannetv2_%s.txt' % split)
    with open(file_list) as fl:
        scene_id = fl.read().splitlines()
    scene_data = []
    scene_data_labels = []
    scene_data_id = []
    scene_data_num = []
    label_map = gen_label_map()
    for (i, sid) in enumerate(scene_id):
        print('process...', i)
        scene_namergb = os.path.join(root, sid, (sid + '_vh_clean_2.ply'))
        scene_xyzlabelrgb = PlyData.read(scene_namergb)
        scene_vertex_rgb = scene_xyzlabelrgb['vertex']
        scene_data_tmp = np.stack((scene_vertex_rgb['x'], scene_vertex_rgb['y'], scene_vertex_rgb['z'], scene_vertex_rgb['red'], scene_vertex_rgb['green'], scene_vertex_rgb['blue']), axis=(- 1)).astype(np.float32)
        scene_points_num = scene_data_tmp.shape[0]
        # np.arange replaces the per-element list comprehension.
        scene_point_id = np.arange(scene_points_num)
        if (split != 'test'):
            scene_name = os.path.join(root, sid, (sid + '_vh_clean_2.labels.ply'))
            scene_xyzlabel = PlyData.read(scene_name)
            scene_vertex = scene_xyzlabel['vertex']
            scene_data_label_tmp = scene_vertex['label']
            (scene_data_tmp, scene_data_label_tmp, scene_point_id_tmp) = remove_unano(scene_data_tmp, scene_data_label_tmp, scene_point_id)
        else:
            # Test split has no labels: zero labels, keep every point.
            scene_data_label_tmp = np.zeros(scene_data_tmp.shape[0]).astype(np.int32)
            scene_point_id_tmp = scene_point_id
        scene_data_label_tmp = label_map[scene_data_label_tmp]
        scene_data.append(scene_data_tmp)
        scene_data_labels.append(scene_data_label_tmp)
        scene_data_id.append(scene_point_id_tmp)
        scene_data_num.append(scene_points_num)
    # Fix: the output file handle was previously never closed explicitly.
    # protocol=0 is kept so the on-disk format stays identical for consumers.
    with open(('scannet_%s_rgb21c_pointid.pickle' % split), 'wb') as pickle_out:
        pickle.dump(scene_data, pickle_out, protocol=0)
        pickle.dump(scene_data_labels, pickle_out, protocol=0)
        pickle.dump(scene_data_id, pickle_out, protocol=0)
        pickle.dump(scene_data_num, pickle_out, protocol=0)
def lines2dictlist(lines, format):
    """Convert annotation-file lines into a list of dicts.

    Each line is whitespace-separated. Supported formats:
      - 'rawframes': "<frame_dir> <total_frames> <label> [<label> ...]"
      - 'videos':    "<filename> <label> [<label> ...]"

    Raises:
        ValueError: if `format` is not a supported format (previously this
            fell through and raised NameError on the undefined `data`).
    """
    tokens = [line.split() for line in lines]
    if format == 'rawframes':
        return [dict(frame_dir=t[0], total_frames=int(t[1]), label=[int(x) for x in t[2:]]) for t in tokens]
    if format == 'videos':
        return [dict(filename=t[0], label=[int(x) for x in t[1:]]) for t in tokens]
    raise ValueError('unsupported format: %r' % (format,))
def objective(trial):
    """Optuna objective: sample SMOTE hyper-parameters, synthesize data with
    them under five seeds, train CatBoost on each, and return the mean
    validation score."""
    k_neighbours = trial.suggest_int('k_neighbours', 5, 20)
    frac_samples = 2 ** trial.suggest_int('frac_samples', -2, 3)
    frac_lam_del = trial.suggest_float('frac_lam_del', 0.0, 0.95, step=0.05)
    total_score = 0.0
    with tempfile.TemporaryDirectory() as tmp_name:
        workdir = Path(tmp_name)
        for seed in range(5):
            sample_smote(parent_dir=workdir, real_data_path=real_data_path, eval_type=eval_type, frac_samples=frac_samples, frac_lam_del=frac_lam_del, k_neighbours=k_neighbours, change_val=True, seed=seed)
            # Preprocessing config for CatBoost: defaults everywhere.
            T_dict = {'seed': 0, 'normalization': None, 'num_nan_policy': None, 'cat_nan_policy': None, 'cat_min_frequency': None, 'cat_encoding': None, 'y_policy': 'default'}
            metrics = train_catboost(parent_dir=workdir, real_data_path=real_data_path, eval_type=eval_type, T_dict=T_dict, change_val=True, seed=0)
            total_score += metrics.get_val_score()
    return total_score / 5
def compute_mul(tree):
    """Evaluate a (negated, inputs) multiplication tree produced by simplify_mul.

    `inputs` is either a leaf value or a list of subtrees to multiply;
    the `neg` flag negates the result. A None leaf means the tree was not
    simplified first.
    """
    neg, inputs = tree
    if inputs is None:
        raise AssertionError('Function `compute_mul` found a missing leaf, did you forget to call `simplify_mul` on the tree first?')
    if isinstance(inputs, list):
        rval = mul(*[compute_mul(subtree) for subtree in inputs])
    else:
        rval = inputs
    return -rval if neg else rval
class BaseDpEmbeddingSharding(EmbeddingSharding[(C, F, T, W)]):
    """Base class for data-parallel embedding sharding: every rank gets a full
    replica of every embedding table (see `_shard`)."""
    def __init__(self, sharding_infos: List[EmbeddingShardingInfo], env: ShardingEnv, device: Optional[torch.device]=None) -> None:
        super().__init__()
        self._env = env
        self._device = device
        self._rank: int = self._env.rank
        self._world_size: int = self._env.world_size
        sharded_tables_per_rank = self._shard(sharding_infos)
        # The empty-list assignment only declares the annotated attribute;
        # it is immediately replaced by the grouped result on the next line.
        self._grouped_embedding_configs_per_rank: List[List[GroupedEmbeddingConfig]] = []
        self._grouped_embedding_configs_per_rank = group_tables(sharded_tables_per_rank)
        self._grouped_embedding_configs: List[GroupedEmbeddingConfig] = self._grouped_embedding_configs_per_rank[env.rank]
    def _shard(self, sharding_infos: List[EmbeddingShardingInfo]) -> List[List[ShardedEmbeddingTable]]:
        """Replicate each table onto every rank: local rows/cols equal the full
        parameter size and no shard metadata is attached (data parallel)."""
        world_size = self._world_size
        tables_per_rank: List[List[ShardedEmbeddingTable]] = [[] for i in range(world_size)]
        for info in sharding_infos:
            for rank in range(world_size):
                tables_per_rank[rank].append(ShardedEmbeddingTable(num_embeddings=info.embedding_config.num_embeddings, embedding_dim=info.embedding_config.embedding_dim, name=info.embedding_config.name, embedding_names=info.embedding_config.embedding_names, data_type=info.embedding_config.data_type, feature_names=info.embedding_config.feature_names, pooling=info.embedding_config.pooling, is_weighted=info.embedding_config.is_weighted, has_feature_processor=info.embedding_config.has_feature_processor, local_rows=info.param.size(0), local_cols=info.param.size(1), compute_kernel=EmbeddingComputeKernel(info.param_sharding.compute_kernel), local_metadata=None, global_metadata=None, weight_init_max=info.embedding_config.weight_init_max, weight_init_min=info.embedding_config.weight_init_min, fused_params=info.fused_params))
        return tables_per_rank
    def embedding_dims(self) -> List[int]:
        """Concatenated embedding dims across this rank's grouped configs."""
        embedding_dims = []
        for grouped_config in self._grouped_embedding_configs:
            embedding_dims.extend(grouped_config.embedding_dims())
        return embedding_dims
    def embedding_names(self) -> List[str]:
        """Concatenated embedding names across this rank's grouped configs."""
        embedding_names = []
        for grouped_config in self._grouped_embedding_configs:
            embedding_names.extend(grouped_config.embedding_names())
        return embedding_names
    def embedding_names_per_rank(self) -> List[List[str]]:
        # Not meaningful for the base data-parallel sharding.
        raise NotImplementedError
    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        """Concatenated shard metadata (None entries for replicated tables)."""
        embedding_shard_metadata = []
        for grouped_config in self._grouped_embedding_configs:
            embedding_shard_metadata.extend(grouped_config.embedding_shard_metadata())
        return embedding_shard_metadata
    def feature_names(self) -> List[str]:
        """Concatenated feature names across this rank's grouped configs."""
        feature_names = []
        for grouped_config in self._grouped_embedding_configs:
            feature_names.extend(grouped_config.feature_names())
        return feature_names
    def embedding_tables(self) -> List[ShardedEmbeddingTable]:
        """Concatenated sharded tables across this rank's grouped configs."""
        embedding_tables = []
        for grouped_config in self._grouped_embedding_configs:
            embedding_tables.extend(grouped_config.embedding_tables)
        return embedding_tables
class TestInstMonthlyCadence(TestInstCadence):
    """Variant of TestInstCadence exercising a month-start ('MS') file cadence."""
    def setup_method(self):
        """Build a test Instrument whose files run from one year before to two
        years (minus a day) after the reference date, at monthly cadence."""
        reload(pysat.instruments.pysat_testing)
        self.ref_time = pysat.instruments.pysat_testing._test_dates['']['']
        self.freq = 'MS'
        date_range = pds.date_range((self.ref_time - pds.DateOffset(years=1)), (self.ref_time + pds.DateOffset(years=2, days=(- 1))), freq=self.freq)
        self.testInst = pysat.Instrument(platform='pysat', name='testing', num_samples=10, clean_level='clean', update_files=True, use_header=True, file_date_range=date_range, **self.testing_kwargs)
        # Day-of-year of the reference date, used by inherited tests.
        self.ref_doy = int(self.ref_time.strftime('%j'))
        self.out = None
        return
    def teardown_method(self):
        """Drop per-test attributes so no state leaks between tests."""
        del self.testInst, self.out, self.ref_time, self.ref_doy, self.freq
        return
def assert_condition_check_fails():
    """Context-manager body asserting the wrapped block fails a DynamoDB
    conditional check (PynamoDB).

    NOTE(review): the bare `yield` implies this is wrapped with
    @contextlib.contextmanager at a decoration site not visible in this
    chunk — confirm against the original file.
    """
    try:
        (yield)
    except (PutError, UpdateError, DeleteError) as e:
        # Single-item operations surface the failure code directly.
        assert isinstance(e.cause, ClientError)
        assert (e.cause_response_code == 'ConditionalCheckFailedException')
    except TransactWriteError as e:
        # Transactions cancel as a whole; the conditional failure is reported
        # in the cancellation message instead of the response code.
        assert isinstance(e.cause, ClientError)
        assert (e.cause_response_code == 'TransactionCanceledException')
        assert (e.cause_response_message is not None)
        assert ('ConditionalCheckFailed' in e.cause_response_message)
    else:
        raise AssertionError('The version attribute conditional check should have failed.')
class OCIConfig(object):
    """Parsed and schema-validated OCI image configuration blob.

    Wraps the raw config bytes, validates the parsed JSON against METASCHEMA
    at construction time, and exposes helpers for digesting, labels, layer
    history, and building Docker v1-compatibility records.
    """
    # JSON schema for the OCI image config; the CONFIG_* keys are
    # module-level constants defined elsewhere in this file.
    METASCHEMA = {'type': 'object', 'description': 'The container configuration found in an OCI manifest', 'required': [CONFIG_ROOTFS_KEY, CONFIG_ARCHITECTURE_KEY, CONFIG_OS_KEY], 'properties': {CONFIG_CREATED_KEY: {'type': ['string', 'null'], 'description': 'An combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6.'}, CONFIG_AUTHOR_KEY: {'type': ['string', 'null'], 'description': 'Gives the name and/or email address of the person or entity which created and is responsible for maintaining the image.'}, CONFIG_ARCHITECTURE_KEY: {'type': 'string', 'description': 'The CPU architecture which the binaries in this image are built to run on. Configurations SHOULD use, and implementations SHOULD understand, values listed in the Go Language document for GOARCH.'}, CONFIG_OS_KEY: {'type': 'string', 'description': 'The name of the operating system which the image is built to run on. Configurations SHOULD use, and implementations SHOULD understand, values listed in the Go Language document for GOOS.'}, CONFIG_CONFIG_KEY: {'type': ['object', 'null'], 'description': 'The execution parameters which SHOULD be used as a base when running a container using the image', 'properties': {'User': {'type': ['string', 'null']}, 'ExposedPorts': {'type': ['object', 'null']}, 'Env': {'type': ['array', 'null']}, 'Entrypoint': {'type': ['array', 'null']}, 'Cmd': {'type': ['array', 'null']}, 'Volumes': {'type': ['object', 'null']}, 'WorkingDir': {'type': ['string', 'null']}, 'Labels': {'type': ['object', 'null']}, 'StopSignal': {'type': ['string', 'null']}}, 'additionalProperties': True}, CONFIG_ROOTFS_KEY: {'type': 'object', 'description': 'Describes the root filesystem for this image', 'properties': {CONFIG_TYPE_KEY: {'type': 'string', 'description': 'MUST be set to layers.', 'enum': ['layers']}, CONFIG_DIFF_IDS_KEY: {'type': 'array', 'description': 'An array of layer content hashes (DiffIDs), in order from first to last.', 'items': {'type': 
    'string'}}}, 'required': [CONFIG_TYPE_KEY, CONFIG_DIFF_IDS_KEY], 'additionalProperties': True}, CONFIG_HISTORY_KEY: {'type': 'array', 'description': 'Describes the history of each layer. The array is ordered from first to last', 'items': {'type': 'object', 'properties': {CONFIG_EMPTY_LAYER_KEY: {'type': 'boolean', 'description': 'If present, this layer is empty'}, CONFIG_CREATED_KEY: {'type': 'string', 'description': 'The date/time that the layer was created', 'format': 'date-time', 'x-example': '2018-04-03T18:37:09.Z'}, CONFIG_CREATED_BY_KEY: {'type': 'string', 'description': 'The command used to create the layer', 'x-example': '/bin/sh -c #(nop) ADD file:somesha in /'}, CONFIG_COMMENT_KEY: {'type': 'string', 'description': 'Comment describing the layer'}, CONFIG_AUTHOR_KEY: {'type': 'string', 'description': 'The author of the layer'}}, 'additionalProperties': True}}}, 'additionalProperties': True}
    def __init__(self, config_bytes):
        """Parse and validate `config_bytes` (a Bytes wrapper).

        Raises:
            MalformedConfig: if the payload is not valid JSON or does not
                conform to METASCHEMA.
        """
        assert isinstance(config_bytes, Bytes)
        self._config_bytes = config_bytes
        try:
            self._parsed = json.loads(config_bytes.as_unicode())
        except ValueError as ve:
            raise MalformedConfig(('malformed config data: %s' % ve))
        try:
            validate_schema(self._parsed, OCIConfig.METASCHEMA)
        except ValidationError as ve:
            raise MalformedConfig(('config data does not match schema: %s' % ve))
    def digest(self):
        """SHA-256 digest of the raw (encoded) config bytes."""
        return digest_tools.sha256_digest(self._config_bytes.as_encoded_str())
    def size(self):
        """Size in bytes of the raw (encoded) config."""
        return len(self._config_bytes.as_encoded_str())
    def bytes(self):
        """The raw Bytes wrapper this config was built from."""
        return self._config_bytes
    def labels(self):
        """The config's Labels mapping; always a dict, never None."""
        return (self._parsed.get('config', {}).get('Labels', {}) or {})
    def has_empty_layer(self):
        """True if any history entry is marked as an empty layer."""
        history = (self._parsed.get(CONFIG_HISTORY_KEY) or [])
        for history_entry in history:
            if history_entry.get(CONFIG_EMPTY_LAYER_KEY, False):
                return True
        return False
    def history(self):
        """Yield a LayerHistory for each entry in the config's history list."""
        history = (self._parsed.get(CONFIG_HISTORY_KEY) or [])
        for history_entry in history:
            created_datetime_str = history_entry.get(CONFIG_CREATED_KEY)
            created_datetime = (parse_date(created_datetime_str) if created_datetime_str else None)
            (yield LayerHistory(created_datetime=created_datetime, created=history_entry.get(CONFIG_CREATED_KEY), command=history_entry.get(CONFIG_CREATED_BY_KEY), author=history_entry.get(CONFIG_AUTHOR_KEY), comment=history_entry.get(CONFIG_COMMENT_KEY), is_empty=history_entry.get(CONFIG_EMPTY_LAYER_KEY, False), raw_entry=history_entry))
    def synthesized_history(self):
        """Build a single LayerHistory from the top-level config fields, for
        configs that carry no per-layer history."""
        created_datetime_str = self._parsed.get(CONFIG_CREATED_KEY)
        created_datetime = (parse_date(created_datetime_str) if created_datetime_str else None)
        config = (self._parsed.get(CONFIG_CONFIG_KEY) or {})
        return LayerHistory(created_datetime=created_datetime, created=created_datetime_str, command=config.get('Cmd', None), author=self._parsed.get(CONFIG_AUTHOR_KEY, None), comment=None, is_empty=False, raw_entry=None)
    def build_v1_compatibility(self, history, v1_id, v1_parent_id, is_leaf, compressed_size=None):
        """Build a Docker schema-1 'v1Compatibility' dict for one layer.

        Only the leaf layer carries the full config; ancestors get a minimal
        record. History fields fill in created/author/comment/throwaway and
        the container command when the config itself does not provide them.
        """
        v1_compatibility = (copy.deepcopy(self._parsed) if is_leaf else {})
        v1_compatibility['id'] = v1_id
        if (v1_parent_id is not None):
            v1_compatibility['parent'] = v1_parent_id
        if (history is not None):
            if (('created' not in v1_compatibility) and history.created):
                v1_compatibility['created'] = history.created
            if (('author' not in v1_compatibility) and history.author):
                v1_compatibility['author'] = history.author
            if (('comment' not in v1_compatibility) and history.comment):
                v1_compatibility['comment'] = history.comment
            if (('throwaway' not in v1_compatibility) and history.is_empty):
                v1_compatibility['throwaway'] = True
            if ('container_config' not in v1_compatibility):
                v1_compatibility['container_config'] = {'Cmd': [history.command]}
        if (compressed_size is not None):
            v1_compatibility['Size'] = compressed_size
        # v1 records never carry OCI-specific history/rootfs sections.
        v1_compatibility.pop(CONFIG_HISTORY_KEY, None)
        v1_compatibility.pop(CONFIG_ROOTFS_KEY, None)
        return v1_compatibility
(mass=ShowInInspector(float, 100), inertia=ShowInInspector(float, (200 / 3)))
class Rigidbody(Component):
    """Rigid-body physics component: integrates linear and angular motion
    from accumulated forces and torques.

    NOTE(review): `mass`, `inertia`, `pos` and `rot` are each defined twice
    as getter/setter pairs; in the original source these are almost certainly
    decorated with @property / @<name>.setter, and the class with a decorator
    supplying defaults — decorators appear stripped in extraction. Confirm
    against the original file.
    """
    velocity = ShowInInspector(Vector3)
    rotVel = ShowInInspector(Vector3, None, 'Rotational Velocity')
    force = ShowInInspector(Vector3)
    torque = ShowInInspector(Vector3)
    gravity = ShowInInspector(bool, True)
    physicMaterial = ShowInInspector(PhysicMaterial, PhysicMaterial(immutable=True))
    def __init__(self):
        super(Rigidbody, self).__init__()
        self.mass = 100
        self.velocity = Vector3.zero()
        self.rotVel = Vector3.zero()
        self.force = Vector3.zero()
        self.torque = Vector3.zero()
    # Getter half of the presumed `mass` property: inverse mass 0 means infinite mass.
    def mass(self):
        if (self.invMass == 0):
            return Infinity
        return (1 / self.invMass)
    # Setter half: stores the inverse mass and keeps inertia at 2/3 of the mass.
    def mass(self, val):
        if ((val == Infinity) or (val == 0)):
            self.invMass = 0
        else:
            self.invMass = (1 / val)
        self.inertia = ((2 / 3) * self.mass)
    def inertia(self):
        if (self.invInertia == 0):
            return Infinity
        return (1 / self.invInertia)
    def inertia(self, val):
        # NOTE(review): unlike the mass setter there is no `else` before the
        # final assignment, so val == 0 reaches `1 / val` and raises
        # ZeroDivisionError — looks like a missing `else`; flagged only,
        # since this change is comment-only.
        if ((val == Infinity) or (val == 0)):
            self.invInertia = 0
        self.invInertia = (1 / val)
    # pos/rot delegate to the attached transform.
    def pos(self):
        return self.transform.position
    def pos(self, val):
        self.transform.position = val
    def rot(self):
        return self.transform.rotation
    def rot(self, val):
        self.transform.rotation = val
    def Move(self, dt):
        """Integrate one step of size `dt`: apply gravity, update linear and
        angular velocity from the accumulated force/torque, advance position
        and rotation, then clear the accumulators."""
        if (self.gravity and (self.invMass > 0)):
            self.force += (config.gravity * self.mass)
        self.velocity += ((self.force * self.invMass) * dt)
        self.pos += (self.velocity * dt)
        self.rotVel += (self.torque * self.invInertia)
        rotation = (self.rotVel * dt)
        angle = rotation.length
        if (angle != 0):
            # Normalize to a rotation axis; FromAxis takes the angle in degrees.
            rotation /= angle
            rotQuat = Quaternion.FromAxis(math.degrees(angle), rotation)
            self.rot *= rotQuat
        self.force = Vector3.zero()
        self.torque = Vector3.zero()
    def MovePos(self, offset):
        """Translate the body by `offset` without touching velocities."""
        self.pos += offset
    def AddForce(self, force, point=Vector3.zero()):
        """Accumulate a force applied at `point`, adding the resulting torque.

        NOTE(review): the default `point=Vector3.zero()` is evaluated once at
        definition time — safe only if Vector3 instances are immutable.
        """
        self.force += force
        self.torque += point.cross(force)
    def AddImpulse(self, impulse):
        """Add `impulse` directly to velocity (no mass scaling applied here)."""
        self.velocity += impulse
def main():
    """CLI entry point: parse arguments and run the filename-length check."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-d', '--data-root', help='directory to search for JSON files')
    arg_parser.add_argument('-v', '--verbose', type=int, help='increase output verbosity')
    parsed = arg_parser.parse_args()
    check_filename_length(parsed.data_root, verbose=parsed.verbose)
class CatalogQuestionSet(CurrentSiteQuerySetMixin, GroupsQuerySetMixin, AvailabilityQuerySetMixin, models.QuerySet):
    """QuerySet adding catalog filtering and element prefetching."""
    def filter_catalog(self, catalog):
        """Keep rows attached to `catalog` or attached to no catalog at all."""
        unassigned = models.Q(catalogs=None)
        assigned_here = models.Q(catalogs=catalog)
        return self.filter(unassigned | assigned_here)
    def prefetch_elements(self):
        """Prefetch the relations declared by the model's prefetch_lookups."""
        lookups = self.model.prefetch_lookups
        return self.prefetch_related(*lookups)
def sched__sched_migrate_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio, orig_cpu, dest_cpu):
    """perf-script handler for the sched:sched_migrate_task tracepoint.

    The parameter list is dictated by perf's Python scripting interface; the
    common fields are packed into EventHeaders and forwarded, with the
    migration details, to the module-level `parser`.
    """
    headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def get_process_result_dict(result, config_idx, mode='Train'):
    """Summarize one run's result table into a flat dict.

    Averages the last 100 returns in 'Train' mode (last 5 otherwise), with
    skipna=False so any NaN propagates into the mean.
    """
    window = 100 if mode == 'Train' else 5
    recent_returns = result['Return'][-window:]
    return {
        'Env': result['Env'][0],
        'Agent': result['Agent'][0],
        'Config Index': config_idx,
        'Return (mean)': recent_returns.mean(skipna=False),
    }
class DwsConv(nn.Module):
    """Depthwise-separable convolution: a grouped KxK per-channel conv
    followed by a 1x1 pointwise conv; neither stage has a bias."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super(DwsConv, self).__init__()
        # Depthwise stage: groups == in_channels gives one filter per channel.
        self.dw_conv = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels, bias=False)
        # Pointwise stage mixes channels with a 1x1 kernel.
        self.pw_conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=False)
    def forward(self, x):
        """Apply the depthwise conv, then the pointwise conv."""
        return self.pw_conv(self.dw_conv(x))
class RouteState(MetadataValidation, State):
    """A payment route (ordered node addresses) plus per-address metadata,
    token swaps along the way, and the route's estimated fee.

    NOTE(review): __post_init__ and field(...) defaults indicate this is a
    @dataclass in the original source; the decorator is not visible in this
    chunk — confirm.
    """
    route: List[Address]
    address_to_metadata: Dict[(Address, AddressMetadata)] = field(default_factory=dict)
    swaps: Dict[(Address, TokenNetworkAddress)] = field(default_factory=dict)
    estimated_fee: FeeAmount = FeeAmount(0)
    def __post_init__(self) -> None:
        # Fail fast: refuse to construct a route with invalid address metadata.
        validation_errors = self.validate_address_metadata()
        if validation_errors:
            addresses_with_errors = ', '.join((f'{to_checksum_address(address)}: {errors}' for (address, errors) in validation_errors.items()))
            raise ValueError(f'Could not validate metadata {addresses_with_errors}.')
    def get_metadata(self) -> Optional[Dict[(Address, AddressMetadata)]]:
        """Return the per-address metadata mapping."""
        return self.address_to_metadata
    def hop_after(self, address: Address) -> Optional[Address]:
        """Return the next hop after `address` on the route, or None when the
        address is absent or is the route's last element."""
        try:
            idx = self.route.index(address)
            return self.route[(idx + 1)]
        except (ValueError, IndexError):
            return None
    def __repr__(self) -> str:
        return 'RouteState ({}), fee: {}'.format(' -> '.join((to_checksum_address(addr) for addr in self.route)), self.estimated_fee)
def eval_triangles(x: wp.array(dtype=wp.vec3), v: wp.array(dtype=wp.vec3), indices: wp.array2d(dtype=int), pose: wp.array(dtype=wp.mat22), activation: wp.array(dtype=float), materials: wp.array2d(dtype=float), f: wp.array(dtype=wp.vec3)):
    """Per-triangle force evaluation for a triangle mesh (one thread per triangle).

    Reads the triangle's rest-pose matrix (`pose`), its material row
    [k_mu, k_lambda, k_damp, k_drag, k_lift] and activation, and atomically
    accumulates elastic, damping, area-preservation, drag and lift forces
    into `f` at the triangle's three vertices.

    NOTE(review): uses wp.tid(), so this is presumably decorated with
    @wp.kernel at a decoration site not visible in this chunk — confirm.
    """
    tid = wp.tid()
    # Material coefficients for this triangle.
    k_mu = materials[(tid, 0)]
    k_lambda = materials[(tid, 1)]
    k_damp = materials[(tid, 2)]
    k_drag = materials[(tid, 3)]
    k_lift = materials[(tid, 4)]
    # Vertex indices, positions and velocities.
    i = indices[(tid, 0)]
    j = indices[(tid, 1)]
    k = indices[(tid, 2)]
    x0 = x[i]
    x1 = x[j]
    x2 = x[k]
    v0 = v[i]
    v1 = v[j]
    v2 = v[k]
    # Edge vectors and relative velocities with respect to vertex 0.
    x10 = (x1 - x0)
    x20 = (x2 - x0)
    v10 = (v1 - v0)
    v20 = (v2 - v0)
    Dm = pose[tid]
    # Rest area recovered from the rest-pose matrix determinant.
    inv_rest_area = (wp.determinant(Dm) * 2.0)
    rest_area = (1.0 / inv_rest_area)
    # Scale material stiffness/damping by the rest area.
    k_mu = (k_mu * rest_area)
    k_lambda = (k_lambda * rest_area)
    k_damp = (k_damp * rest_area)
    # Columns of the deformation gradient and its time derivative.
    F1 = ((x10 * Dm[(0, 0)]) + (x20 * Dm[(1, 0)]))
    F2 = ((x10 * Dm[(0, 1)]) + (x20 * Dm[(1, 1)]))
    dFdt1 = ((v10 * Dm[(0, 0)]) + (v20 * Dm[(1, 0)]))
    dFdt2 = ((v10 * Dm[(0, 1)]) + (v20 * Dm[(1, 1)]))
    # Damped stress columns, mapped back to per-edge forces.
    P1 = ((F1 * k_mu) + (dFdt1 * k_damp))
    P2 = ((F2 * k_mu) + (dFdt2 * k_damp))
    f1 = ((P1 * Dm[(0, 0)]) + (P2 * Dm[(0, 1)]))
    f2 = ((P1 * Dm[(1, 0)]) + (P2 * Dm[(1, 1)]))
    # Area-preservation constraint c (offset by activation) and its rate.
    alpha = (1.0 + (k_mu / k_lambda))
    n = wp.cross(x10, x20)
    area = (wp.length(n) * 0.5)
    act = activation[tid]
    c = (((area * inv_rest_area) - alpha) + act)
    n = wp.normalize(n)
    dcdq = ((wp.cross(x20, n) * inv_rest_area) * 0.5)
    dcdr = ((wp.cross(n, x10) * inv_rest_area) * 0.5)
    f_area = (k_lambda * c)
    # NOTE(review): bare `dot` here (vs wp.dot elsewhere) — presumably an
    # alias imported at module level; confirm.
    dcdt = ((dot(dcdq, v1) + dot(dcdr, v2)) - dot((dcdq + dcdr), v0))
    f_damp = (k_damp * dcdt)
    f1 = (f1 + (dcdq * (f_area + f_damp)))
    f2 = (f2 + (dcdr * (f_area + f_damp)))
    f0 = (f1 + f2)
    # Aerodynamic terms from the triangle's mean velocity: drag along the
    # velocity, lift along the normal.
    vmid = (((v0 + v1) + v2) * 0.3333)
    vdir = wp.normalize(vmid)
    f_drag = (vmid * ((k_drag * area) * wp.abs(wp.dot(n, vmid))))
    f_lift = ((n * ((k_lift * area) * (1.57079 - wp.acos(wp.dot(n, vdir))))) * dot(vmid, vmid))
    f0 = ((f0 - f_drag) - f_lift)
    f1 = ((f1 + f_drag) + f_lift)
    f2 = ((f2 + f_drag) + f_lift)
    # Atomic accumulation: vertex 0 gains f0, vertices 1 and 2 are debited
    # f1/f2 (sign convention of this solver).
    wp.atomic_add(f, i, f0)
    wp.atomic_sub(f, j, f1)
    wp.atomic_sub(f, k, f2)
def show_performance_comparison(pos_base, neg_base, pos_ours, neg_ours, baseline_name='Baseline', method_name='Ours', recall_level=recall_level_default):
    """Log a side-by-side FPR@recall / AUROC / AUPR comparison of two methods.

    `pos_*`/`neg_*` are score collections for positive and negative examples
    of the baseline and the proposed method; sliced copies are handed to
    `get_measures`, and all metrics are reported as percentages via
    logging.info.
    """
    (auroc_base, aupr_base, fpr_base) = get_measures(pos_base[:], neg_base[:], recall_level)
    (auroc_ours, aupr_ours, fpr_ours) = get_measures(pos_ours[:], neg_ours[:], recall_level)
    # Header row: method names aligned over the metric columns below.
    logging.info(((('\t\t\t' + baseline_name) + '\t') + method_name))
    logging.info('FPR{:d}:\t\t\t{:.2f}\t\t{:.2f}'.format(int((100 * recall_level)), (100 * fpr_base), (100 * fpr_ours)))
    logging.info('AUROC:\t\t\t{:.2f}\t\t{:.2f}'.format((100 * auroc_base), (100 * auroc_ours)))
    logging.info('AUPR:\t\t\t{:.2f}\t\t{:.2f}'.format((100 * aupr_base), (100 * aupr_ours)))
def main(args):
utils.import_user_module(args)
print(args)
os.makedirs(args.destdir, exist_ok=True)
target = (not args.only_source)
task = tasks.get_task(args.task)
def train_path(lang):
return '{}{}'.format(args.trainpref, (('.' + lang) if lang else ''))
def file_name(prefix, lang):
fname = prefix
if (lang is not None):
fname += '.{lang}'.format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return (dest_path('dict', lang) + '.txt')
def build_dictionary(filenames, src=False, tgt=False):
assert (src ^ tgt)
return task.build_dictionary(filenames, workers=args.workers, threshold=(args.thresholdsrc if src else args.thresholdtgt), nwords=(args.nwordssrc if src else args.nwordstgt), padding_factor=args.padding_factor)
if ((not args.srcdict) and os.path.exists(dict_path(args.source_lang))):
raise FileExistsError(dict_path(args.source_lang))
if (target and (not args.tgtdict) and os.path.exists(dict_path(args.target_lang))):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert ((not args.srcdict) or (not args.tgtdict)), 'cannot use both --srcdict and --tgtdict with --joined-dictionary'
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, '--trainpref must be set if --srcdict is not specified'
src_dict = build_dictionary({train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert args.trainpref, '--trainpref must be set if --srcdict is not specified'
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, '--trainpref must be set if --tgtdict is not specified'
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if (target and (tgt_dict is not None)):
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
print('| [{}] Dictionary: {} types'.format(lang, (len(vocab) - 1)))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result['replaced'])
n_seq_tok[0] += worker_result['nseq']
n_seq_tok[1] += worker_result['ntok']
input_file = '{}{}'.format(input_prefix, (('.' + lang) if (lang is not None) else ''))
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if (num_workers > 1):
pool = Pool(processes=(num_workers - 1))
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
pool.apply_async(binarize, (args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab))
merge_result(Binarizer.binarize(input_file, vocab, (lambda t: ds.add_item(t)), offset=0, end=offsets[1]))
if (num_workers > 1):
pool.join()
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(lang, input_file, n_seq_tok[0], n_seq_tok[1], ((100 * sum(replaced.values())) / n_seq_tok[1]), vocab.unk_word))
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result['nseq']
input_file = input_prefix
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if (num_workers > 1):
pool = Pool(processes=(num_workers - 1))
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
pool.apply_async(binarize_alignments, (args, input_file, utils.parse_alignment, prefix, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, 'bin'), impl=args.dataset_impl)
merge_result(Binarizer.binarize_alignments(input_file, utils.parse_alignment, (lambda t: ds.add_item(t)), offset=0, end=offsets[1]))
if (num_workers > 1):
pool.join()
for worker_id in range(1, num_workers):
prefix = '{}{}'.format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx'))
print('| [alignments] {}: parsed {} alignments'.format(input_file, nseq[0]))
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if (args.dataset_impl == 'raw'):
output_text_file = dest_path((output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang)), lang)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, 'train', lang, num_workers=args.workers)
if args.validpref:
for (k, validpref) in enumerate(args.validpref.split(',')):
outprefix = ('valid{}'.format(k) if (k > 0) else 'valid')
make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers)
if args.testpref:
for (k, testpref) in enumerate(args.testpref.split(',')):
outprefix = ('test{}'.format(k) if (k > 0) else 'test')
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
    """Binarize the alignment file of every split that has one on disk.

    The original body repeated the same check/call for train, valid and
    test; the loop below keeps the exact same order and behavior while
    stating the pattern once.
    """
    for pref, out_name in ((args.trainpref, 'train.align'),
                           (args.validpref, 'valid.align'),
                           (args.testpref, 'test.align')):
        # Skip splits whose prefix is unset or whose alignment file is absent.
        if pref and os.path.exists(pref + '.' + args.align_suffix):
            make_binary_alignment_dataset(pref + '.' + args.align_suffix,
                                          out_name, num_workers=args.workers)
# Binarize the source side, then the target side (if paired), then alignments.
make_all(args.source_lang, src_dict)
if target:
    make_all(args.target_lang, tgt_dict)
if args.align_suffix:
    make_all_alignments()
print('| Wrote preprocessed data to {}'.format(args.destdir))
if args.alignfile:
    # Build a word-alignment dictionary from the training data: for every
    # source token, record the target token it is most often aligned to.
    assert args.trainpref, '--trainpref must be set if --alignfile is specified'
    src_file_name = train_path(args.source_lang)
    tgt_file_name = train_path(args.target_lang)
    freq_map = {}  # src token id -> {tgt token id -> co-occurrence count}
    with open(args.alignfile, 'r', encoding='utf-8') as align_file:
        with open(src_file_name, 'r', encoding='utf-8') as src_file:
            with open(tgt_file_name, 'r', encoding='utf-8') as tgt_file:
                # Walk the three files in lockstep. zip_longest pads with None
                # if lengths differ, in which case a.split()/encode_line fails.
                for (a, s, t) in zip_longest(align_file, src_file, tgt_file):
                    si = src_dict.encode_line(s, add_if_not_exist=False)
                    ti = tgt_dict.encode_line(t, add_if_not_exist=False)
                    # Each alignment token has the form "srcpos-tgtpos".
                    ai = list(map((lambda x: tuple(x.split('-'))), a.split()))
                    for (sai, tai) in ai:
                        srcidx = si[int(sai)]
                        tgtidx = ti[int(tai)]
                        # Count only alignments between in-vocabulary tokens.
                        if ((srcidx != src_dict.unk()) and (tgtidx != tgt_dict.unk())):
                            # Special symbols should never appear in real text.
                            assert (srcidx != src_dict.pad())
                            assert (srcidx != src_dict.eos())
                            assert (tgtidx != tgt_dict.pad())
                            assert (tgtidx != tgt_dict.eos())
                            if (srcidx not in freq_map):
                                freq_map[srcidx] = {}
                            if (tgtidx not in freq_map[srcidx]):
                                freq_map[srcidx][tgtidx] = 1
                            else:
                                freq_map[srcidx][tgtidx] += 1
    # Keep only the most frequent target alignment per source token.
    align_dict = {}
    for srcidx in freq_map.keys():
        align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
    # Persist the dictionary as "src_word tgt_word" lines.
    with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(args.source_lang, args.target_lang)), 'w', encoding='utf-8') as f:
        for (k, v) in align_dict.items():
            print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
class ConsoleReporter(Reporter):
    """Reporter that pretty-prints test traces to a console stream.

    NOTE(review): the annotated fields look like dataclass/attrs-style
    fields whose decorator would be above this view — their order and
    defaults are kept exactly as-is for that reason.
    """

    report_on_success: bool
    file: Optional[IO[Text]] = sys.stdout

    def report_test(self, test: Test[(Diff[protocol.JsonLike], bytes)]):
        """Echo every transition of *test*: its actions, then either the
        state difference (normal step) or the error (failing step)."""
        out = self.file
        click.echo('Trace:', file=out)
        for index, step in enumerate(test.transitions):
            click.echo(element_heading(f'\nTransition #{index}'), file=out)
            click.echo('\nActions and events:', file=out)
            for act in step.actions:
                click.echo(f'\n- {printer.pretty_print_action(act)}', file=out)
            if isinstance(step, StateTransition):
                click.echo('\nState difference:', file=out)
                print_state_diff(step, file=out)
            elif isinstance(step, ErrorTransition):
                click.echo('\nError:\n', file=out)
                click.echo(errored(step.error), file=out)

    def report(self, result: PlainResult):
        """Report failing/erroring tests always; passing tests only when
        ``report_on_success`` is set."""
        if isinstance(result, Failed):
            self.report_test(diff_test(result.failed_test))
        elif isinstance(result, Errored):
            self.report_test(diff_test(result.errored_test))
        elif isinstance(result, Passed) and self.report_on_success:
            for passed in result.passed_tests:
                self.report_test(diff_test(passed))
def generate_fswap_unitaries(swap_pairs: List[List[Tuple]], dimension: int):
    """Build one swap unitary per group of index pairs.

    For every group in ``swap_pairs`` a Hermitian generator G is assembled
    (-1 on both diagonal entries of each pair, +1 on the symmetric
    off-diagonal entries) and exponentiated as exp(-i*pi*G/2).

    Args:
        swap_pairs: groups of (i, j) index pairs; each group yields one unitary.
        dimension: size of the square matrices produced.

    Returns:
        List of ``dimension x dimension`` complex unitary matrices, one per group.
    """
    unitaries = []
    for pair_group in swap_pairs:
        gen = np.zeros((dimension, dimension), dtype=np.complex128)
        for row, col in pair_group:
            # -1 on the two diagonal entries, +1 on the symmetric off-diagonals.
            gen[row, row] = -1
            gen[col, col] = -1
            gen[row, col] = 1
            gen[col, row] = 1
        unitaries.append(expm(-0.5j * np.pi * gen))
    return unitaries
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.