code stringlengths 281 23.7M |
|---|
def build_environment(poetry: CorePoetry, env: (Env | None)=None, io: (IO | None)=None) -> Iterator[Env]:
    """Yield an environment suitable for running the project's build backend.

    If an environment is supplied and the package has no build script, it is
    yielded unchanged.  Otherwise an ephemeral virtualenv is created and the
    build-system requirements are installed into it before it is yielded.
    """
    if env and not poetry.package.build_script:
        yield env
        return
    with ephemeral_environment(executable=env.python if env else None) as venv:
        # Overwrite the progress line only on decorated (interactive) output
        # and outside of debug mode.
        overwrite = io is not None and io.output.is_decorated() and not io.is_debug()
        if io:
            if not overwrite:
                io.write_error_line('')
            requires = [f'<c1>{requirement}</c1>' for requirement in poetry.pyproject.build_system.requires]
            io.overwrite_error(f"<b>Preparing</b> build environment with build-system requirements {', '.join(requires)}")
        venv.run_pip('install', '--disable-pip-version-check', '--ignore-installed', '--no-input', *poetry.pyproject.build_system.requires)
        if overwrite:
            assert io is not None
            io.write_error_line('')
        yield venv
def levenshtein(s1: str, s2: str) -> int:
    """Return the Levenshtein (edit) distance between *s1* and *s2*."""
    # Keep the shorter string second so the row buffer stays small.
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    if not s2:
        return len(s1)
    prev = list(range(len(s2) + 1))
    for row, ch1 in enumerate(s1, start=1):
        curr = [row]
        for col, ch2 in enumerate(s2):
            cost_insert = prev[col + 1] + 1
            cost_delete = curr[col] + 1
            # Substitution costs 1 only when the characters differ.
            cost_subst = prev[col] + (ch1 != ch2)
            curr.append(min(cost_insert, cost_delete, cost_subst))
        prev = curr
    return prev[-1]
class IMU(object):
    """Samples inertial data via RTIMULib and publishes it through pypilot.

    When server.multiprocessing is enabled the sensor loop runs in a
    dedicated (realtime-priority) child process and forwards samples over a
    non-blocking pipe; otherwise the owner drives read()/poll() directly.
    """

    def __init__(self, server):
        self.client = pypilotClient(server)
        self.multiprocessing = server.multiprocessing
        if self.multiprocessing:
            (self.pipe, pipe) = NonBlockingPipe('imu pipe', self.multiprocessing)
            # target=self.process binds the *method* before the attribute is
            # rebound to the Process object on this same line.
            self.process = multiprocessing.Process(target=self.process, args=(pipe,), daemon=True)
            self.process.start()
            return
        self.process = False
        self.setup()

    def setup(self):
        """Register client values and build the RTIMULib settings object."""
        self.client.watch('imu.accel.calibration')
        self.client.watch('imu.compass.calibration')
        self.client.watch('imu.rate')
        self.gyrobias = self.client.register(SensorValue('imu.gyrobias', persistent=True))
        self.error = self.client.register(StringValue('imu.error', ''))
        self.lastgyrobiastime = time.monotonic()
        SETTINGS_FILE = 'RTIMULib'
        print(((_('Using settings file') + ' ') + SETTINGS_FILE) + '.ini')
        s = RTIMU.Settings(SETTINGS_FILE)
        s.FusionType = 1
        s.CompassCalValid = False
        s.CompassCalEllipsoidOffset = (0, 0, 0)
        s.CompassCalEllipsoidValid = True
        s.MPU925xAccelFsr = 0
        s.MPU925xGyroFsr = 0
        # Sample the MPU at 100Hz regardless of the published output rate.
        rate = 100
        s.MPU925xGyroAccelSampleRate = rate
        s.MPU925xCompassSampleRate = rate
        # Placeholder accel calibration; refined from imu.accel.calibration.
        s.AccelCalValid = True
        s.AccelCalMin = (-1, -1, -1)
        s.AccelCalMax = (1, 1, 1)
        s.GyroBiasValid = False
        s.GyroBias = (0, 0, 0)
        (s.KalmanRk, s.KalmanQ) = (0.002, 0.001)
        self.s = s
        self.imu_detect_time = 0
        self.rtimu = None
        self.init()
        self.lastdata = False
        self.rate = 10  # output rate in Hz; updated via imu.rate messages

    def init(self):
        """(Re-)detect and initialize the IMU hardware; throttled to 1Hz."""
        t0 = time.monotonic()
        self.s.IMUType = 0  # 0 = autodetect
        if t0 - self.imu_detect_time < 1:
            return  # avoid hammering the bus with detection attempts
        self.imu_detect_time = t0
        rtimu = RTIMU.RTIMU(self.s)
        if rtimu.IMUName() == 'Null IMU':
            # Only complain if we previously had a working IMU.
            if self.rtimu:
                print(_('ERROR: No IMU Detected'), t0)
            self.error.set('No IMU')
            self.s.IMUType = 0
            return
        print('IMU Name: ' + rtimu.IMUName())
        if not rtimu.IMUInit():
            print(_('ERROR: IMU Init Failed, no inertial data available'), t0)
            self.error.set('IMU Failed')
            self.s.IMUType = 0
            return
        rtimu.setSlerpPower(0.01)
        rtimu.setGyroEnable(True)
        rtimu.setAccelEnable(True)
        rtimu.setCompassEnable(True)
        time.sleep(0.1)
        self.rtimu = rtimu
        self.avggyro = [0, 0, 0]
        self.compass_calibration_updated = False
        # One flag per sensor axis (3 accel + 3 gyro + 3 compass); each is
        # set once that axis has been observed to change value.
        self.axes_test = [False] * 9
        self.last_axes = False
        self.error.set('IMU not initialized')

    def process(self, pipe):
        """Entry point of the dedicated IMU child process.

        Reads samples at self.rate and pushes them down *pipe* forever.
        """
        print('imu process', os.getpid())
        if not RTIMU:
            # RTIMULib unavailable: idle so the parent's pipe stays open.
            while True:
                time.sleep(10)
        if os.system('sudo chrt -pf 2 %d 2>&1 > /dev/null' % os.getpid()):
            print(_('warning, failed to make imu process realtime'))
        else:
            print(_('made imu process realtime'))
        self.setup()
        while True:
            t0 = time.monotonic()
            data = self.read()
            t1 = time.monotonic()
            pipe.send(data, not data)
            t2 = time.monotonic()
            if not self.s.GyroBiasValid:
                # Seed the gyro bias from the persisted value once received.
                if self.gyrobias.value:
                    print(_('setting initial gyro bias'), self.gyrobias.value)
                    self.s.GyroBias = tuple(map(math.radians, self.gyrobias.value))
                    self.s.GyroBiasValid = True
            if t0 - self.lastgyrobiastime > 30:
                # Persist the fused gyro bias every 30 seconds.
                self.gyrobias.set(list(map(math.degrees, self.s.GyroBias)))
                self.lastgyrobiastime = t0
                self.s.GyroBiasValid = True
            self.poll()
            t3 = time.monotonic()
            dt = time.monotonic() - t0
            period = 1 / self.rate
            t = period - dt
            if t > 0 and t < period:
                time.sleep(t)
            else:
                print(_('imu process failed to keep time'), dt, t0, t1, t2, t3)

    def read(self):
        """Read one sample from the IMU; return the data dict or False."""
        t0 = time.monotonic()
        if not self.s.IMUType:
            self.init()  # no device yet: retry detection
            return False
        if not self.rtimu.IMURead():
            print(_('failed to read IMU!'), t0)
            self.init()  # device likely vanished: redetect
            return False
        data = self.rtimu.getIMUData()
        data['accel.residuals'] = list(self.rtimu.getAccelResiduals())
        data['timestamp'] = t0
        if self.compass_calibration_updated:
            data['compass_calibration_updated'] = True
            self.compass_calibration_updated = False
        # Keep the raw axes for the axes self-test in poll().
        self.lastdata = (list(data['accel']), list(data['gyro']), list(data['compass']))
        return data

    def poll(self):
        """Apply incoming calibration/rate messages and sanity-check the
        most recent sample (axes self-test, standing gyro bias, compass
        range)."""
        msgs = self.client.receive()
        for name in msgs:
            value = msgs[name]
            if name == 'imu.accel.calibration':
                self.s.AccelCalValid = True
                # value[0] is (bias x, bias y, bias z, tolerance).
                (b, t) = (value[0][:3], value[0][3])
                self.s.AccelCalMin = (b[0] - t, b[1] - t, b[2] - t)
                self.s.AccelCalMax = (b[0] + t, b[1] + t, b[2] + t)
            elif name == 'imu.compass.calibration':
                self.compass_calibration_updated = True
                self.s.CompassCalEllipsoidValid = True
                self.s.CompassCalEllipsoidOffset = tuple(value[0][:3])
                if self.rtimu:
                    self.rtimu.resetFusion()
            elif name == 'imu.rate':
                self.rate = value
                print(_('imu rate set to rate'), value)
        if not self.lastdata:
            return
        (accel, gyro, compass) = self.lastdata
        self.lastdata = False
        if self.axes_test:
            axes = accel + gyro + compass
            if self.last_axes:
                # BUGFIX: materialize the map() result.  In Python 3 a bare
                # map iterator would be exhausted by all() below, leaving an
                # empty iterator whose all() is True on the next cycle, so
                # the axes test would wrongly report success.
                self.axes_test = list(map(lambda a, b, p: p or (a != b), axes, self.last_axes, self.axes_test))
            self.last_axes = axes
            if not all(self.axes_test):
                self.error.set('IMU waiting on axes')
            else:
                print('IMU all sensor axes verified')
                self.error.set('')
                self.axes_test = False
        # Low-pass filter the gyro to detect an abnormal standing bias.
        d = 0.05 / self.rate
        for i in range(3):
            self.avggyro[i] = (1 - d) * self.avggyro[i] + d * gyro[i]
        if vector.norm(self.avggyro) > 0.8:
            print(_('too high standing gyro bias, resetting sensors'), gyro, self.avggyro)
            self.init()
        if any(map(lambda x: abs(x) > 1000, compass)):
            print(_('compass out of range, resetting'), compass)
            self.init()
def collapse_aware_exception_split(exc, etype):
if (not isinstance(exc, BaseExceptionGroup)):
if isinstance(exc, etype):
return (exc, None)
else:
return (None, exc)
(match, rest) = exc.split(etype)
if isinstance(match, BaseExceptionGroup):
match = collapse_exception_group(match)
if isinstance(rest, BaseExceptionGroup):
rest = collapse_exception_group(rest)
return (match, rest) |
def test_inconsistent_array_params(location, sapm_module_params, cec_module_params):
    """Mixed module models or mismatched temperature parameters across
    Arrays must be rejected by ModelChain."""
    def make_array(module_parameters, temperature_model_parameters=None):
        # All test arrays share the same fixed mount.
        kwargs = {'mount': pvsystem.FixedMount(0, 180), 'module_parameters': module_parameters}
        if temperature_model_parameters is not None:
            kwargs['temperature_model_parameters'] = temperature_model_parameters
        return pvsystem.Array(**kwargs)

    module_error = '.* selected for the DC model but one or more Arrays are missing one or more required parameters'
    temperature_error = 'could not infer temperature model from system\\.temperature_model_parameters\\. Check that all Arrays in system\\.arrays have parameters for the same temperature model\\. Common temperature model parameters: .*'
    # One SAPM array mixed with two CEC arrays: DC model parameters differ.
    different_module_system = pvsystem.PVSystem(arrays=[
        make_array(sapm_module_params),
        make_array(cec_module_params),
        make_array(cec_module_params),
    ])
    with pytest.raises(ValueError, match=module_error):
        ModelChain(different_module_system, location, dc_model='cec')
    # Third array is missing the 'a' temperature parameter.
    different_temp_system = pvsystem.PVSystem(arrays=[
        make_array(cec_module_params, {'a': 1, 'b': 1, 'deltaT': 1}),
        make_array(cec_module_params, {'a': 2, 'b': 2, 'deltaT': 2}),
        make_array(cec_module_params, {'b': 3, 'deltaT': 3}),
    ])
    with pytest.raises(ValueError, match=temperature_error):
        ModelChain(different_temp_system, location, ac_model='sandia', aoi_model='no_loss', spectral_model='no_loss', temperature_model='sapm')
def list_tags_raw(filenames):
    """Dump the raw ID3v2 frames of each file in *filenames* to stdout."""
    for path in filenames:
        print('Raw IDv2 tag info for', path)
        try:
            tag = mutagen.id3.ID3(path, translate=False)
        except mutagen.id3.ID3NoHeaderError:
            # Not an error: the file simply carries no ID3 tag.
            print(u'No ID3 header found; skipping.')
        except Exception as err:
            # Unexpected failure: report it and abort with a non-zero status.
            print(str(err), file=sys.stderr)
            raise SystemExit(1)
        else:
            for frame in tag.values():
                print(str(repr(frame)))
class CombinedROIHeads(nn.ModuleDict):
    """Combines the box head with an optional mask head.

    When configured, the mask head can reuse the box head's feature
    extractor (and, during training, its extracted features).
    """

    def __init__(self, heads):
        super().__init__(heads)
        mask_cfg = config.MODEL.INSTANCE2D.ROI_HEADS.ROI_MASK_HEAD
        if mask_cfg.USE and mask_cfg.SHARE_BOX_FEATURE_EXTRACTOR:
            self.mask.feature_extractor = self.box.feature_extractor

    def forward(self, features, proposals, targets=None):
        losses = {}
        x, detections, loss_box = self.box(features, proposals, targets)
        losses.update(loss_box)
        mask_cfg = config.MODEL.INSTANCE2D.ROI_HEADS.ROI_MASK_HEAD
        if mask_cfg.USE:
            # During training with a shared extractor, reuse the box
            # features instead of re-extracting from the backbone.
            if self.training and mask_cfg.SHARE_BOX_FEATURE_EXTRACTOR:
                mask_features = x
            else:
                mask_features = features
            detections, loss_mask = self.mask(mask_features, detections, targets)
            losses.update(loss_mask)
        return (detections, losses)

    def inference(self, features, proposals):
        x, detections, _ = self.box(features, proposals)
        mask_cfg = config.MODEL.INSTANCE2D.ROI_HEADS.ROI_MASK_HEAD
        if mask_cfg.USE:
            if self.training and mask_cfg.SHARE_BOX_FEATURE_EXTRACTOR:
                mask_features = x
            else:
                mask_features = features
            detections, _ = self.mask(mask_features, detections, None)
        return detections
def extract_feature(model, dataloaders):
    """Run *model* over a dataloader and return L2-normalised features.

    Each image is evaluated twice — as-is and horizontally flipped — and
    the two feature vectors are summed before normalisation.
    """
    features = torch.FloatTensor()
    count = 0
    for img, label in dataloaders:
        n, c, h, w = img.size()
        count += n
        print(count)
        # Feature width depends on the backbone (DenseNet vs ResNet).
        dim = 1024 if opt.use_dense else 2048
        ff = torch.FloatTensor(n, dim).zero_()
        for pass_idx in range(2):
            if pass_idx == 1:
                img = fliplr(img)
            outputs = model(Variable(img.cuda()), False)
            ff = ff + outputs.data.cpu()
        # Normalise each row to unit L2 norm.
        fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
        ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features, ff), 0)
    return features
class VOT(object):
    """Helper for trackers integrating with the VOT toolkit over trax."""

    def __init__(self, region_format, channels=None):
        assert region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON]
        # Map the channel configuration string onto the trax channel list.
        known = (('rgbd', ['color', 'depth']), ('rgbt', ['color', 'ir']), ('ir', ['ir']))
        if channels is None:
            channels = ['color']
        else:
            for key, mapped in known:
                if channels == key:
                    channels = mapped
                    break
            else:
                raise Exception('Illegal configuration {}.'.format(channels))
        self._trax = trax.Server([region_format], [trax.Image.PATH], channels)
        request = self._trax.wait()
        assert request.type == 'initialize'
        if isinstance(request.region, trax.Polygon):
            self._region = Polygon([Point(pt[0], pt[1]) for pt in request.region])
        else:
            self._region = Rectangle(*request.region.bounds())
        paths = [str(img) for (k, img) in request.image.items()]
        # A single channel is stored as a bare path, multiple as a list.
        self._image = paths[0] if len(paths) == 1 else paths
        self._trax.status(request.region)

    def region(self):
        """Return the initial region sent by the toolkit."""
        return self._region

    def report(self, region, confidence=None):
        """Report the tracked region (Rectangle or Polygon) for this frame."""
        assert isinstance(region, (Rectangle, Polygon))
        if isinstance(region, Polygon):
            tregion = trax.Polygon.create([(pt.x, pt.y) for pt in region.points])
        else:
            tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height)
        properties = {}
        if confidence is not None:
            properties['confidence'] = confidence
        self._trax.status(tregion, properties)

    def frame(self):
        """Return the path(s) of the next frame, or None at end of sequence."""
        # First call consumes the image stored during initialization.
        if hasattr(self, '_image'):
            image = self._image
            del self._image
            return tuple(image)
        request = self._trax.wait()
        if request.type != 'frame':
            return None
        image = [str(img) for (k, img) in request.image.items()]
        if len(image) == 1:
            image = image[0]
        return tuple(image)

    def quit(self):
        if hasattr(self, '_trax'):
            self._trax.quit()

    def __del__(self):
        self.quit()
class Effect4045(BaseEffect):
    """Projected passive effect: scales the empFieldRange of every fitted
    'Smart Bomb' module by the projecting module's multiplier."""

    runTime = 'early'
    type = ('projected', 'passive')

    def handler(fit, module, context, projectionRange, **kwargs):
        def is_smart_bomb(mod):
            return mod.item.group.name == 'Smart Bomb'
        multiplier = module.getModifiedItemAttr('empFieldRangeMultiplier')
        fit.modules.filteredItemMultiply(is_smart_bomb, 'empFieldRange', multiplier, **kwargs)
def test_delete_invalid_driver(path_rgb_byte_tif, tmpdir):
    """Deleting with an unknown driver name raises DriverRegistrationError."""
    dst = str(tmpdir.join('test_invalid_driver.tif'))
    rasterio.shutil.copy(path_rgb_byte_tif, dst)
    with pytest.raises(DriverRegistrationError) as excinfo:
        rasterio.shutil.delete(dst, driver='trash')
    assert 'Unrecognized driver' in str(excinfo.value)
class FlaxHybridCLIPModule(nn.Module):
    """CLIP-style dual encoder pairing an arbitrary text model with a vision
    model, each projected into a shared embedding space; similarity logits
    are scaled by a learned temperature."""

    # Holds the text_config / vision_config sub-configurations.
    config: HybridCLIPConfig
    # Computation dtype used by all submodules.
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        text_config = self.config.text_config
        vision_config = self.config.vision_config
        self.projection_dim = self.config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size
        # Look up the Flax module class registered for each config type;
        # unknown vision configs fall back to the CLIP vision tower.
        text_module = FLAX_MODEL_MAPPING[self.config.text_config.__class__].module_class
        vision_module = FLAX_MODEL_MAPPING.get(self.config.vision_config.__class__, FlaxCLIPVisionModel).module_class
        self.text_model = text_module(text_config, dtype=self.dtype)
        self.vision_model = vision_module(vision_config, dtype=self.dtype)
        # Bias-free linear projections into the shared embedding space.
        self.visual_projection = nn.Dense(self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False)
        self.text_projection = nn.Dense(self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False)
        # Learned log-temperature applied to the contrastive logits.
        self.logit_scale = self.param('logit_scale', jax.nn.initializers.ones, [])

    def __call__(self, input_ids=None, pixel_values=None, attention_mask=None, position_ids=None, token_type_ids=None, deterministic: bool=True, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Encode both modalities and return image/text similarity logits."""
        return_dict = (return_dict if (return_dict is not None) else self.config.return_dict)
        vision_outputs = self.vision_model(pixel_values=pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        # Index 1 is the pooled output of each encoder.
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)
        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        # L2-normalise so the matmul below yields cosine similarities.
        image_embeds = (image_embeds / jnp.linalg.norm(image_embeds, axis=(- 1), keepdims=True))
        text_embeds = (text_embeds / jnp.linalg.norm(text_embeds, axis=(- 1), keepdims=True))
        logit_scale = jnp.exp(self.logit_scale)
        logits_per_text = (jnp.matmul(text_embeds, image_embeds.T) * logit_scale)
        logits_per_image = logits_per_text.T
        if (not return_dict):
            return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
        return FlaxCLIPOutput(logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
def test_op_invalid_input_types():
    """Passing a scalar where a vector input is declared raises TypeError."""
    class TestOp(pytensor.graph.op.Op):
        itypes = [dvector, dvector, dvector]
        otypes = [dvector]

        def perform(self, node, inputs, outputs):
            pass

    expected = '^Invalid input types for Op.*'
    with pytest.raises(TypeError, match=expected):
        # The middle argument is a dscalar, violating the declared itypes.
        TestOp()(dvector(), dscalar(), dvector())
# BUGFIX: the decorator was garbled to a bare `(reason='data is local')`
# expression (a syntax error); restore the skip marker it came from.
@pytest.mark.skip(reason='data is local')
def test_gacos():
    """Exercise GACOSCorrection grid resampling at several resolutions.

    Requires locally available GACOS .ztd data, hence skipped by default.
    """
    corr = GACOSCorrection()
    corr.load('/home/marius/Development/testing/kite/GACOS/.ztd')
    grd = corr.grids[0]
    # Full resolution.
    d = grd.get_corrections(grd.llLat, grd.llLon, -grd.dLat, grd.dLon, grd.rows, grd.cols)
    # Half resolution.
    d = grd.get_corrections(grd.llLat, grd.llLon, -grd.dLat * 2, grd.dLon * 2, grd.rows // 2, grd.cols // 2)
    # Shifted origin with a finer posting.
    d = grd.get_corrections(grd.llLat + 0.01, grd.llLon + 0.01, -grd.dLat / 1.5, grd.dLon / 1.5, int(grd.rows * 1.0), int(grd.cols * 1.0))
    plt.imshow(grd.data)
    plt.show()
    plt.imshow(d)
    plt.show()
def get_pyramidnet_cifar(num_classes, blocks, alpha, bottleneck, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create a PyramidNet model for CIFAR-10/100.

    Channel widths grow additively by alpha spread evenly over all units;
    with bottleneck blocks the final widths are multiplied by 4.
    Raises ValueError when pretrained weights are requested without a
    model_name.
    """
    assert num_classes in [10, 100]
    if bottleneck:
        assert (blocks - 2) % 9 == 0
        layers = [(blocks - 2) // 9] * 3
    else:
        assert (blocks - 2) % 6 == 0
        layers = [(blocks - 2) // 6] * 3
    init_block_channels = 16
    growth_add = float(alpha) / float(sum(layers))
    # Build the per-stage widths: within a stage, unit i adds
    # (i + 1) * growth_add to the last width of the previous stage.
    channels = []
    last_width = float(init_block_channels)
    for stage_units in layers:
        stage = [(unit + 1) * growth_add + last_width for unit in range(stage_units)]
        channels.append(stage)
        last_width = stage[-1]
    channels = [[int(round(width)) for width in stage] for stage in channels]
    if bottleneck:
        channels = [[width * 4 for width in stage] for stage in channels]
    net = CIFARPyramidNet(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def nvram_listener():
    """Serve canned NVRAM answers on the emulated firmware's cfm socket.

    Accepts connections on a Unix socket forever, replying with a fixed LAN
    IP while the accumulated request contains 'lan.webiplansslen', and logs
    every received request to cfm_socket.log.
    """
    server_address = f'{ROOTFS}/var/cfm_socket'
    # Remove a stale socket file from a previous run.
    if os.path.exists(server_address):
        os.unlink(server_address)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.bind(server_address)
    sock.listen(1)
    data = bytearray()
    with open('cfm_socket.log', 'wb') as ofile:
        while True:
            (connection, _) = sock.accept()
            try:
                while True:
                    data += connection.recv(1024)
                    # NOTE(review): the loop keeps answering only while the
                    # request contains this key — confirm against the
                    # firmware's cfm protocol.
                    if (b'lan.webiplansslen' not in data):
                        break
                    connection.send(b'192.168.170.169')
                    ofile.write(data)
                    data.clear()
            finally:
                connection.close()
def get_gcn_fact(adj):
    """Return the symmetrically normalised GCN propagation matrix
    D^-1/2 (A + I) D^-1/2 for adjacency *adj* (uses the global node_num)."""
    # Add self-loops.
    adj_self = adj + np.eye(node_num, node_num)
    degree = np.array(adj_self.sum(1))
    inv_sqrt_degree = np.power(degree, -0.5).flatten()
    # Isolated nodes have zero degree -> inf after power; zero them out.
    inv_sqrt_degree[np.isinf(inv_sqrt_degree)] = 0.0
    norm = np.mat(np.diag(inv_sqrt_degree))
    return (norm * adj_self) * norm
class Migration(migrations.Migration):
    """Switch Conference.introduction and Conference.name to their
    i18n-aware field types (I18nTextField / I18nCharField)."""
    dependencies = [('conferences', '0010_merge__1807')]
    operations = [migrations.AlterField(model_name='conference', name='introduction', field=i18n.fields.I18nTextField(verbose_name='introduction')), migrations.AlterField(model_name='conference', name='name', field=i18n.fields.I18nCharField(max_length=100, verbose_name='name'))]
def Mv_setup_options():
    """Demonstrate the basis-naming variants accepted by Ga.build()."""
    Print_Function()
    # Each spelling builds the same 3-D Euclidean algebra with a
    # differently named basis.
    for basis in ('e_1 e_2 e_3', 'e*1|2|3', 'e*x|y|z'):
        o3d, e1, e2, e3 = Ga.build(basis, g=[1, 1, 1])
        v = o3d.mv('v', 'vector')
        print(v)
    # With coordinates, the basis name is generated from the coord symbols.
    coords = symbols('x y z', real=True)
    o3d, e1, e2, e3 = Ga.build('e', g=[1, 1, 1], coords=coords)
    v = o3d.mv('v', 'vector')
    print(v)
    print(v.grade(2))
    print(v.i_grade)
    return
# BUGFIX: the marker was garbled to a bare `.online`; restore it.
@pytest.mark.online
def test_pypi_multiple_pkg(cache_dir):
    """Querying two known-vulnerable releases returns results for both."""
    pypi = service.PyPIService(cache_dir)
    deps: list[service.Dependency] = [service.ResolvedDependency('jinja2', Version('2.4.1')), service.ResolvedDependency('flask', Version('0.5'))]
    # BUGFIX: the annotation was `dict[(A, B)]` (decompiler artifact);
    # the correct subscription is dict[A, B].
    results: dict[service.Dependency, list[service.VulnerabilityResult]] = dict(pypi.query_all(iter(deps)))
    assert len(results) == 2
    assert (deps[0] in results) and (deps[1] in results)
    # Both of these old releases have published advisories.
    assert len(results[deps[0]]) > 0
    assert len(results[deps[1]]) > 0
def log_debug_tracing(func):
    """Decorator for handler methods: logs the qualified method name, the
    request body of the first positional argument, and the keyword
    arguments at DEBUG level before delegating to *func*.

    Expects args[0] to expose `.request.body` and `self` to expose
    `.log(message=..., level=...)`.
    """
    from functools import wraps

    # BUGFIX: without functools.wraps the wrapped method lost its
    # __name__/__doc__, breaking introspection and debugging output.
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        func_name = '%s.%s' % (self.__class__.__name__, func.__name__)
        self.log(message='On {}, body {}, kwargs {}'.format(func_name, args[0].request.body, str(kwargs)), level=logging.DEBUG)
        return func(self, *args, **kwargs)
    return wrapper
class WIREGUARD(asyncio.DatagramProtocol):
    """Minimal WireGuard responder over UDP.

    Answers Noise_IKpsk2 handshake initiations (message type 1) and
    decrypts/encrypts transport data packets (message type 4), handing
    inner payloads to self.ippacket.
    """

    def __init__(self, args):
        self.args = args
        # All-zero preshared key (i.e. PSK unused).
        self.preshared_key = (b'\x00' * 32)
        self.ippacket = ip.IPPacket(args)
        # Static private key is derived from the configured password.
        self.private_key = hashlib.blake2s(args.passwd.encode()).digest()
        # X25519 public key = private * base point (9).
        self.public_key = crypto.X25519(self.private_key, 9)
        # Our receiver index -> (peer index, receiving key, sending key).
        self.keys = {}
        # Our receiver index -> outgoing packet counter.
        self.index_generators = {}
        self.sender_index_generator = itertools.count()
        print(' WIREGUARD SETTING ')
        print('PublicKey:', base64.b64encode(self.public_key).decode())
        print('')

    def connection_made(self, transport):
        self.transport = transport

    def datagram_received(self, data, addr):
        # Message type is the first little-endian 32-bit word.
        cmd = int.from_bytes(data[0:4], 'little')
        if ((cmd == 1) and (len(data) == 148)):
            # --- Handshake initiation (Noise_IKpsk2 responder side) ---
            HASH = (lambda x: hashlib.blake2s(x).digest())
            MAC = (lambda key, x: hashlib.blake2s(x, key=key, digest_size=16).digest())
            HMAC = (lambda key, x: hmac.digest(key, x, hashlib.blake2s))
            (p, mac1, mac2) = struct.unpack('<116s16s16s', data)
            # mac1 authenticates the message against our public key;
            # mac2 is only set under cookie/load protection.
            assert (mac1 == MAC(HASH((b'mac1----' + self.public_key)), p))
            assert (mac2 == (b'\x00' * 16))
            index = next(self.sender_index_generator)
            (sender_index, unencrypted_ephemeral, encrypted_static, encrypted_timestamp) = struct.unpack('<4xI32s48s28s', data[:(- 32)])
            # Initialize the chaining key / transcript hash per the spec.
            chaining_key = HASH(b'Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s')
            hash0 = HASH((HASH((HASH((chaining_key + b'WireGuard v1 zx2c4 .com')) + self.public_key)) + unencrypted_ephemeral))
            chaining_key = HMAC(HMAC(chaining_key, unencrypted_ephemeral), b'\x01')
            # Mix DH(static_priv, peer_ephemeral) and decrypt the peer's static key.
            temp = HMAC(chaining_key, crypto.X25519(self.private_key, unencrypted_ephemeral))
            chaining_key = HMAC(temp, b'\x01')
            static_public = crypto.aead_chacha20poly1305_decrypt(HMAC(temp, (chaining_key + b'\x02')), 0, encrypted_static, hash0)
            hash0 = HASH((hash0 + encrypted_static))
            # Mix DH(static_priv, peer_static) and decrypt the timestamp.
            temp = HMAC(chaining_key, crypto.X25519(self.private_key, static_public))
            chaining_key = HMAC(temp, b'\x01')
            timestamp = crypto.aead_chacha20poly1305_decrypt(HMAC(temp, (chaining_key + b'\x02')), 0, encrypted_timestamp, hash0)
            hash0 = HASH((hash0 + encrypted_timestamp))
            # --- Build the handshake response ---
            ephemeral_private = os.urandom(32)
            ephemeral_public = crypto.X25519(ephemeral_private, 9)
            hash0 = HASH((hash0 + ephemeral_public))
            # Mix our ephemeral, DH(eph, peer_eph) and DH(eph, peer_static).
            chaining_key = HMAC(HMAC(HMAC(HMAC(HMAC(HMAC(chaining_key, ephemeral_public), b'\x01'), crypto.X25519(ephemeral_private, unencrypted_ephemeral)), b'\x01'), crypto.X25519(ephemeral_private, static_public)), b'\x01')
            # Mix the preshared key.
            temp = HMAC(chaining_key, self.preshared_key)
            chaining_key = HMAC(temp, b'\x01')
            temp2 = HMAC(temp, (chaining_key + b'\x02'))
            key = HMAC(temp, (temp2 + b'\x03'))
            hash0 = HASH((hash0 + temp2))
            # "Empty" AEAD payload proves key confirmation to the initiator.
            encrypted_nothing = crypto.aead_chacha20poly1305_encrypt(key, 0, b'', hash0)
            msg = struct.pack('<III32s16s', 2, index, sender_index, ephemeral_public, encrypted_nothing)
            msg = ((msg + MAC(HASH((b'mac1----' + static_public)), msg)) + (b'\x00' * 16))
            self.transport.sendto(msg, addr)
            print('login', addr, sender_index)
            # Derive the transport keys (responder: recv first, send second).
            temp = HMAC(chaining_key, b'')
            receiving_key = HMAC(temp, b'\x01')
            sending_key = HMAC(temp, (receiving_key + b'\x02'))
            self.keys[index] = (sender_index, receiving_key, sending_key)
            self.index_generators[index] = itertools.count()
        elif ((cmd == 4) and (len(data) >= 32)):
            # --- Transport data packet ---
            (_, index, counter) = struct.unpack('<IIQ', data[:16])
            (sender_index, receiving_key, sending_key) = self.keys[index]
            packet = crypto.aead_chacha20poly1305_decrypt(receiving_key, counter, data[16:], b'')
            def reply(data):
                # Encrypt *data* back to the peer, padded to 16 bytes.
                counter = next(self.index_generators[index])
                data = (data + (b'\x00' * ((- len(data)) % 16)))
                msg = crypto.aead_chacha20poly1305_encrypt(sending_key, counter, data, b'')
                msg = (struct.pack('<IIQ', 4, sender_index, counter) + msg)
                self.transport.sendto(msg, addr)
                return True
            if packet:
                self.ippacket.handle_ipv4(addr[:2], packet, reply)
            else:
                # Empty payload = keepalive; answer with a keepalive.
                reply(b'')
def validator(package):
    """Validate an uploaded QGIS plugin package.

    Checks the archive size, zip integrity, and safety (no path traversal,
    no .pyc files, no forbidden directories), then extracts and validates
    the plugin metadata and returns it as a list of (key, value) tuples.

    Raises ValidationError on any failed check.
    """
    # Size check: uploaded files expose .size, in-memory files .len.
    try:
        if package.size > PLUGIN_MAX_UPLOAD_SIZE:
            raise ValidationError(_('File is too big. Max size is %s Megabytes') % (PLUGIN_MAX_UPLOAD_SIZE / 1000000))
    except AttributeError:
        if package.len > PLUGIN_MAX_UPLOAD_SIZE:
            raise ValidationError(_('File is too big. Max size is %s Megabytes') % (PLUGIN_MAX_UPLOAD_SIZE / 1000000))
    try:
        # Renamed from `zip`, which shadowed the builtin.
        zf = zipfile.ZipFile(package)
    except Exception:
        raise ValidationError(_('Could not unzip file.'))
    for zname in zf.namelist():
        # Reject path traversal and absolute paths.
        if zname.find('..') != (- 1) or zname.find(os.path.sep) == 0:
            raise ValidationError(_("For security reasons, zip file cannot contain path information (found '{}')".format(zname)))
        if zname.find('.pyc') != (- 1):
            raise ValidationError(_('For security reasons, zip file cannot contain .pyc file'))
        for forbidden_dir in ['__MACOSX', '.git', '__pycache__']:
            if forbidden_dir in zname.split('/'):
                raise ValidationError(_("For security reasons, zip file cannot contain '%s' directory" % (forbidden_dir,)))
    bad_file = zf.testzip()
    if bad_file:
        zf.close()
        del zf
        try:
            raise ValidationError(_('Bad zip (maybe a CRC error) on file %s') % bad_file)
        except UnicodeDecodeError:
            raise ValidationError(_('Bad zip (maybe unicode filename) on file %s') % bad_file, errors='replace')
    namelist = zf.namelist()
    # The plugin must live in a single top-level folder.
    try:
        package_name = namelist[0][:namelist[0].index('/')]
    except Exception:
        raise ValidationError(_('Cannot find a folder inside the compressed package: this does not seems a valid plugin'))
    if package_name.endswith('/'):
        package_name = package_name[:(- 1)]
    initname = package_name + '/__init__.py'
    metadataname = package_name + '/metadata.txt'
    if (initname not in namelist) and (metadataname not in namelist):
        raise ValidationError(_('Cannot find __init__.py or metadata.txt in the compressed package: this does not seems a valid plugin (I searched for %s and %s)') % (initname, metadataname))
    if initname not in namelist:
        raise ValidationError(_('Cannot find __init__.py in plugin package.'))
    metadata = []
    if metadataname in namelist:
        # Primary metadata source: metadata.txt parsed as an ini file.
        try:
            parser = configparser.ConfigParser()
            parser.optionxform = str  # keep keys case-sensitive
            # BUGFIX: readfp() was removed in Python 3.12; read_file() is
            # the direct replacement.
            parser.read_file(StringIO(codecs.decode(zf.read(metadataname), 'utf8')))
            if not parser.has_section('general'):
                raise ValidationError(_("Cannot find a section named 'general' in %s") % metadataname)
            metadata.extend(parser.items('general'))
        except Exception as e:
            raise ValidationError(_('Errors parsing %s. %s') % (metadataname, e))
        metadata.append(('metadata_source', 'metadata.txt'))
    else:
        # Legacy fallback: metadata defined inside __init__.py.
        initcontent = zf.read(initname).decode('utf8')
        metadata.extend(_read_from_init(initcontent, initname))
        if not metadata:
            raise ValidationError(_('Cannot find valid metadata in %s') % initname)
        metadata.append(('metadata_source', '__init__.py'))
    _check_required_metadata(metadata)
    # Load the declared icon from the package; any failure leaves it None.
    try:
        if dict(metadata)['icon'].startswith('./'):
            icon_path = dict(metadata)['icon'][2:]
        else:
            icon_path = dict(metadata)['icon']
        icon = zf.read(package_name + '/' + icon_path)
        icon_file = SimpleUploadedFile(dict(metadata)['icon'], icon, mimetypes.guess_type(dict(metadata)['icon']))
    except Exception:
        icon_file = None
    metadata.append(('icon_file', icon_file))
    # Coerce boolean flags from their string form.
    for flag in PLUGIN_BOOLEAN_METADATA:
        if flag in dict(metadata):
            metadata[metadata.index((flag, dict(metadata)[flag]))] = (flag, (dict(metadata)[flag].lower() == 'true') or (dict(metadata)[flag].lower() == '1'))
    if not re.match('^[A-Za-z][A-Za-z0-9-_]+$', package_name):
        raise ValidationError(_("The name of the top level directory inside the zip package must start with an ASCII letter and can only contain ASCII letters, digits and the signs '-' and '_'."))
    metadata.append(('package_name', package_name))
    min_qgs_version = dict(metadata).get('qgisMinimumVersion')
    dict(metadata).get('qgisMaximumVersion')
    # NOTE(review): tuple-of-strings comparison is lexicographic, not
    # numeric; adequate for the 1.8 threshold but not general.
    if (tuple(min_qgs_version.split('.')) < tuple('1.8'.split('.'))) and (metadataname in namelist):
        # Plugins targeting QGIS < 1.8 must also carry valid metadata in
        # __init__.py, because old QGIS versions only read it from there.
        initcontent = zf.read(initname).decode('utf8')
        try:
            initmetadata = _read_from_init(initcontent, initname)
            initmetadata.append(('metadata_source', '__init__.py'))
            _check_required_metadata(initmetadata)
        except ValidationError as e:
            raise ValidationError(_('qgisMinimumVersion is set to less than 1.8 (%s) and there were errors reading metadata from the __init__.py file. This can lead to errors in versions of QGIS less than 1.8, please either set the qgisMinimumVersion to 1.8 or specify the metadata also in the __init__.py file. Reported error was: %s') % (min_qgs_version, ','.join(e.messages)))
    # BUGFIX: the second arguments were garbled (`' 'Bug tracker'` — a
    # syntax error); they are the human-readable link names.
    _check_url_link(dict(metadata).get('tracker'), 'Bug tracker')
    _check_url_link(dict(metadata).get('repository'), 'Repository')
    _check_url_link(dict(metadata).get('homepage'), 'Home page')
    licensename = package_name + '/LICENSE'
    if licensename not in namelist:
        metadata.append(('license_recommended', 'Yes'))
    zf.close()
    del zf
    if 'author' in dict(metadata):
        if not re.match('^[^/]+$', dict(metadata)['author']):
            raise ValidationError(_('Author name cannot contain slashes.'))
    # Strip whitespace from string values; booleans and the icon pass as-is.
    checked_metadata = []
    for (k, v) in metadata:
        try:
            if not ((k in PLUGIN_BOOLEAN_METADATA) or (k == 'icon_file')):
                checked_metadata.append((k, v.strip()))
            else:
                checked_metadata.append((k, v))
        except UnicodeDecodeError as e:
            raise ValidationError(_("There was an error converting metadata '%s' to UTF-8 . Reported error was: %s") % (k, e))
    return checked_metadata
def Var(term=None, *others, dom=None, id=None):
    """Declare a single decision variable and return its Variable object.

    The domain may be given positionally (term, *others) or via dom=.
    Sets/tuples/lists are normalised, and dense integer lists are collapsed
    into a range. Only integer and symbolic domains are supported; the
    variable name is either explicit (id=) or recovered from the source
    line of the declaration.
    """
    global started_modeling
    # One-time setup hook, unless disabled via options.uncurse.
    if ((not started_modeling) and (not options.uncurse)):
        cursing()
        started_modeling = True
    # No domain given at all: unbounded integer domain.
    if ((term is None) and (dom is None)):
        dom = Domain(math.inf)
    # The domain must come from exactly one source.
    assert (not (term and dom))
    if (term is not None):
        dom = flatten(term, others)
    if (not isinstance(dom, Domain)):
        if isinstance(dom, (set, frozenset)):
            dom = list(dom)
        # Sort dense integer collections; collapse contiguous runs to range.
        if (isinstance(dom, (tuple, list)) and (len(dom) > 1) and isinstance(dom[0], int)):
            dom = sorted(dom)
            if (((dom[(- 1)] - dom[0]) + 1) == len(dom)):
                dom = range(dom[0], (dom[(- 1)] + 1))
        dom = Domain(dom)
    error_if((dom.get_type() not in {TypeVar.INTEGER, TypeVar.SYMBOLIC}), ('Currently, only integer and symbolic variables are supported. Problem with ' + str(dom)))
    var_name = (id if id else extract_declaration_for('Var'))
    error_if((not _valid_identifier(var_name)), (('The variable identifier ' + str(var_name)) + ' is not valid'))
    error_if((var_name in Variable.name2obj), (('The identifier ' + str(var_name)) + ' is used twice. This is not possible'))
    # Comments/tags are harvested from source lines around the declaration.
    (comment, tags) = comment_and_tags_of(function_name='Var')
    assert isinstance(comment, (str, type(None))), 'A comment must be a string (or None). Usually, they are given on plain lines preceding the declaration'
    var_object = (VariableInteger(var_name, dom) if (dom.get_type() == TypeVar.INTEGER) else VariableSymbolic(var_name, dom))
    Variable.name2obj[var_name] = var_object
    # Record the declaration entity for model output.
    EVar(var_object, comment, tags)
    return var_object
def get_latest_table_version(namespace: str, table_name: str, *args, **kwargs) -> Optional[TableVersion]:
    """Return the highest-numbered version of the table, or None when the
    table has no versions."""
    versions = list_table_versions(namespace, table_name, *args, **kwargs).all_items()
    if not versions:
        return None
    # Versions are compared by their numeric version identifier.
    return max(versions, key=lambda v: int(v.table_version))
class GraphvizLexer(RegexLexer):
    """Lexer for the DOT language (graphviz)."""
    name = 'Graphviz'
    # BUGFIX: the url attribute was truncated to an unterminated string
    # literal (`url = '`), a syntax error; restored to the graphviz site.
    url = 'https://www.graphviz.org/doc/info/lang.html'
    aliases = ['graphviz', 'dot']
    filenames = ['*.gv', '*.dot']
    mimetypes = ['text/x-graphviz', 'text/vnd.graphviz']
    version_added = '2.8'
    # States: 'root' handles statements, 'attr_id' one attribute value
    # after '=', and 'xml' nested HTML-like labels (with push/pop on <>).
    tokens = {'root': [('\\s+', Whitespace), ('(#|//).*?$', Comment.Single), ('/(\\\\\\n)?[*](.|\\n)*?[*](\\\\\\n)?/', Comment.Multiline), ('(?i)(node|edge|graph|digraph|subgraph|strict)\\b', Keyword), ('--|->', Operator), ('[{}[\\]:;,]', Punctuation), ('(\\b\\D\\w*)(\\s*)(=)(\\s*)', bygroups(Name.Attribute, Whitespace, Punctuation, Whitespace), 'attr_id'), ('\\b(n|ne|e|se|s|sw|w|nw|c|_)\\b', Name.Builtin), ('\\b\\D\\w*', Name.Tag), ('[-]?((\\.[0-9]+)|([0-9]+(\\.[0-9]*)?))', Number), ('"(\\\\"|[^"])*?"', Name.Tag), ('<', Punctuation, 'xml')], 'attr_id': [('\\b\\D\\w*', String, '#pop'), ('[-]?((\\.[0-9]+)|([0-9]+(\\.[0-9]*)?))', Number, '#pop'), ('"(\\\\"|[^"])*?"', String.Double, '#pop'), ('<', Punctuation, ('#pop', 'xml'))], 'xml': [('<', Punctuation, '#push'), ('>', Punctuation, '#pop'), ('\\s+', Whitespace), ('[^<>\\s]', Name.Tag)]}
# BUGFIX: the marker was garbled to a bare `.parametrize(...)`; restore it.
@pytest.mark.parametrize('broken_role', [qt_api.QtCore.Qt.ItemDataRole.ToolTipRole, qt_api.QtCore.Qt.ItemDataRole.StatusTipRole, qt_api.QtCore.Qt.ItemDataRole.WhatsThisRole, qt_api.QtCore.Qt.ItemDataRole.SizeHintRole, qt_api.QtCore.Qt.ItemDataRole.FontRole, qt_api.QtCore.Qt.ItemDataRole.BackgroundRole, qt_api.QtCore.Qt.ItemDataRole.ForegroundRole, qt_api.QtCore.Qt.ItemDataRole.TextAlignmentRole, qt_api.QtCore.Qt.ItemDataRole.CheckStateRole])
def test_broken_types(check_model, broken_role):
    """A model returning a plain object() for a typed role must fail the
    model checker."""
    class BrokenTypeModel(qt_api.QtCore.QAbstractListModel):
        def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
            if parent == qt_api.QtCore.QModelIndex():
                return 1
            else:
                return 0

        def data(self, index=qt_api.QtCore.QModelIndex(), role=qt_api.QtCore.Qt.ItemDataRole.DisplayRole):
            if role == broken_role:
                # Deliberately return a type Qt cannot convert for this role.
                return object()
            else:
                return None

    check_model(BrokenTypeModel(), should_pass=False)
# NOTE(review): the decorator prefixes ('@pytest.mark.' and '@parameterized')
# were truncated in this copy; restored below — confirm against upstream.
@pytest.mark.skipif((literal_eval(os.getenv('TEST_SAGEMAKER', 'False')) is not True), reason='Skipping test because should only be run when releasing minor transformers version')
@pytest.mark.usefixtures('sm_env')
@parameterized_class([{'framework': 'pytorch', 'script': 'run_glue_model_parallelism.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}}, {'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}}])
class MultiNodeTest(unittest.TestCase):
    """Multi-node SageMaker model-parallel training smoke test.

    Parameterized over two training scripts; launches a HuggingFace estimator
    with smdistributed model parallelism and checks runtime/metric thresholds.
    """

    def setUp(self):
        # Copy the example training script into the SageMaker test path
        # provided by the 'sm_env' fixture.
        if (self.framework == 'pytorch'):
            subprocess.run(f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(), encoding='utf-8', check=True)
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for smdistributed MPI model parallelism."""
        mpi_options = {'enabled': True, 'processes_per_host': 8}
        smp_options = {'enabled': True, 'parameters': {'microbatches': 4, 'placement_strategy': 'spread', 'pipeline': 'interleaved', 'optimize': 'speed', 'partitions': 4, 'ddp': True}}
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = ('trainer' if (self.script == 'run_glue.py') else 'smtrainer')
        return HuggingFace(entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}', instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path, 'max_steps': 500}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version='py36')

    def save_results_as_csv(self, job_name):
        """Export the training job's metric history as CSV into the test path."""
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')

    @parameterized.expand([(1,)])
    def test_scripz(self, instance_count):
        # NOTE(review): method name kept as-is (test selection may rely on it).
        estimator = self.create_estimator(instance_count)
        estimator.fit()
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        eval_accuracy = list(result_metrics_df[(result_metrics_df.metric_name == 'eval_accuracy')]['value'])
        eval_loss = list(result_metrics_df[(result_metrics_df.metric_name == 'eval_loss')]['value'])
        # Fall back to a huge sentinel so a missing field fails the threshold.
        train_runtime = Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        assert (train_runtime <= self.results['train_runtime'])
        assert all(((t >= self.results['eval_accuracy']) for t in eval_accuracy))
        assert all(((t <= self.results['eval_loss']) for t in eval_loss))
        # Persist raw numbers for the release comparison.
        with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
# NOTE(review): the decorator was truncated to '_grad()' in this copy;
# restored '@torch.no_grad()' — evaluation must not build autograd graphs.
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, evaluator_list, device, args):
    """Run evaluation over `data_loader` and return a dict of averaged stats.

    Accumulates criterion losses, feeds post-processed box/mask predictions to
    each evaluator, and (on the main process) scores the gathered predictions
    against the referring-expression COCO ground truth selected by the batch's
    ``dataset_name``.
    """
    model.eval()
    criterion.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    predictions = []
    for (samples, targets) in metric_logger.log_every(data_loader, 10, header):
        # All targets in a batch share one dataset; remembered for GT selection below.
        dataset_name = targets[0]['dataset_name']
        samples = samples.to(device)
        captions = [t['caption'] for t in targets]
        targets = utils.targets_to(targets, device)
        outputs = model(samples, captions, targets)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        # Reduce losses across processes for logging only.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {k: (v * weight_dict[k]) for (k, v) in loss_dict_reduced.items() if (k in weight_dict)}
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v for (k, v) in loss_dict_reduced.items()}
        metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        orig_target_sizes = torch.stack([t['orig_size'] for t in targets], dim=0)
        results = postprocessors['bbox'](outputs, orig_target_sizes)
        if ('segm' in postprocessors.keys()):
            target_sizes = torch.stack([t['size'] for t in targets], dim=0)
            results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
        res = {target['image_id'].item(): output for (target, output) in zip(targets, results)}
        for evaluator in evaluator_list:
            evaluator.update(res)
        # Collect per-instance predictions in COCO result format
        # (category_id fixed to 1: single-foreground-class setting).
        for (p, target) in zip(results, targets):
            for (s, b, m) in zip(p['scores'], p['boxes'], p['rle_masks']):
                predictions.append({'image_id': target['image_id'].item(), 'category_id': 1, 'bbox': b.tolist(), 'segmentation': m, 'score': s.item()})
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    for evaluator in evaluator_list:
        evaluator.synchronize_between_processes()
    refexp_res = None
    for evaluator in evaluator_list:
        if isinstance(evaluator, CocoEvaluator):
            evaluator.accumulate()
            evaluator.summarize()
        elif isinstance(evaluator, RefExpEvaluator):
            refexp_res = evaluator.summarize()
    stats = {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
    for evaluator in evaluator_list:
        if isinstance(evaluator, CocoEvaluator):
            if ('bbox' in postprocessors.keys()):
                stats['coco_eval_bbox'] = evaluator.coco_eval['bbox'].stats.tolist()
            if ('segm' in postprocessors.keys()):
                stats['coco_eval_masks'] = evaluator.coco_eval['segm'].stats.tolist()
    if (refexp_res is not None):
        stats.update(refexp_res)
    # Gather predictions from all processes; only the main process scores them.
    gathered_pred_lists = utils.all_gather(predictions)
    predictions = [p for p_list in gathered_pred_lists for p in p_list]
    eval_metrics = {}
    if utils.is_main_process():
        # 'dataset_name' comes from the last batch; assumes the loader serves
        # a single dataset — TODO confirm for mixed-dataset loaders.
        if (dataset_name == 'refcoco'):
            coco_gt = COCO(os.path.join(args.coco_path, 'refcoco/instances_refcoco_val.json'))
        elif (dataset_name == 'refcoco+'):
            coco_gt = COCO(os.path.join(args.coco_path, 'refcoco+/instances_refcoco+_val.json'))
        elif (dataset_name == 'refcocog'):
            coco_gt = COCO(os.path.join(args.coco_path, 'refcocog/instances_refcocog_val.json'))
        else:
            raise NotImplementedError
        coco_pred = coco_gt.loadRes(predictions)
        coco_eval = COCOeval(coco_gt, coco_pred, iouType='segm')
        coco_eval.params.useCats = 0  # class-agnostic evaluation
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        (precision_at_k, overall_iou, mean_iou) = calculate_bbox_precision_at_k_and_iou_metrics(coco_gt, coco_pred)
        eval_metrics.update({f'bbox {k}': m for (k, m) in zip([0.5, 0.6, 0.7, 0.8, 0.9], precision_at_k)})
        eval_metrics.update({'bbox overall_iou': overall_iou, 'bbox mean_iou': mean_iou})
        (precision_at_k, overall_iou, mean_iou) = calculate_precision_at_k_and_iou_metrics(coco_gt, coco_pred)
        eval_metrics.update({f'segm {k}': m for (k, m) in zip([0.5, 0.6, 0.7, 0.8, 0.9], precision_at_k)})
        eval_metrics.update({'segm overall_iou': overall_iou, 'segm mean_iou': mean_iou})
        print(eval_metrics)
    stats.update(eval_metrics)
    return stats
def plot_hyperparam(hyperparam_to_plot, fig=None, ax_arr=None, big_ax=None, ylims=YLIMS, legend=False, dpi=300, figsize=(6, 5.5)):
    # Plot boxplots of frame error / syllable error rate against one
    # hyperparameter ('window_size' or 'hidden_size'), one column per species.
    # Reads the module-level globals hyperparams_expt_df, TRAIN_SET_DUR_TO_USE
    # and POST_PROCESS_PALETTE. Returns (fig, ax_arr).
    if ((fig is None) and (ax_arr is None)):
        (fig, ax_arr) = plt.subplots(2, 2, dpi=dpi, figsize=figsize)
        for ax_ in ax_arr.flatten():
            ax_.tick_params(pad=0.1)
    if (big_ax is None):
        # Invisible full-figure axes that only carries the shared x label.
        big_ax = fig.add_subplot(111, frameon=False)
        big_ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
        big_ax.grid(False)
    SPECIES = ('Bengalese Finch', 'Canary')
    HYPERPARAM_EXPTS = {'window_size': 'Window size', 'hidden_size': 'Hidden state size'}
    # Hyperparameter values used for the final reported results, per species.
    HYERPARAM_FOR_FINAL_RESULTS = {'Bengalese Finch': {'hidden_size': 256, 'window_size': 176}, 'Canary': {'hidden_size': 512, 'window_size': 370}}
    METRICS = {'avg_error': 'Frame\nerror (%)', 'avg_segment_error_rate': 'Syllable\nerror rate (%)'}
    for (hyperparam_num, (hyperparam_expt, hyperparam_label)) in enumerate(HYPERPARAM_EXPTS.items()):
        # Only the requested hyperparameter experiment is drawn.
        if (hyperparam_expt != hyperparam_to_plot):
            continue
        for (col, species) in enumerate(SPECIES):
            for (metric_num, (metric_column_name, metric_label)) in enumerate(METRICS.items()):
                # Results for this species/experiment at that species' chosen
                # training-set duration.
                data = hyperparams_expt_df[(((hyperparams_expt_df.species == species) & (hyperparams_expt_df.hyperparam_expt == hyperparam_expt)) & (hyperparams_expt_df.train_set_dur == TRAIN_SET_DUR_TO_USE[species]))]
                row = metric_num
                ax = ax_arr[(row, col)]
                if (row == 0):
                    ax.set_title(species)
                if ((row == 1) and (col == 1)):
                    plot_legend = True
                else:
                    plot_legend = False
                g = sns.boxplot(data=data, x='hyperparam_val', y=metric_column_name, showfliers=False, hue='Post-processing', palette=POST_PROCESS_PALETTE, ax=ax)
                if plot_legend:
                    # Keep the handles for the figure-level legend below, then
                    # drop the per-axes legend seaborn added.
                    (handles, labels) = ax.get_legend_handles_labels()
                    g.legend_.remove()
                ax.set_ylim(ylims[species][metric_column_name])
                ax.set_xlabel('')
                if ((row == 1) or (row == 3)):
                    # Bottom row: mark the tick that matches the final-results
                    # value. NOTE(review): '$f{...}$' mathtext renders the 'f'
                    # literally — presumably meant as emphasis; confirm.
                    new_xticklabels = []
                    for xticklabel in ax.get_xticklabels():
                        if (int(xticklabel.get_text()) == HYERPARAM_FOR_FINAL_RESULTS[species][hyperparam_expt]):
                            new_xticklabels.append(f'$f{{{xticklabel.get_text()}}}$')
                        else:
                            new_xticklabels.append(xticklabel.get_text())
                    ax.set_xticklabels(new_xticklabels, rotation=45)
                else:
                    ax.set_xticklabels([])
                if (col == 0):
                    ax.set_ylabel(f'''{metric_label}
max. train dur.''')
                else:
                    ax.set_ylabel('')
    big_ax.set_xlabel(hyperparam_label, fontweight='bold', labelpad=15)
    sns.despine(fig)
    if legend:
        # NOTE(review): 'handles' is only bound when the plotted hyperparam
        # reached row==1, col==1 above — legend=True otherwise raises
        # NameError; confirm intended usage.
        big_ax.legend(title='Post-processing', handles=handles, labels=['With', 'Without'], loc='lower right', bbox_to_anchor=((- 0.2), (- 0.05)))
    return (fig, ax_arr)
def load_plugin_elements_by_name(plugin_name: str):
    """Load a plugin's metadata, OpenAPI spec, API callers and output models.

    Resolves the plugin directory from ``PluginName``, reads its manifest and
    OpenAPI YAML (reconciling paths against ``paths/__init__.py`` when they
    disagree), and imports one ``call_api`` per endpoint plus an optional
    per-endpoint ``convert`` output model.

    Returns:
        dict with keys: name, description, meta_info, spec_model,
        endpoint2caller, endpoint2output_model, need_auth.
    """
    assert (plugin_name in PluginName.__members__), 'Unknown plugin name {}.'.format(plugin_name)
    plugin_dir_name = PluginName[plugin_name].value
    plugin_file_path = os.path.join(CURRENT_PATH, plugin_dir_name)
    data_model_file_path = os.path.join(CURRENT_PATH, '..', 'data_model', 'plugin', plugin_dir_name)
    meta_info_path = os.path.join(plugin_file_path, AI_PLUGIN_FILE)
    assert os.path.exists(meta_info_path), f'Missing file {meta_info_path} that contains meta info for {plugin_name}'
    with open(meta_info_path, 'r') as f:
        meta_info = json.load(f)
    tmp = _load_module(plugin_dir_name, os.path.join(plugin_file_path, 'paths', '__init__.py'))
    assert hasattr(tmp, 'path_dict'), 'Missing variable path_dict in __init__.py'
    yaml_path = os.path.join(plugin_file_path, PLUGIN_SPEC_FILE)
    assert os.path.exists(yaml_path), f'Missing file: {yaml_path}'
    openapi_yaml_json = APIYamlModel.from_yaml(yaml_path).to_json()
    if (sorted(list(openapi_yaml_json['paths'].keys())) != sorted(list(tmp.path_dict.values()))):
        # The YAML and path_dict disagree: rewrite a temp YAML restricted to
        # path_dict's endpoints and load that instead.
        print(f'{yaml_path} and {plugin_dir_name}/paths/__init__.py do not match. Load the later.')
        openapi_yaml_json['paths'] = {path: openapi_yaml_json['paths'][path] for path in tmp.path_dict.values()}
        new_yaml_file_path = os.path.join(plugin_file_path, 'tmp', 'openapi.yaml')
        os.makedirs(os.path.dirname(new_yaml_file_path), exist_ok=True)
        with open(new_yaml_file_path, 'w') as f:
            yaml.safe_dump(openapi_yaml_json, f, sort_keys=False)
        yaml_path = new_yaml_file_path
    spec_model = SpecModel(yaml_path)
    description = (spec_model.full_spec['info']['description'] if ('description' in spec_model.full_spec['info']) else 'No description.')
    filename2endpoint = tmp.path_dict
    endpoint2caller = {}
    # Bug fix: defaultdict calls its factory with NO arguments, so the
    # original defaultdict(lambda x: x) raised TypeError on a missing key.
    # The factory must *return* the identity function instead.
    endpoint2output_model = defaultdict(lambda: (lambda x: x))
    for (fn, ep) in filename2endpoint.items():
        tmp = _load_module(f'{plugin_dir_name}:{fn}:caller', os.path.join(plugin_file_path, 'paths', (fn + '.py')))
        assert hasattr(tmp, 'call_api'), f'Missing function call_api in {fn}.py'
        endpoint2caller[ep] = tmp.call_api
        data_model_path = os.path.join(data_model_file_path, (fn + '.py'))
        if (not os.path.exists(data_model_path)):
            # No data model module: fall back to identity conversion.
            output_model = (lambda x: x)
        else:
            tmp = _load_module(f'{plugin_dir_name}:{fn}:output_model', data_model_path)
            assert hasattr(tmp, 'convert'), f'Missing function convert in {fn}.py'
            output_model = tmp.convert
        endpoint2output_model[ep] = output_model
    need_auth = (meta_info['manifest']['auth']['type'] not in [None, 'None', 'none', 'Null', 'null'])
    return {'name': plugin_name, 'description': description, 'meta_info': meta_info, 'spec_model': spec_model, 'endpoint2caller': endpoint2caller, 'endpoint2output_model': endpoint2output_model, 'need_auth': need_auth}
class FileUploader():
    """Upload files to S3 with whole-percent progress printing.

    Builds one credentialed client from the boto3 session and reuses it for
    every upload (the original created the session but then built fresh,
    throw-away clients on each call).
    """

    def __init__(self, stream=False):
        self.total = 0      # size in bytes of the file currently uploading
        self.uploaded = 0   # bytes transferred so far
        self.percent = 0    # last whole percent already printed
        self.session = boto3.Session(aws_access_key_id=AWSKEY, aws_secret_access_key=AWSSECRET)
        # Reusable credentialed client (originally an unused plain client).
        self.s3 = self.session.client('s3')
        self.stream = stream

    def upload_callback(self, size):
        """boto3 transfer callback: print progress whenever the whole percent grows."""
        if (self.total == 0):
            # Unknown total; cannot compute a percentage.
            return
        self.uploaded += size
        percent = int(((self.uploaded / self.total) * 100))
        if (percent > self.percent):
            print('{} %'.format(percent))
            self.percent = percent

    def upload(self, bucket, key, file):
        """Upload `file` to s3://bucket/key, streaming via a file object if requested."""
        self.total = os.stat(file).st_size
        # 5 GiB multipart threshold, as in the original positional argument.
        config = TransferConfig((5 * (1024 ** 3)))
        if self.stream:
            with open(file, 'rb') as data:
                self.s3.upload_fileobj(data, bucket, key, Config=config, Callback=self.upload_callback)
        else:
            self.s3.upload_file(file, bucket, key, Config=config, Callback=self.upload_callback)
class HoverXRefStandardDomainMixin(HoverXRefBaseDomain):
    """Mixin that tags resolved standard-domain references with hoverxref data.

    Each resolver delegates to the parent domain and, when the reference is
    neither ignored nor of a non-configured type, injects hoverxref metadata
    into the resolved node.
    """

    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        # Route hoverxref-enabled role types through our own ref resolver.
        if typ in self.hoverxref_types:
            return self._resolve_ref_xref(env, fromdocname, builder, typ, target, node, contnode)
        return super().resolve_xref(env, fromdocname, builder, typ, target, node, contnode)

    def _resolve_ref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        refnode = super()._resolve_ref_xref(env, fromdocname, builder, typ, target, node, contnode)
        if refnode is None:
            return refnode
        # Evaluate both conditions eagerly (matches the original any([...])).
        ignored = self._is_ignored_ref(env, target)
        enabled = env.config.hoverxref_auto_ref or (typ in self.hoverxref_types)
        if ignored or (not enabled):
            return refnode
        self._inject_hoverxref_data(env, refnode, typ)
        return refnode

    def _resolve_obj_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        refnode = super()._resolve_obj_xref(env, fromdocname, builder, typ, target, node, contnode)
        if refnode is None:
            return refnode
        ignored = self._is_ignored_ref(env, target)
        wrong_type = typ not in env.config.hoverxref_roles
        if ignored or wrong_type:
            return refnode
        self._inject_hoverxref_data(env, refnode, typ)
        return refnode

    def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        refnode = super()._resolve_numref_xref(env, fromdocname, builder, typ, target, node, contnode)
        if refnode is None:
            return refnode
        ignored = self._is_ignored_ref(env, target)
        wrong_type = typ not in env.config.hoverxref_roles
        if ignored or wrong_type:
            return refnode
        self._inject_hoverxref_data(env, refnode, typ)
        return refnode
def train(num_epochs, model, optimizer, train_loader, val_loader, fabric):
    # Lightning-Fabric training loop: per-epoch training pass with a running
    # train accuracy, followed by a no-grad validation pass.
    for epoch in range(num_epochs):
        # Fresh metric each epoch so compute() reflects this epoch only.
        train_acc = torchmetrics.Accuracy(task='multiclass', num_classes=10).to(fabric.device)
        model.train()
        for (batch_idx, (features, targets)) in enumerate(train_loader):
            # Re-enter train mode: eval() is set at the bottom of each step.
            model.train()
            logits = model(features)
            loss = F.cross_entropy(logits, targets)
            optimizer.zero_grad()
            fabric.backward(loss)  # fabric handles device/precision details
            optimizer.step()
            if (not (batch_idx % 300)):
                print(f'Epoch: {(epoch + 1):04d}/{num_epochs:04d} | Batch {batch_idx:04d}/{len(train_loader):04d} | Loss: {loss:.4f}')
            model.eval()
            with torch.no_grad():
                # Score the logits just computed (pre-step weights).
                predicted_labels = torch.argmax(logits, 1)
                train_acc.update(predicted_labels, targets)
        model.eval()
        with torch.no_grad():
            val_acc = torchmetrics.Accuracy(task='multiclass', num_classes=10).to(fabric.device)
            for (features, targets) in val_loader:
                outputs = model(features)
                predicted_labels = torch.argmax(outputs, 1)
                val_acc.update(predicted_labels, targets)
        fabric.print(f'Epoch: {(epoch + 1):04d}/{num_epochs:04d} | Train acc.: {(train_acc.compute() * 100):.2f}% | Val acc.: {(val_acc.compute() * 100):.2f}%')
        # Reset both metrics before the next epoch.
        (train_acc.reset(), val_acc.reset())
class RoIPointPool3d(nn.Module):
    """Pools raw points and per-point features lying inside (enlarged) 3D RoI boxes."""

    def __init__(self, num_sampled_points=512, pool_extra_width=1.0):
        """
        Args:
            num_sampled_points: number of points sampled per box.
            pool_extra_width: extra width added around each box when pooling.
        """
        super().__init__()
        self.num_sampled_points = num_sampled_points
        self.pool_extra_width = pool_extra_width

    def forward(self, points, point_features, boxes3d):
        # Delegate to the autograd Function wrapping the pooling op.
        pooled = RoIPointPool3dFunction.apply(
            points,
            point_features,
            boxes3d,
            self.pool_extra_width,
            self.num_sampled_points,
        )
        return pooled
class LoadNPYImaged(MapTransform):
    """Dictionary transform that loads image/label arrays from one ``.npy`` file.

    Each key in the incoming dict maps to a NIfTI file path; the path is
    rewritten to the matching ``.npy`` file, which is loaded once for all
    keys and then sliced per key: channel 0 (kept as a channel axis) for
    'image', channel 4 otherwise.
    NOTE(review): the channel indices assume a multi-channel array layout
    produced by preprocessing — confirm against the dataset pipeline.
    """

    def __init__(self, keys, allow_missing_keys: bool = False):
        super().__init__(keys, allow_missing_keys)
        self.keys = keys

    def __call__(self, data):
        d = dict(data)
        data_npy = None
        for key in data.keys():
            file_path = d[key]
            if (data_npy is None):
                # Strip the NIfTI folder prefix and swap the extension, then
                # load the combined array once.
                if ('imagesTr' in file_path):
                    file_path = file_path.replace('imagesTr/', '')
                elif ('labelsTr' in file_path):
                    # Bug fix: the original tested `'labelsTr' in key`, which
                    # never matches (key is e.g. 'label'), so label paths were
                    # never stripped of their folder prefix.
                    file_path = file_path.replace('labelsTr/', '')
                file_path = file_path.replace('nii.gz', 'npy')
                data_npy = np.load(file_path)
            if (key == 'image'):
                d[key] = data_npy[0:1]  # keep a leading channel axis
            else:
                d[key] = data_npy[4]
        return d
class Solution(object):
    def sumNumbers(self, root):
        """Sum all root-to-leaf numbers of a binary tree (iterative BFS).

        Each root-to-leaf path spells a decimal number with the root as the
        most significant digit; returns the sum over all leaves. Empty tree
        sums to 0.
        """
        # deque gives O(1) popleft; the original list.pop(0) was O(n) per
        # dequeue, making the traversal quadratic in the worst case.
        from collections import deque
        if root is None:
            return 0
        total = 0
        queue = deque([(root, root.val)])
        while queue:
            node, value = queue.popleft()
            if node.left is None and node.right is None:
                # Leaf: this path's number is complete.
                total += value
                continue
            if node.left:
                queue.append((node.left, value * 10 + node.left.val))
            if node.right:
                queue.append((node.right, value * 10 + node.right.val))
        return total
def build_state_prediction_dataset(args):
    """Build a state-prediction dataset from recorded playthroughs.

    Reads JSON playthroughs from ``args.input`` (one object per non-empty
    line, trailing commas tolerated), compresses graphs through a shared
    ``GraphDataset`` index, and writes examples plus the graph index to
    ``args.output`` (default: ``<input>.sp.<graph_type>.json``).
    """
    # Read with a context manager so the input file is closed deterministically
    # (the original iterated a generator over a bare open() and never closed it).
    with open(args.input) as f:
        playthroughs = [json.loads(line.rstrip(',\n')) for line in f if (len(line.strip()) > 1)]
    graph_dataset = GraphDataset()
    dataset = []
    for example in next_example(playthroughs):
        (root, candidates) = (example[0], example[1:])
        # Skip examples with too few alternatives to learn from.
        if (len(candidates) < args.min_candidates):
            continue
        previous_graph = graph_dataset.compress(root['graph_{}'.format(args.graph_type)])
        graph_choices = [graph_dataset.compress(candidate['graph_{}'.format(args.graph_type)]) for candidate in candidates]
        for (i, candidate) in enumerate(candidates):
            dataset.append({'game': candidate['game'], 'step': candidate['step'], 'action': candidate['action'], 'previous_graph': previous_graph, 'target_graph': graph_choices[i], 'graph_choices': graph_choices})
    if (args.output is None):
        args.output = (os.path.splitext(args.input)[0] + '.sp.{}.json'.format(args.graph_type))
    data = {'graph_index': graph_dataset.dumps(), 'examples': dataset}
    with open(args.output, 'w') as f:
        json.dump(data, f)
    if args.verbose:
        print('This dataset has {:,} datapoints.'.format(len(dataset)))
class JobListCategory(JobCategoryMenu, JobMixin, ListView):
    """Paginated list of visible jobs restricted to one category (by slug)."""

    paginate_by = 25
    template_name = 'jobs/job_category_list.html'

    def get_queryset(self):
        slug = self.kwargs['slug']
        # Remember the category for get_context_data; 404 on unknown slugs.
        self.current_category = get_object_or_404(JobCategory, slug=slug)
        return Job.objects.visible().select_related().filter(category__slug=slug)

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['current_category'] = self.current_category
        return ctx
class BaseOptions():
    """Base command-line options shared by train/test entry points.

    ``initialize`` registers all arguments; ``parse`` parses them, derives the
    experiment name from the model config, resolves GPU ids, and (when
    training) writes the options and creates a TensorBoard writer.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False  # guards against double registration

    def initialize(self):
        """Register every command-line argument on the parser."""
        self.parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        self.parser.add_argument('--gpu_ids', type=str, default='1', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        self.parser.add_argument('--logs_dir', type=str, default='./logs', help='models are saved here')
        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--model', type=str, default='pvqvae', choices=['pvqvae', 'rand_tf', 'resnet2vq', 'bert2vq'], help='chooses which model to use.')
        self.parser.add_argument('--ckpt', type=str, default=None, help='ckpt to load.')
        self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--use_bin_sdf', type=str, default='0', help='use binarized sdf for training vox model or not')
        self.parser.add_argument('--vq_model', type=str, default='vqvae', choices=['vqvae', 'pvqvae'], help='vqvae model to use.')
        self.parser.add_argument('--vq_cfg', type=str, default='configs/pvqvae_snet.yaml', help='vqvae model config file')
        self.parser.add_argument('--vq_dset', type=str, default=None, help='dataset vqvae originally trained on')
        self.parser.add_argument('--vq_cat', type=str, default=None, help='for setting code dir in XXXCodeDataset.')
        self.parser.add_argument('--vq_ckpt', type=str, default=None, help='vq ckpt to load.')
        self.parser.add_argument('--vq_note', type=str, default='default', help='for different setting of p-vqvae. used in extract_code.py')
        self.parser.add_argument('--tf_cfg', type=str, default='configs/rand_tf_snet_code.yaml', help='tf model config file')
        self.parser.add_argument('--dataset_mode', type=str, default='snet', help='chooses how datasets are loaded. [mnist, snet, abc, snet-abc]')
        self.parser.add_argument('--trunc_thres', type=float, default=0.2, help='threshold for truncated sdf. value will be: sdf=torch.clamp(sdf, -trunc_thres, trunc_thres)')
        self.parser.add_argument('--iou_thres', type=float, default=0.0, help='threshold for computing 3d iou.')
        self.parser.add_argument('--ratio', type=float, default=1.0, help='ratio of the dataset to use')
        self.parser.add_argument('--cat', type=str, default='chair', help='category for shapenet')
        # Bug fix: the default here was missing entirely ('default=,', a syntax
        # error). Restored the conventional "no limit" default; `type=int` only
        # applies to values passed on the command line, not to this default.
        self.parser.add_argument('--max_dataset_size', default=float('inf'), type=int, help='chooses how datasets are loaded. [mnist, sdf, abc, snet-abc]')
        self.parser.add_argument('--nThreads', default=9, type=int, help='# threads for loading data')
        self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        self.parser.add_argument('--pix3d_mode', type=str, default='noBG')
        self.parser.add_argument('--snet_mode', type=str, default='noBG')
        self.parser.add_argument('--use_marginal', type=str, default='0')
        self.parser.add_argument('--resnet2vq_ckpt', type=str, default=None)
        self.parser.add_argument('--resnet_model', type=str, default=None)
        self.parser.add_argument('--resnet_cfg', type=str, default='configs/resnet2vq_pix3d.yaml', help='resnet2XX model config file')
        self.parser.add_argument('--resnet_ckpt', type=str, default=None)
        self.parser.add_argument('--resnet_arch', type=str, default='resnet18')
        self.parser.add_argument('--resnet_norm', type=str, default='gn', choices=['bn', 'gn'])
        self.parser.add_argument('--resnet_dset', type=str, help='resnet is trained on which dset')
        self.parser.add_argument('--bert_cfg', type=str, default='configs/bert2vq_shapeglot.yaml', help='bert2VQ model config file')
        self.parser.add_argument('--n_less', type=int, default=0, help='for less context')
        self.parser.add_argument('--alpha', type=float, default=0.75, help='for less context')
        self.parser.add_argument('--topk', type=int, default=30, help='for less context')
        self.parser.add_argument('--debug', default='0', type=str, choices=['0', '1'], help='if true, debug mode')
        self.parser.add_argument('--seed', default=111, type=int, help='seed')
        self.parser.add_argument('--profiler', default='0', type=str, choices=['0', '1'], help='use profiler or not')
        self.initialized = True

    def parse(self):
        """Parse args, derive the run name, resolve GPUs and set up logging dirs."""
        if (not self.initialized):
            self.initialize()
        self.opt = self.parser.parse_args()
        self.opt.isTrain = self.isTrain  # set by the Train/Test subclass
        self.opt.device = 'cuda'
        # Derive a descriptive experiment name from the model's config file.
        if (self.opt.model in ['vqvae', 'pvqvae']):
            configs = OmegaConf.load(self.opt.vq_cfg)
            mparam = configs.model.params
            lparam = configs.lossconfig.params
            ddconfig = mparam.ddconfig
            zdim = ddconfig.z_channels
            name = ('%s-k%s-d%s-ch%s-z%s-codeW%s-lpipsW%s' % (self.opt.name, mparam.n_embed, mparam.embed_dim, ddconfig.ch, ddconfig.z_channels, lparam.codebook_weight, lparam.perceptual_weight))
        elif (self.opt.model in ['rand_tf']):
            configs = OmegaConf.load(self.opt.tf_cfg)
            if self.opt.isTrain:
                if ('transformer' in self.opt.model):
                    tf_arch = configs.model.arch
                    pe_conf = configs.pe
                    ntokens_tf = configs.model.params.ntokens
                    d_tf = configs.model.params.embed_dim
                    d_hid = configs.model.params.d_hid
                    nhead = configs.model.params.nhead
                    nlayers_enc = configs.model.params.nlayers_enc
                    nlayers_dec = configs.model.params.nlayers_dec
                    name = ('%s-arch-%s-k%s-tfDim%s-hidDim%s-nH%s-nEnc%s-nDec%s-posD%s-posInit%s' % (self.opt.name, tf_arch, ntokens_tf, d_tf, d_hid, nhead, nlayers_enc, nlayers_dec, pe_conf.pos_embed_dim, pe_conf.init_factor))
                else:
                    name = self.opt.name
            else:
                name = self.opt.name
        self.opt.name = name
        # Turn the comma-separated gpu id string into a list of non-negative ints.
        self.opt.gpu_ids_str = self.opt.gpu_ids
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)  # renamed from 'id' (shadowed the builtin)
            if (gpu_id >= 0):
                self.opt.gpu_ids.append(gpu_id)
        if (len(self.opt.gpu_ids) > 0):
            torch.cuda.set_device(self.opt.gpu_ids[0])
        # Echo all options to stdout.
        args = vars(self.opt)
        print(' Options ')
        for (k, v) in sorted(args.items()):
            print(('%s: %s' % (str(k), str(v))))
        print(' End ')
        if self.opt.isTrain:
            # Persist options and create the TensorBoard writer for this run.
            expr_dir = os.path.join(self.opt.logs_dir, self.opt.name)
            utils.util.mkdirs(expr_dir)
            file_name = os.path.join(expr_dir, 'opt.txt')
            with open(file_name, 'wt') as opt_file:
                opt_file.write(' Options \n')
                for (k, v) in sorted(args.items()):
                    opt_file.write(('%s: %s\n' % (str(k), str(v))))
                opt_file.write(' End \n')
            tb_dir = ('%s/tboard' % expr_dir)
            if (not os.path.exists(tb_dir)):
                os.makedirs(tb_dir)
            self.opt.tb_dir = tb_dir
            writer = SummaryWriter(log_dir=tb_dir)
            self.opt.writer = writer
        return self.opt
class CurricSampler(Sampler):
    """Class-aware sampler: cycles over classes, sampling indices within each class."""

    def __init__(self, data_source, num_samples_cls=1):
        """
        Args:
            data_source: dataset exposing integer class labels via ``.labels``.
            num_samples_cls: consecutive samples drawn per visited class.
        """
        labels = data_source.labels
        n_classes = len(np.unique(labels))
        # Infinite (shuffled) cycle over class indices.
        self.class_iter = RandomCycleIter(range(n_classes))
        # Bucket dataset indices by class label.
        buckets = [[] for _ in range(n_classes)]
        for idx, label in enumerate(labels):
            buckets[label].append(idx)
        self.data_iter_list = [RandomCycleIter(bucket) for bucket in buckets]
        # One "epoch" = size of the largest class times the number of classes,
        # so minority classes are oversampled to balance.
        self.num_samples = max(len(bucket) for bucket in buckets) * len(buckets)
        self.num_samples_cls = num_samples_cls

    def __iter__(self):
        return class_aware_sample_generator(self.class_iter, self.data_iter_list, self.num_samples, self.num_samples_cls)

    def __len__(self):
        return self.num_samples
# NOTE(review): the decorator was truncated to '_families' in this copy;
# restored '@parametrize_families' (parametrizes over xunit_family values).
@parametrize_families
def test_record_testsuite_property(pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str) -> None:
    """record_testsuite_property must emit suite-level <property> nodes in the XML."""
    pytester.makepyfile('\n    def test_func1(record_testsuite_property):\n        record_testsuite_property("stats", "all good")\n\n    def test_func2(record_testsuite_property):\n        record_testsuite_property("stats", 10)\n    ')
    (result, dom) = run_and_parse(family=xunit_family)
    assert (result.ret == 0)
    node = dom.find_first_by_tag('testsuite')
    properties_node = node.find_first_by_tag('properties')
    p1_node = properties_node.find_nth_by_tag('property', 0)
    p2_node = properties_node.find_nth_by_tag('property', 1)
    # Both values are serialized as strings in the XML.
    p1_node.assert_attr(name='stats', value='all good')
    p2_node.assert_attr(name='stats', value='10')
def test_pipe_Bits():
    """Run NormalQueueRTL(Bits32, depth=2) against a fixed test-vector table."""
    B1 = mk_bits(1)
    B32 = mk_bits(32)
    # One row per cycle; columns presumably map to enq/deq port signals with
    # '?' meaning "don't care" — confirm against run_tv_test's port order.
    test_vectors = [
        [B1(1), B1(1), B32(123), B1(0), B1(0), '?'],
        [B1(1), B1(1), B32(345), B1(0), B1(1), B32(123)],
        [B1(0), B1(0), B32(567), B1(0), B1(1), B32(123)],
        [B1(0), B1(0), B32(567), B1(1), B1(1), B32(123)],
        [B1(0), B1(1), B32(567), B1(1), B1(1), B32(345)],
        [B1(1), B1(1), B32(567), B1(0), B1(0), '?'],
        [B1(1), B1(1), B32(0), B1(1), B1(1), B32(567)],
        [B1(1), B1(1), B32(1), B1(1), B1(1), B32(0)],
        [B1(1), B1(1), B32(2), B1(1), B1(1), B32(1)],
        [B1(0), B1(1), B32(2), B1(1), B1(1), B32(2)],
    ]
    run_tv_test(NormalQueueRTL(Bits32, 2), test_vectors)
class TimeElements():
    # asv-style benchmark suite timing momepy element-generation functions on
    # the bundled 'bubenec' sample dataset.
    def setup(self):
        # Load the layers shared by all benchmarks and precompute inputs
        # (buffered limit, street network ids, node/edge GeoDataFrames).
        test_file_path = mm.datasets.get_path('bubenec')
        self.df_buildings = gpd.read_file(test_file_path, layer='buildings')
        self.df_tessellation = gpd.read_file(test_file_path, layer='tessellation')
        self.df_streets = gpd.read_file(test_file_path, layer='streets')
        self.df_streets['nID'] = range(len(self.df_streets))
        self.limit = mm.buffered_limit(self.df_buildings, 50)
        # Round-trip streets through a graph to obtain node/edge frames.
        # NOTE: local 'nx' is a graph object here, not the networkx module.
        nx = mm.gdf_to_nx(self.df_streets)
        (self.nodes, self.edges) = mm.nx_to_gdf(nx)
        self.df_buildings['nID'] = mm.get_network_id(self.df_buildings, self.df_streets, 'nID')
    def time_Tessellation(self):
        # Benchmark morphological tessellation generation.
        mm.Tessellation(self.df_buildings, 'uID', self.limit, segment=2)
    def time_Blocks(self):
        # Benchmark block generation from tessellation + streets + buildings.
        mm.Blocks(self.df_tessellation, self.df_streets, self.df_buildings, 'bID', 'uID')
    def time_get_network_id(self):
        # Benchmark assigning nearest street segment ids to buildings.
        mm.get_network_id(self.df_buildings, self.df_streets, 'nID')
    def time_get_node_id(self):
        # Benchmark assigning nearest network node ids to buildings.
        mm.get_node_id(self.df_buildings, self.nodes, self.edges, 'nodeID', 'nID')
def test_majorana_operator_pow():
    """Squaring a two-term Majorana operator; invalid exponents raise TypeError."""
    op = MajoranaOperator((0, 1, 5), 1.5) + MajoranaOperator((1, 2, 7), (- 0.5))
    # Squaring yields an identity term plus the cross term on modes 0,2,5,7.
    squared = op ** 2
    assert squared.terms == {(): (- 2.5), (0, 2, 5, 7): (- 1.5)}
    # Negative exponent is rejected.
    with pytest.raises(TypeError):
        _ = op ** (- 1)
    # Non-integer exponent is rejected.
    with pytest.raises(TypeError):
        _ = op ** 'a'
class OCSPResponseBuilder():
def __init__(self, response: (_SingleResponse | None)=None, responder_id: (tuple[(x509.Certificate, OCSPResponderEncoding)] | None)=None, certs: (list[x509.Certificate] | None)=None, extensions: list[x509.Extension[x509.ExtensionType]]=[]):
self._response = response
self._responder_id = responder_id
self._certs = certs
self._extensions = extensions
def add_response(self, cert: x509.Certificate, issuer: x509.Certificate, algorithm: hashes.HashAlgorithm, cert_status: OCSPCertStatus, this_update: datetime.datetime, next_update: (datetime.datetime | None), revocation_time: (datetime.datetime | None), revocation_reason: (x509.ReasonFlags | None)) -> OCSPResponseBuilder:
if (self._response is not None):
raise ValueError('Only one response per OCSPResponse.')
singleresp = _SingleResponse(cert, issuer, algorithm, cert_status, this_update, next_update, revocation_time, revocation_reason)
return OCSPResponseBuilder(singleresp, self._responder_id, self._certs, self._extensions)
def responder_id(self, encoding: OCSPResponderEncoding, responder_cert: x509.Certificate) -> OCSPResponseBuilder:
if (self._responder_id is not None):
raise ValueError('responder_id can only be set once')
if (not isinstance(responder_cert, x509.Certificate)):
raise TypeError('responder_cert must be a Certificate')
if (not isinstance(encoding, OCSPResponderEncoding)):
raise TypeError('encoding must be an element from OCSPResponderEncoding')
return OCSPResponseBuilder(self._response, (responder_cert, encoding), self._certs, self._extensions)
def certificates(self, certs: typing.Iterable[x509.Certificate]) -> OCSPResponseBuilder:
if (self._certs is not None):
raise ValueError('certificates may only be set once')
certs = list(certs)
if (len(certs) == 0):
raise ValueError('certs must not be an empty list')
if (not all((isinstance(x, x509.Certificate) for x in certs))):
raise TypeError('certs must be a list of Certificates')
return OCSPResponseBuilder(self._response, self._responder_id, certs, self._extensions)
def add_extension(self, extval: x509.ExtensionType, critical: bool) -> OCSPResponseBuilder:
if (not isinstance(extval, x509.ExtensionType)):
raise TypeError('extension must be an ExtensionType')
extension = x509.Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return OCSPResponseBuilder(self._response, self._responder_id, self._certs, [*self._extensions, extension])
def sign(self, private_key: CertificateIssuerPrivateKeyTypes, algorithm: (hashes.HashAlgorithm | None)) -> OCSPResponse:
if (self._response is None):
raise ValueError('You must add a response before signing')
if (self._responder_id is None):
raise ValueError('You must add a responder_id before signing')
return ocsp.create_ocsp_response(OCSPResponseStatus.SUCCESSFUL, self, private_key, algorithm)
def build_unsuccessful(cls, response_status: OCSPResponseStatus) -> OCSPResponse:
    """Build an OCSP response for a non-SUCCESSFUL status (no payload).

    NOTE(review): the first parameter is ``cls`` but no ``@classmethod``
    decorator is visible in this dump — confirm the decorator upstream.
    """
    if not isinstance(response_status, OCSPResponseStatus):
        raise TypeError('response_status must be an item from OCSPResponseStatus')
    if response_status is OCSPResponseStatus.SUCCESSFUL:
        raise ValueError('response_status cannot be SUCCESSFUL')
    return ocsp.create_ocsp_response(response_status, None, None, None)
def find_cuda():
    """Locate the root directory of an installed CUDA toolkit.

    Resolution order:
      1. ``CUDA_HOME`` / ``CUDA_PATH`` environment variable, when it points
         at a directory containing ``bin/nvcc``.
      2. The parent directory of an ``nvcc`` found on ``PATH``, when it
         contains ``lib64/libcudart.so`` or ``lib/libcudart.so``.

    Returns:
        str: path to the CUDA toolkit root.

    Raises:
        RuntimeError: when no CUDA installation can be found.
    """
    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
    if cuda_home is not None and os.path.isfile(os.path.join(cuda_home, 'bin', 'nvcc')):
        return cuda_home
    location = shutil.which('nvcc')
    if location is not None:
        cuda_home = os.path.join(os.path.dirname(location), '..')
    # Bug fix: the original parenthesization evaluated the second isfile()
    # unconditionally, so a missing env var + no nvcc on PATH crashed with
    # TypeError (os.path.join(None, ...)) instead of raising RuntimeError.
    if cuda_home is not None and (
        os.path.isfile(os.path.join(cuda_home, 'lib64', 'libcudart.so'))
        or os.path.isfile(os.path.join(cuda_home, 'lib', 'libcudart.so'))
    ):
        return cuda_home
    raise RuntimeError('Cannot find CUDA toolkit. Please install it first.')
class DynamicOITest(unittest.TestCase):
    """Tests for rope's dynamic object inference (DOI).

    Each test writes a small module, executes it with ``pycore.run_module``
    so rope records runtime type information, then checks that the recorded
    information is reflected by the static object model.
    """

    def setUp(self):
        super().setUp()
        # validate_objectdb=True makes rope validate recorded runtime data.
        self.project = testutils.sample_project(validate_objectdb=True)
        self.pycore = self.project.pycore

    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()

    def test_simple_dti(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func(arg):\n return eval("arg")\n a_var = a_func(a_func)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        self.assertEqual(pymod['a_func'].get_object(), pymod['a_var'].get_object())

    def test_module_dti(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        code = dedent(' import mod1\n def a_func(arg):\n return eval("arg")\n a_var = a_func(mod1)\n ')
        mod2.write(code)
        self.pycore.run_module(mod2).wait_process()
        pymod2 = self.project.get_pymodule(mod2)
        self.assertEqual(self.project.get_pymodule(mod1), pymod2['a_var'].get_object())

    def test_class_from_another_module_dti(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        code1 = dedent(' class AClass(object):\n pass\n ')
        code2 = dedent(' from mod1 import AClass\n\n def a_func(arg):\n return eval("arg")\n a_var = a_func(AClass)\n ')
        mod1.write(code1)
        mod2.write(code2)
        self.pycore.run_module(mod2).wait_process()
        pymod2 = self.project.get_pymodule(mod2)
        self.assertEqual(pymod2['AClass'].get_object(), pymod2['a_var'].get_object())

    def test_class_dti(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class AClass(object):\n pass\n\n def a_func(arg):\n return eval("arg")\n a_var = a_func(AClass)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        self.assertEqual(pymod['AClass'].get_object(), pymod['a_var'].get_object())

    def test_instance_dti(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class AClass(object):\n pass\n\n def a_func(arg):\n return eval("arg()")\n a_var = a_func(AClass)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        self.assertEqual(pymod['AClass'].get_object(), pymod['a_var'].get_object().get_type())

    def test_method_dti(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class AClass(object):\n def a_method(self, arg):\n return eval("arg()")\n an_instance = AClass()\n a_var = an_instance.a_method(AClass)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        self.assertEqual(pymod['AClass'].get_object(), pymod['a_var'].get_object().get_type())

    def test_function_argument_dti(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func(arg):\n pass\n a_func(a_func)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pyscope = self.project.get_pymodule(mod).get_scope()
        self.assertEqual(pyscope['a_func'].get_object(), pyscope.get_scopes()[0]['arg'].get_object())

    def test_classes_with_the_same_name(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func(arg):\n class AClass(object):\n pass\n return eval("arg")\n class AClass(object):\n pass\n a_var = a_func(AClass)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        self.assertEqual(pymod['AClass'].get_object(), pymod['a_var'].get_object())

    def test_nested_classes(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func():\n class AClass(object):\n pass\n return AClass\n def another_func(arg):\n return eval("arg")\n a_var = another_func(a_func())\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pyscope = self.project.get_pymodule(mod).get_scope()
        self.assertEqual(pyscope.get_scopes()[0]['AClass'].get_object(), pyscope['a_var'].get_object())

    def test_function_argument_dti2(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func(arg, a_builtin_type):\n pass\n a_func(a_func, [])\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pyscope = self.project.get_pymodule(mod).get_scope()
        self.assertEqual(pyscope['a_func'].get_object(), pyscope.get_scopes()[0]['arg'].get_object())

    def test_dti_and_concluded_data_invalidation(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func(arg):\n return eval("arg")\n a_var = a_func(a_func)\n ')
        mod.write(code)
        pymod = self.project.get_pymodule(mod)
        # Force static inference first; running the module afterwards must
        # invalidate the statically concluded data.
        pymod['a_var'].get_object()
        self.pycore.run_module(mod).wait_process()
        self.assertEqual(pymod['a_func'].get_object(), pymod['a_var'].get_object())

    def test_list_objects_and_dynamicoi(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C(object):\n pass\n def a_func(arg):\n return eval("arg")\n a_var = a_func([C()])[0]\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c_class = pymod['C'].get_object()
        a_var = pymod['a_var'].get_object()
        self.assertEqual(c_class, a_var.get_type())

    def test_for_loops_and_dynamicoi(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C(object):\n pass\n def a_func(arg):\n return eval("arg")\n for c in a_func([C()]):\n a_var = c\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c_class = pymod['C'].get_object()
        a_var = pymod['a_var'].get_object()
        self.assertEqual(c_class, a_var.get_type())

    def test_dict_objects_and_dynamicoi(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C(object):\n pass\n def a_func(arg):\n return eval("arg")\n a_var = a_func({1: C()})[1]\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c_class = pymod['C'].get_object()
        a_var = pymod['a_var'].get_object()
        self.assertEqual(c_class, a_var.get_type())

    def test_dict_keys_and_dynamicoi(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C(object):\n pass\n def a_func(arg):\n return eval("arg")\n a_var = list(a_func({C(): 1}))[0]\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c_class = pymod['C'].get_object()
        a_var = pymod['a_var'].get_object()
        self.assertEqual(c_class, a_var.get_type())

    def test_dict_keys_and_dynamicoi2(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C1(object):\n pass\n class C2(object):\n pass\n def a_func(arg):\n return eval("arg")\n a, b = a_func((C1(), C2()))\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c1_class = pymod['C1'].get_object()
        c2_class = pymod['C2'].get_object()
        a_var = pymod['a'].get_object()
        b_var = pymod['b'].get_object()
        self.assertEqual(c1_class, a_var.get_type())
        self.assertEqual(c2_class, b_var.get_type())

    def test_strs_and_dynamicoi(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func(arg):\n return eval("arg")\n a_var = a_func("hey")\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        a_var = pymod['a_var'].get_object()
        self.assertTrue(isinstance(a_var.get_type(), rope.base.builtins.Str))

    def test_textual_transformations(self):
        # Round-trips objects through the textual form used to persist
        # runtime observations (PyObject -> textual -> PyObject -> textual).
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C(object):\n pass\n def f():\n pass\n a_var = C()\n a_list = [C()]\n a_str = "hey"\n a_file = open("file.txt")\n ')
        mod.write(code)
        to_pyobject = rope.base.oi.transform.TextualToPyObject(self.project)
        to_textual = rope.base.oi.transform.PyObjectToTextual(self.project)
        pymod = self.project.get_pymodule(mod)
        def complex_to_textual(pyobject):
            return to_textual.transform(to_pyobject.transform(to_textual.transform(pyobject)))
        test_variables = [('C', ('defined', 'mod.py', 'C')), ('f', ('defined', 'mod.py', 'f')), ('a_var', ('instance', ('defined', 'mod.py', 'C'))), ('a_list', ('builtin', 'list', ('instance', ('defined', 'mod.py', 'C')))), ('a_str', ('builtin', 'str')), ('a_file', ('builtin', 'file'))]
        test_cases = [(pymod[v].get_object(), r) for (v, r) in test_variables]
        test_cases += [(pymod, ('defined', 'mod.py')), (rope.base.builtins.builtins['enumerate'].get_object(), ('builtin', 'function', 'enumerate'))]
        for (var, result) in test_cases:
            self.assertEqual(to_textual.transform(var), result)
            self.assertEqual(complex_to_textual(var), result)

    def test_arguments_with_keywords(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C1(object):\n pass\n class C2(object):\n pass\n def a_func(arg):\n return eval("arg")\n a = a_func(arg=C1())\n b = a_func(arg=C2())\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c1_class = pymod['C1'].get_object()
        c2_class = pymod['C2'].get_object()
        a_var = pymod['a'].get_object()
        b_var = pymod['b'].get_object()
        self.assertEqual(c1_class, a_var.get_type())
        self.assertEqual(c2_class, b_var.get_type())

    def test_a_function_with_different_returns(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C1(object):\n pass\n class C2(object):\n pass\n def a_func(arg):\n return eval("arg")\n a = a_func(C1())\n b = a_func(C2())\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c1_class = pymod['C1'].get_object()
        c2_class = pymod['C2'].get_object()
        a_var = pymod['a'].get_object()
        b_var = pymod['b'].get_object()
        self.assertEqual(c1_class, a_var.get_type())
        self.assertEqual(c2_class, b_var.get_type())

    def test_a_function_with_different_returns2(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C1(object):\n pass\n class C2(object):\n pass\n def a_func(p):\n if p == C1:\n return C1()\n else:\n return C2()\n a = a_func(C1)\n b = a_func(C2)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c1_class = pymod['C1'].get_object()
        c2_class = pymod['C2'].get_object()
        a_var = pymod['a'].get_object()
        b_var = pymod['b'].get_object()
        self.assertEqual(c1_class, a_var.get_type())
        self.assertEqual(c2_class, b_var.get_type())

    def test_ignoring_star_args(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C1(object):\n pass\n class C2(object):\n pass\n def a_func(p, *args):\n if p == C1:\n return C1()\n else:\n return C2()\n a = a_func(C1, 1)\n b = a_func(C2, 2)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c1_class = pymod['C1'].get_object()
        c2_class = pymod['C2'].get_object()
        a_var = pymod['a'].get_object()
        b_var = pymod['b'].get_object()
        self.assertEqual(c1_class, a_var.get_type())
        self.assertEqual(c2_class, b_var.get_type())

    def test_ignoring_double_star_args(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' class C1(object):\n pass\n class C2(object):\n pass\n def a_func(p, *kwds, **args):\n if p == C1:\n return C1()\n else:\n return C2()\n a = a_func(C1, kwd=1)\n b = a_func(C2, kwd=2)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        pymod = self.project.get_pymodule(mod)
        c1_class = pymod['C1'].get_object()
        c2_class = pymod['C2'].get_object()
        a_var = pymod['a'].get_object()
        b_var = pymod['b'].get_object()
        self.assertEqual(c1_class, a_var.get_type())
        self.assertEqual(c2_class, b_var.get_type())

    def test_invalidating_data_after_changing(self):
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def a_func(arg):\n return eval("arg")\n a_var = a_func(a_func)\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        # Rewriting the module (rename then restore) must drop the recorded
        # runtime information.
        mod.write(code.replace('a_func', 'newfunc'))
        mod.write(code)
        pymod = self.project.get_pymodule(mod)
        self.assertNotEqual(pymod['a_func'].get_object(), pymod['a_var'].get_object())

    def test_invalidating_data_after_moving(self):
        mod2 = testutils.create_module(self.project, 'mod2')
        mod2.write('class C(object):\n pass\n')
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' import mod2\n def a_func(arg):\n return eval(arg)\n a_var = a_func("mod2.C")\n ')
        mod.write(code)
        self.pycore.run_module(mod).wait_process()
        # Moving the module must keep runtime data usable at the new path.
        mod.move('newmod.py')
        pymod = self.project.get_module('newmod')
        pymod2 = self.project.get_pymodule(mod2)
        self.assertEqual(pymod2['C'].get_object(), pymod['a_var'].get_object())
class DrawMap():
    """Draws a city map of box-shaped buildings as one pyqtgraph GL mesh.

    The map object is assumed to provide ``num_city_blocks``,
    ``building_north``/``building_east`` position arrays, a scalar
    ``building_width`` and a ``building_height`` matrix — TODO confirm
    against the map class.
    """

    def __init__(self, map, window):
        self.window = window
        full_mesh, full_mesh_colors = self._build_full_mesh(map)
        self.ground_mesh = gl.GLMeshItem(vertexes=full_mesh, vertexColors=full_mesh_colors, drawEdges=True, smooth=False, computeNormals=False)
        self.ground_mesh.setGLOptions('translucent')
        self.window.addItem(self.ground_mesh)

    def update(self, map):
        """Rebuild the mesh from the (possibly changed) map geometry."""
        full_mesh, full_mesh_colors = self._build_full_mesh(map)
        self.ground_mesh.setData(vertexes=full_mesh, vertexColors=full_mesh_colors)

    def _build_full_mesh(self, map):
        """Concatenate one 10-triangle box per city block.

        Extracted helper: __init__ and update previously duplicated this loop.
        """
        full_mesh = np.array([], dtype=np.float32).reshape(0, 3, 3)
        full_mesh_colors = np.array([], dtype=np.float32).reshape(0, 3, 4)
        for i in range(0, map.num_city_blocks):
            for j in range(0, map.num_city_blocks):
                (mesh, mesh_colors) = self.building_vert_face(map.building_north[(0, i)], map.building_east[(0, j)], map.building_width, map.building_height[(i, j)])
                full_mesh = np.concatenate((full_mesh, mesh), axis=0)
                full_mesh_colors = np.concatenate((full_mesh_colors, mesh_colors), axis=0)
        return full_mesh, full_mesh_colors

    def building_vert_face(self, n, e, width, height):
        """Return (mesh, colors) for one box building.

        Args:
            n, e: north/east coordinates of the building center.
            width: footprint side length; height: building height.

        Returns:
            mesh: (10, 3, 3) array of triangles (8 wall + 2 roof).
            colors: (10, 3, 4) RGBA per vertex — green walls, yellow roof.
        """
        half = width / 2
        # Corner order: bottom ring (0-3) then top ring (4-7).
        points = np.array([
            [e + half, n + half, 0],
            [e + half, n - half, 0],
            [e - half, n - half, 0],
            [e - half, n + half, 0],
            [e + half, n + half, height],
            [e + half, n - half, height],
            [e - half, n - half, height],
            [e - half, n + half, height],
        ])
        mesh = np.array([
            [points[0], points[3], points[4]],
            [points[7], points[3], points[4]],
            [points[0], points[1], points[5]],
            [points[0], points[4], points[5]],
            [points[1], points[2], points[6]],
            [points[1], points[5], points[6]],
            [points[3], points[2], points[6]],
            [points[3], points[7], points[6]],
            [points[4], points[7], points[5]],
            [points[7], points[5], points[6]],
        ])
        green = np.array([0.0, 1.0, 0.0, 1])
        yellow = np.array([1.0, 1.0, 0.0, 1])
        mesh_colors = np.empty((10, 3, 4), dtype=np.float32)
        mesh_colors[:8] = green   # walls
        mesh_colors[8:] = yellow  # roof
        return (mesh, mesh_colors)
def test_state_transition():
    """Drive the target state machine through a full mediated-transfer life
    cycle: init, a new block, the secret reveal, a later block, and finally
    the unlocking balance proof."""
    lock_amount = 7
    block_number = 1
    initiator = factories.make_address()
    pseudo_random_generator = random.Random()
    channels = make_channel_set([channel_properties2])
    from_transfer = make_target_transfer(channels[0], amount=lock_amount, initiator=initiator)
    init = ActionInitTarget(from_hop=channels.get_hop(0), transfer=from_transfer, balance_proof=from_transfer.balance_proof, sender=from_transfer.balance_proof.sender)
    init_transition = target.state_transition(target_state=None, state_change=init, channel_state=channels[0], pseudo_random_generator=pseudo_random_generator, block_number=block_number)
    assert (init_transition.new_state is not None)
    assert (init_transition.new_state.from_hop == channels.get_hop(0))
    assert (init_transition.new_state.transfer == from_transfer)
    # Advance one block, then reveal the secret: that must emit events.
    first_new_block = Block(block_number=(block_number + 1), gas_limit=1, block_hash=factories.make_transaction_hash())
    first_block_iteration = target.state_transition(target_state=init_transition.new_state, state_change=first_new_block, channel_state=channels[0], pseudo_random_generator=pseudo_random_generator, block_number=first_new_block.block_number)
    secret_reveal = ReceiveSecretReveal(secret=UNIT_SECRET, sender=initiator)
    reveal_iteration = target.state_transition(target_state=first_block_iteration.new_state, state_change=secret_reveal, channel_state=channels[0], pseudo_random_generator=pseudo_random_generator, block_number=first_new_block.block_number)
    assert reveal_iteration.events
    # A later block applied to the pre-reveal state produces no events.
    second_new_block = Block(block_number=(block_number + 2), gas_limit=1, block_hash=factories.make_transaction_hash())
    iteration = target.state_transition(target_state=init_transition.new_state, state_change=second_new_block, channel_state=channels[0], pseudo_random_generator=pseudo_random_generator, block_number=second_new_block.block_number)
    assert (not iteration.events)
    balance_proof = create(BalanceProofSignedStateProperties(nonce=(from_transfer.balance_proof.nonce + 1), transferred_amount=lock_amount, locked_amount=0, canonical_identifier=factories.make_canonical_identifier(token_network_address=channels[0].token_network_address, channel_identifier=channels.get_hop(0).channel_identifier), locksroot=LOCKSROOT_OF_NO_LOCKS, message_hash=(b'\x00' * 32)))
    balance_proof_state_change = ReceiveUnlock(message_identifier=random.randint(0, UINT64_MAX), secret=UNIT_SECRET, balance_proof=balance_proof, sender=balance_proof.sender)
    # Receiving the unlock clears the target state entirely.
    proof_iteration = target.state_transition(target_state=init_transition.new_state, state_change=balance_proof_state_change, channel_state=channels[0], pseudo_random_generator=pseudo_random_generator, block_number=(block_number + 2))
    assert (proof_iteration.new_state is None)
# NOTE(review): the decorator lines in this class appear truncated in this
# dump — the "@pytest.mark" prefix on the line below, and the
# "@hypothesis.given"/"@hypothesis.settings" prefixes on the two lines
# above test_srcsink_hypothesis, must be restored before running.
.usefixtures('cmdline_opts')
class ChecksumCLSrcSink_Tests():
    """Source/sink simulation tests for the cycle-level checksum unit."""

    def setup_class(cls):
        # The DUT type is a class attribute so subclasses can swap it out.
        cls.DutType = ChecksumCL

    def run_sim(s, th):
        run_sim(th, s.__class__.cmdline_opts)

    def test_srcsink_simple(s):
        """One message through the harness; 7864356 is its known checksum."""
        words = [b16(x) for x in [1, 2, 3, 4, 5, 6, 7, 8]]
        bits = words_to_b128(words)
        result = b32(7864356)
        src_msgs = [bits]
        sink_msgs = [result]
        th = TestHarness(s.DutType, src_msgs, sink_msgs)
        s.run_sim(th)

    def test_srcsink_pipeline(s):
        """Back-to-back messages exercising pipelined behavior."""
        words0 = [b16(x) for x in [1, 2, 3, 4, 5, 6, 7, 8]]
        words1 = [b16(x) for x in [8, 7, 6, 5, 4, 3, 2, 1]]
        bits0 = words_to_b128(words0)
        bits1 = words_to_b128(words1)
        result0 = b32(7864356)
        result1 = b32()
        src_msgs = [bits0, bits1, bits0, bits1]
        sink_msgs = [result0, result1, result0, result1]
        th = TestHarness(s.DutType, src_msgs, sink_msgs)
        s.run_sim(th)

    def test_srcsink_backpressure(s):
        """Same traffic, but the sink delays acceptance to apply backpressure."""
        words0 = [b16(x) for x in [1, 2, 3, 4, 5, 6, 7, 8]]
        words1 = [b16(x) for x in [8, 7, 6, 5, 4, 3, 2, 1]]
        result0 = b32(7864356)
        result1 = b32()
        bits0 = words_to_b128(words0)
        bits1 = words_to_b128(words1)
        src_msgs = [bits0, bits1, bits0, bits1]
        sink_msgs = [result0, result1, result0, result1]
        th = TestHarness(s.DutType, src_msgs, sink_msgs)
        th.set_param('top.sink.construct', initial_delay=10)
        s.run_sim(th)

    (input_msgs=st.lists(st.lists(pm_st.bits(16), min_size=8, max_size=8)), src_init=st.integers(0, 10), src_intv=st.integers(0, 3), sink_init=st.integers(0, 10), sink_intv=st.integers(0, 3))
    (deadline=None, max_examples=50)
    def test_srcsink_hypothesis(s, input_msgs, src_init, src_intv, sink_init, sink_intv):
        """Property-based test: random messages and random src/sink delays,
        checked against the reference checksum() function."""
        src_msgs = [words_to_b128(words) for words in input_msgs]
        sink_msgs = [checksum(words) for words in input_msgs]
        th = TestHarness(s.DutType, src_msgs, sink_msgs)
        th.set_param('top.src.construct', initial_delay=src_init, interval_delay=src_intv)
        th.set_param('top.sink.construct', initial_delay=sink_init, interval_delay=sink_intv)
        s.run_sim(th)
# NOTE(review): the decorator below appears truncated in this dump — it is
# presumably "@functional_datapipe('shuffled_flatmap')"; restore before use.
_datapipe('shuffled_flatmap')
class ShuffledFlatMapperIterDataPipe(IterDataPipe):
    """Applies *fn* to each element (which must yield an iterable) and
    interleaves the resulting iterators through a bounded shuffle buffer,
    yielding one item at a time from a randomly chosen live iterator.
    """

    datapipe: IterDataPipe            # upstream source pipe
    fn: Optional[Callable]            # element -> iterable
    buffer_size: int                  # max number of live iterators
    _buffer: List[Iterator]           # currently open iterators
    _enabled: bool                    # False => plain (ordered) flatmap
    _seed: Optional[int]              # one-shot seed consumed by reset()
    _rng: random.Random               # private RNG (not the global one)
    _no_op_fn: bool = False           # True when fn was left as identity

    def __init__(self, datapipe: IterDataPipe, fn: Optional[Callable]=None, input_col=None, buffer_size: int=100) -> None:
        super().__init__()
        self._buffer = []
        self.datapipe = datapipe
        if (fn is None):
            # Identity flat-map; remembered so __len__ can be computed.
            fn = _no_op_fn
            self._no_op_fn = True
        _check_unpickable_fn(fn)
        self.fn = fn
        self.input_col = input_col
        validate_input_col(fn, input_col)
        assert (buffer_size > 0), 'buffer_size should be larger than 0'
        self.buffer_size = buffer_size
        self._enabled = True
        self._seed = None
        self._rng = random.Random()

    def set_shuffle(self, shuffle=True):
        """Enable/disable shuffling; returns self for chaining."""
        self._enabled = shuffle
        return self

    def set_seed(self, seed: int):
        """Set the seed used on the next reset; returns self for chaining."""
        self._seed = seed
        return self

    def reset(self) -> None:
        """Drop buffered iterators and reseed the RNG for a fresh epoch."""
        self._buffer = []
        if self._enabled:
            if (self._seed is None):
                # Draw a fresh random seed (via torch so it respects
                # torch's global seeding) when none was set explicitly.
                self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
            self._rng.seed(self._seed)
            # The seed is one-shot: consumed so the next epoch differs.
            self._seed = None

    def _apply_fn(self, data):
        # Dispatch on input_col: whole element, multiple columns, or one.
        if (self.input_col is None):
            return self.fn(data)
        elif isinstance(self.input_col, (list, tuple)):
            args = tuple((data[col] for col in self.input_col))
            return self.fn(*args)
        else:
            return self.fn(data[self.input_col])

    def __iter__(self) -> Iterator[T_co]:
        if (not self._enabled):
            # Shuffling disabled: behave as an ordinary ordered flatmap.
            for x in self.datapipe:
                (yield from self._apply_fn(x))
        else:
            idx = self._rng.randint(0, (self.buffer_size - 1))
            for x in self.datapipe:
                # While the buffer is full, drain random iterators until a
                # slot frees up (an iterator is exhausted and popped).
                while (len(self._buffer) == self.buffer_size):
                    try:
                        (yield next(self._buffer[idx]))
                        idx = self._rng.randint(0, (self.buffer_size - 1))
                    except StopIteration:
                        self._buffer.pop(idx)
                self._buffer.append(iter(self._apply_fn(x)))
            # Source exhausted: drain the remaining iterators randomly.
            while self._buffer:
                try:
                    idx = self._rng.randint(0, (len(self._buffer) - 1))
                    (yield next(self._buffer[idx]))
                except StopIteration:
                    self._buffer.pop(idx)

    def __len__(self) -> int:
        if self._no_op_fn:
            # Identity fn: total length is the sum of inner lengths.
            return sum(map(len, self.datapipe))
        raise TypeError(f"{type(self).__name__}'s length relies on the output of its function.")

    def __getstate__(self):
        # NOTE: live iterators in _buffer are included in the state tuple;
        # they are only picklable if the produced iterators are.
        state = (self.datapipe, self.fn, self.input_col, self.buffer_size, self._buffer, self._enabled, self._seed, self._rng.getstate(), self._valid_iterator_id, self._number_of_samples_yielded)
        if (IterDataPipe.getstate_hook is not None):
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (self.datapipe, self.fn, self.input_col, self.buffer_size, self._buffer, self._enabled, self._seed, rng_state, self._valid_iterator_id, self._number_of_samples_yielded) = state
        self._rng = random.Random()
        self._rng.setstate(rng_state)

    def __del__(self):
        self._buffer.clear()
def test_read_write(tmpdir):
    """Copy a raster with transform/crs stripped from its profile, then
    copy that intermediate file again using its own metadata."""
    first_copy = str(tmpdir.join('test.tif'))
    second_copy = str(tmpdir.join('test2.tif'))
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        profile = src.meta.copy()
        profile.pop('transform')
        profile.pop('crs')
        with rasterio.open(first_copy, 'w', **profile) as dst:
            dst.write(src.read())
    with rasterio.open(first_copy) as src, rasterio.open(second_copy, 'w', **src.meta) as dst:
        dst.write(src.read())
def main() -> int:
    """Run one named source-tree checker, or all of them.

    Returns 0 when every requested checker passed, 1 otherwise.
    """
    checkers = {'git': check_git, 'vcs': check_vcs_conflict, 'spelling': check_spelling, 'pyqt-imports': check_pyqt_imports, 'userscript-descriptions': check_userscripts_descriptions, 'userscript-shebangs': check_userscript_shebangs, 'changelog-urls': check_changelog_urls, 'vim-modelines': check_vim_modelines}
    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true', help='Show checked filenames')
    parser.add_argument('checker', choices=[*checkers, 'all'], help='Which checker to run.')
    args = parser.parse_args()
    if args.checker != 'all':
        # Single checker: its boolean result maps directly to the exit code.
        return 0 if checkers[args.checker](args) else 1
    results = []
    for name, checker in checkers.items():
        utils.print_title(name)
        results.append(checker(args))
    return 0 if all(results) else 1
def eval_base_encoder(dataset, device):
    """Evaluate an ImageNet-pretrained ResNet-152 backbone (classification
    head replaced with identity) on *dataset* using retrieval R-precision."""
    print('Evaluating base encoder...')
    backbone = torchvision.models.resnet152(pretrained=True)
    backbone.fc = nn.Identity()
    encoder = CarsEncoder(backbone)
    encoder.to(device=device)
    encoder.eval()
    sampler = GroupSampler(sample_size=1000, device=device, log_progress=True)
    evaluator = Evaluator(metrics=RetrievalRPrecision(), sampler=sampler)
    model = SimilarityModel(encoders=encoder, head=EmptyHead(encoder.embedding_size))
    result = Quaterion.evaluate(evaluator=evaluator, model=model, dataset=dataset)
    print(result)
def test_load_security_information_api_returns_none(initialized_db, set_secscan_config):
    """When the secscan API returns no vulnerability report for a manifest
    marked as indexed, load_security_information must report
    NOT_YET_INDEXED rather than failing."""
    repository_ref = registry_model.lookup_repository('devtable', 'simple')
    tag = registry_model.get_repo_tag(repository_ref, 'latest')
    manifest = registry_model.get_manifest_for_tag(tag)
    # Mark the manifest as fully indexed so only the API result is missing.
    ManifestSecurityStatus.create(manifest=manifest._db_id, repository=repository_ref._db_id, error_json={}, index_status=IndexStatus.COMPLETED, indexer_hash='abc', indexer_version=IndexerVersion.V4, metadata_json={})
    secscan = V4SecurityScanner(application, instance_keys, storage)
    secscan._secscan_api = mock.Mock()
    secscan._secscan_api.vulnerability_report.return_value = None
    assert (secscan.load_security_information(manifest).status == ScanLookupStatus.NOT_YET_INDEXED)
class JoinGroupCall(Scaffold):
    """Scaffold mixin implementing the ``join_group_call`` client method."""

    async def join_group_call(self, chat_id: Union[(int, str)], stream: Optional[Stream]=None, invite_hash: Optional[str]=None, join_as=None, auto_start: bool=True):
        """Join the group call of *chat_id*, optionally streaming *stream*.

        Raises NoMTProtoClientSet / ClientNotStarted / NoActiveGroupCall on
        precondition failures; low-level binding errors are translated in
        the except clauses at the bottom.
        """
        if (join_as is None):
            join_as = self._cache_local_peer
        # NOTE(review): chat-id resolution runs before the _app None check
        # below — confirm _resolve_chat_id does not itself require _app.
        chat_id = (await self._resolve_chat_id(chat_id))
        self._cache_user_peer.put(chat_id, join_as)
        if (self._app is None):
            raise NoMTProtoClientSet()
        if (not self._is_running):
            raise ClientNotStarted()
        chat_call = (await self._app.get_full_chat(chat_id))
        if (chat_call is None):
            # No active call: optionally start one, otherwise give up.
            if auto_start:
                (await self._app.create_group_call(chat_id))
            else:
                raise NoActiveGroupCall()
        media_description = (await StreamParams.get_stream_params(stream))
        try:
            call_params: str = (await ToAsync(self._binding.create_call, chat_id, media_description))
            result_params = (await self._app.join_group_call(chat_id, call_params, invite_hash, (media_description.video is None), self._cache_user_peer.get(chat_id)))
            (await ToAsync(self._binding.connect, chat_id, result_params))
            # Remember whether an admin muted us so we can ask to unmute later.
            participants = (await self._app.get_group_call_participants(chat_id))
            for x in participants:
                if (x.user_id == BridgedClient.chat_id(self._cache_local_peer)):
                    self._need_unmute[chat_id] = x.muted_by_admin
        except FileError:
            raise FileNotFoundError()
        except ConnectionError:
            raise AlreadyJoinedError()
        except InvalidParams:
            raise UnMuteNeeded()
        except Exception:
            raise TelegramServerError()
# NOTE(review): this is a HuggingFace-style argument container whose
# @dataclass decorator appears to have been lost in this dump — restore it.
class DataTrainingArguments():
    """Arguments describing which dataset (or local folders) to train and
    evaluate on, and how to split/truncate it."""

    dataset_name: Optional[str] = field(default='nateraw/image-folder', metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})

    def __post_init__(self):
        # Collect whichever split folders were provided; data_files stays
        # None when neither train_dir nor validation_dir was given.
        data_files = dict()
        if (self.train_dir is not None):
            data_files['train'] = self.train_dir
        if (self.validation_dir is not None):
            data_files['val'] = self.validation_dir
        self.data_files = (data_files if data_files else None)
def train(opt):
    """Training entry point: load vocab/fields, build the training iterator,
    then dispatch to multi-GPU (one process per GPU plus a batch producer),
    single-GPU, or CPU training."""
    ArgumentParser.validate_train_opts(opt)
    ArgumentParser.update_model_opts(opt)
    ArgumentParser.validate_model_opts(opt)
    if opt.train_from:
        # Resuming: the vocab is stored inside the checkpoint itself.
        logger.info(('Loading checkpoint from %s' % opt.train_from))
        checkpoint = torch.load(opt.train_from, map_location=(lambda storage, loc: storage))
        logger.info(('Loading vocab from checkpoint at %s.' % opt.train_from))
        vocab = checkpoint['vocab']
    else:
        vocab = torch.load((opt.data + '.vocab.pt'))
    # Older serialized vocabs need conversion into field objects.
    if old_style_vocab(vocab):
        fields = load_old_vocab(vocab, opt.model_type, dynamic_dict=opt.copy_attn)
    else:
        fields = vocab
    if (len(opt.data_ids) > 1):
        # Multiple data shards: iterate over all of them.
        train_shards = []
        for train_id in opt.data_ids:
            shard_base = ('train_' + train_id)
            train_shards.append(shard_base)
        train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
    else:
        if (opt.data_ids[0] is not None):
            shard_base = ('train_' + opt.data_ids[0])
        else:
            shard_base = 'train'
        train_iter = build_dataset_iter(shard_base, fields, opt)
    nb_gpu = len(opt.gpu_ranks)
    if (opt.world_size > 1):
        # Multi-GPU: one trainer process per GPU; the semaphore bounds how
        # far ahead the batch producer may run of the consumers.
        queues = []
        mp = torch.multiprocessing.get_context('spawn')
        semaphore = mp.Semaphore((opt.world_size * opt.queue_size))
        error_queue = mp.SimpleQueue()
        error_handler = ErrorHandler(error_queue)
        procs = []
        for device_id in range(nb_gpu):
            q = mp.Queue(opt.queue_size)
            queues += [q]
            procs.append(mp.Process(target=run, args=(opt, device_id, error_queue, q, semaphore), daemon=True))
            procs[device_id].start()
            logger.info((' Starting process pid: %d ' % procs[device_id].pid))
            error_handler.add_child(procs[device_id].pid)
        producer = mp.Process(target=batch_producer, args=(train_iter, queues, semaphore, opt), daemon=True)
        producer.start()
        error_handler.add_child(producer.pid)
        for p in procs:
            p.join()
        producer.terminate()
    elif (nb_gpu == 1):
        single_main(opt, 0)
    else:
        # No GPUs configured: train on CPU (device id -1).
        single_main(opt, (- 1))
def parse_args():
    """Parse command-line options for the detection script.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # Required input locations.
    for flag in ('--data-root', '--annot-path'):
        parser.add_argument(flag, type=str, required=True)
    parser.add_argument('--det-stride', type=float, default=1)
    parser.add_argument('--in-scale', type=float, default=None)
    parser.add_argument('--fps', type=float, default=30)
    # Boolean switches, all off by default.
    for flag in ('--no-mask', '--no-class-mapping', '--cpu-pre', '--dynamic-schedule'):
        parser.add_argument(flag, action='store_true', default=False)
    # Required output/model locations.
    for flag in ('--out-dir', '--config', '--weights'):
        parser.add_argument(flag, type=str, required=True)
    parser.add_argument('--overwrite', action='store_true', default=False)
    return parser.parse_args()
def test_create_org_policy(initialized_db, app):
    """Creating an org autoprune policy stores it, queues the org's
    autoprune task, and writes an audit log entry with the policy fields."""
    with client_with_identity('devtable', app) as cl:
        response = conduct_api_call(cl, OrgAutoPrunePolicies, 'POST', {'orgname': 'sellnsmall'}, {'method': 'creation_date', 'value': '2w'}, 201).json
        assert response['uuid'] is not None
        assert model.autoprune.get_namespace_autoprune_policy('sellnsmall', response['uuid']) is not None
        org = model.organization.get_organization('sellnsmall')
        assert model.autoprune.namespace_has_autoprune_task(org.id)
        # The creation must have been audit-logged with the policy details.
        wanted_kind = get_log_entry_kinds()['create_namespace_autoprune_policy']
        logs = list(get_latest_logs_query(performer='devtable', namespace='sellnsmall'))
        log = next((entry for entry in logs if entry.kind == wanted_kind), None)
        assert log is not None
        metadata = json.loads(log.metadata_json)
        assert metadata['method'] == 'creation_date'
        assert metadata['value'] == '2w'
        assert metadata['namespace'] == 'sellnsmall'
class ModelArguments():
    """Arguments controlling which model/config to pretrain and how.

    NOTE(review): fields use dataclasses.field metadata, so this class is
    presumably decorated with @dataclass at the definition site — the
    decorator is not visible in this view; confirm.
    """
    # Path or huggingface.co hub identifier of the pretrained model (required).
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    # Local directory for downloaded model caches.
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    # Freeze the CNN feature extractor during training (default on).
    freeze_feature_extractor: Optional[bool] = field(default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    verbose_logging: Optional[bool] = field(default=False, metadata={'help': 'Whether to log verbose messages or not.'})
    # Gumbel-softmax temperature schedule: anneal from max down to min by the decay factor.
    max_gumbel_temperature: Optional[float] = field(default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'})
    min_gumbel_temperature: Optional[float] = field(default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'})
    gumbel_temperature_decay: Optional[float] = field(default=0.999995, metadata={'help': 'Decay of gumbel temperature during training.'})
# NOTE(review): the two lines below appear to be truncated decorators
# (e.g. @pytensor.config.change_flags(...) and @pytest.mark.parametrize)
# — confirm against the original source.
_flags(compute_test_value='raise')
.parametrize('dist_op, dist_params, size', [(normal, [np.array(1.0, dtype=config.floatX), np.array(5.0, dtype=config.floatX)], []), (normal, [np.array([0.0, 1.0], dtype=config.floatX), np.array(5.0, dtype=config.floatX)], []), (normal, [np.array([0.0, 1.0], dtype=config.floatX), np.array(5.0, dtype=config.floatX)], [3, 2]), (multivariate_normal, [np.array([[0], [10], [100]], dtype=config.floatX), np.diag(np.array([1e-06], dtype=config.floatX))], [2, 3, 3]), (dirichlet, [np.array([[100, 1, 1], [1, 100, 1], [1, 1, 100]], dtype=config.floatX)], [2, 3, 3]), (multinomial, [np.array([10, 20], dtype='int64'), np.array([[0.999, 0.001], [0.001, 0.999]], dtype=config.floatX)], [3, 2])])
def test_local_rv_size_lift(dist_op, dist_params, size):
    """After the size-lift rewrite, the RV's explicit size argument is gone (length 0)."""
    rng = shared(np.random.default_rng(1233532), borrow=False)
    (new_out, f_inputs, dist_st, f_rewritten) = apply_local_rewrite_to_rv(local_rv_size_lift, (lambda rv: rv), dist_op, dist_params, size, rng)
    # inputs[1] of the rewritten RV node is its size vector; lifted means empty.
    assert (pt.get_vector_length(new_out.owner.inputs[1]) == 0)
class BaseOptimisationWrapper(object):
    """Base wrapper giving optimisation front-ends access to a cached Pywr model.

    The model is built lazily and stored (together with its variable,
    objective and constraint metadata) in the global MODEL_CACHE keyed by
    `uid`, so repeated evaluations reuse one set-up model.

    NOTE(review): `_cached`, `model`, `model_variables`, `model_variable_map`,
    `model_objectives` and `model_constraints` are accessed as attributes
    elsewhere (`self._cached.model`), so they are presumably decorated
    (@property / @cached_property) in the original source — decorators are
    not visible in this view; confirm.
    """

    def __init__(self, pywr_model_json, *args, **kwargs):
        # Optional externally supplied cache key; generated if absent.
        uid = kwargs.pop('uid', None)
        # Model class and extra load kwargs let callers customise loading.
        self.pywr_model_klass = kwargs.pop('model_klass', Model)
        self.pywr_model_kwargs = kwargs.pop('model_kwargs', {})
        super(BaseOptimisationWrapper, self).__init__(*args, **kwargs)
        self.pywr_model_json = pywr_model_json
        if (uid is None):
            uid = uuid.uuid4().hex
        self.uid = uid
        self.run_stats = None

    def _cached(self):
        # Build-and-memoise: construct, set up and index the model only once
        # per uid; subsequent calls hit MODEL_CACHE.
        global MODEL_CACHE
        try:
            cache = MODEL_CACHE[self.uid]
        except KeyError:
            model = self.make_model()
            model.setup()
            cache = ModelCache()
            cache.model = model
            (cache.variables, cache.variable_map) = cache_variable_parameters(model)
            cache.objectives = cache_objectives(model)
            cache.constraints = cache_constraints(model)
            MODEL_CACHE[self.uid] = cache
        return cache

    def model(self):
        # Convenience accessors onto the cached bundle.
        return self._cached.model

    def model_variables(self):
        return self._cached.variables

    def model_variable_map(self):
        return self._cached.variable_map

    def model_objectives(self):
        return self._cached.objectives

    def model_constraints(self):
        return self._cached.constraints

    def make_model(self):
        """Load the Pywr model from JSON and apply subclass customisation."""
        m = self.pywr_model_klass.load(self.pywr_model_json, **self.pywr_model_kwargs)
        self.customise_model(m)
        return m

    def customise_model(self, model):
        # Hook for subclasses; intentionally a no-op here.
        pass
def install_jetson_clocks(args):
    """Install the fake jetson_clocks test fixture into /usr/bin.

    Aborts the pytest session if a real jetson_clocks binary already
    exists, to avoid clobbering it.

    Args:
        args: unused; kept for CLI-hook signature compatibility.
    """
    if not os.path.isfile('/usr/bin/jetson_clocks'):
        shutil.copy('tests/jetson_clocks', '/usr/bin/jetson_clocks')
        # Fixed message: the fixture actually lives under tests/, not test/.
        print('Copied tests/jetson_clocks')
    else:
        print('/usr/bin/jetson_clocks already exists')
        pytest.exit('I cannot install a fake jetson_clocks! jetson_clocks already exist')
def validate_op_return_output(output: TxOutput, *, max_size: int = None) -> None:
    """Validate that *output* is a well-formed OP_RETURN output.

    Args:
        output: transaction output to validate.
        max_size: optional upper bound on the scriptpubkey size, in bytes.
            NOTE(review): semantically Optional[int]; annotation left as `int`
            to avoid touching the module's import block.

    Raises:
        UserFacingException: if the script is empty or not OP_RETURN, the
            payload exceeds max_size, or the output amount is non-zero.
    """
    script = output.scriptpubkey
    # Guard the empty script explicitly: indexing b''[0] would raise an
    # IndexError instead of the intended user-facing error.
    if not script or script[0] != opcodes.OP_RETURN:
        raise UserFacingException(_('Only OP_RETURN scripts are supported.'))
    if max_size is not None and len(script) > max_size:
        raise UserFacingException(_('OP_RETURN payload too large.' + '\n' + f'(scriptpubkey size {len(script)} > {max_size})'))
    if output.value != 0:
        raise UserFacingException(_('Amount for OP_RETURN output must be zero.'))
class HookError(RadishError):
    """Raised when a registered hook function fails during execution."""

    def __init__(self, hook_function, failure):
        self.hook_function = hook_function
        self.failure = failure
        code = hook_function.__code__
        message = f"Hook '{hook_function.__name__}' from {code.co_filename}:{code.co_firstlineno} raised: '{failure.name}: {failure.reason}'"
        super().__init__(message)
def test_cannot_manage_subscription_if_not_subscribed_via_stripe(graphql_client):
    """An active member without a Stripe subscription gets NotSubscribedViaStripe."""
    membership = MembershipFactory(status=MembershipStatus.ACTIVE)
    graphql_client.force_login(membership.user)
    mutation = 'mutation {\n manageUserSubscription {\n __typename\n }\n }'
    response = graphql_client.query(mutation, variables={})
    typename = response['data']['manageUserSubscription']['__typename']
    assert typename == 'NotSubscribedViaStripe'
def test_export_methods_handle_empty_data_error(simple_project, mocker):
    """Every df-export helper returns an empty DataFrame when the API yields no data."""
    mocker.patch.object(simple_project, '_call_api', return_value='\n')
    exporters = (
        simple_project.export_records,
        simple_project.export_instrument_event_mappings,
        simple_project.export_repeating_instruments_events,
        simple_project.export_metadata,
    )
    for export in exporters:
        assert export(format_type='df').empty
class OUT2(Block):
    """GSE2 OUT2 waveform-output block (fixed-column text line).

    `_format` maps fixed character columns of an OUT2 line onto the fields
    below; see each E(start, end, type) entry for the column span.
    """
    _format = [E(1, 4, x_fixed(b'OUT2'), dummy=True), E(6, 28, x_date_time), E(30, 34, 'a5'), E(36, 38, 'a3'), E(40, 43, 'a4'), E(45, 55, 'f11.3')]
    # Start time of the data segment.
    time = Timestamp.T()
    station = String.T(help='station code (5 characters)')
    channel = String.T(help='channel code (3 characters)')
    location = String.T(default='', optional=True, help='location code (aux_id, 4 characters)')
    # Segment duration in seconds (f11.3 column).
    duration = Float.T()
class UserPushShowPvar(BaseHandler):
    """Reveal a user's stored push-channel settings after admin re-authentication.

    NOTE(review): the bare `.authenticated` line below looks like a truncated
    decorator (e.g. @tornado.web.authenticated) — confirm against the
    original source.
    """
    .authenticated
    async def post(self, userid):
        try:
            # Target user; email is needed for the ownership check below.
            user = (await self.db.user.get(userid, fields=('role', 'email')))
            envs = {}
            for (k, _) in self.request.body_arguments.items():
                envs[k] = self.get_body_argument(k)
            mail = envs['adminmail']
            pwd = envs['adminpwd']
            # Require a valid admin credential AND that it matches the target
            # user's own email before revealing any tokens.
            if ((await self.db.user.challenge_MD5(mail, pwd)) and (user['email'] == mail)):
                key = (await self.db.user.get(userid, fields=('barkurl', 'skey', 'wxpusher', 'qywx_token', 'tg_token', 'dingding_token', 'qywx_webhook')))
                log = u'BarkUrl :{bark}\r\nSendkey :{skey}\r\nWxPusher :{wxpusher}\r\n Pusher :{qywx_token}\r\nTg Bot :{tg_token}\r\nDingDing Bot :{dingding_token}\r\n WebHook : {qywx_webhook}'.format(bark=key['barkurl'], skey=key['skey'], wxpusher=key['wxpusher'], qywx_token=key['qywx_token'], tg_token=key['tg_token'], dingding_token=key['dingding_token'], qywx_webhook=key['qywx_webhook'])
                (await self.render('utils_run_result.html', log=log, title=u'', flg='success'))
                return
            else:
                raise Exception(u'/')
        except Exception as e:
            if config.traceback_print:
                traceback.print_exc()
            # Map the low-level "user not found" error onto a short user-facing code.
            if (str(e).find('get user need id or email') > (- 1)):
                e = u'/'
            (await self.render('tpl_run_failed.html', log=str(e)))
            logger_Web_Handler.error('UserID: %s show Push_settings failed! Reason: %s', (userid or '-1'), str(e))
            return
class Effect172(BaseEffect):
    """Passive effect: damage-multiplier bonus for Small Energy Turret modules.

    NOTE(review): `handler` takes no `self`, so it is presumably registered
    as a static/unbound callback by the effect framework — confirm against
    BaseEffect.
    """
    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # Skill-sourced containers scale the bonus by trained level;
        # anything else counts as level 1.
        level = (container.level if ('skill' in context) else 1)
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Small Energy Turret')), 'damageMultiplier', (container.getModifiedItemAttr('damageMultiplierBonus') * level), **kwargs)
def _add_run_pair_metric_page(report_index_file, pair_output_paths, pair_name, pair_data_frame, pair_report_data_list):
    """Link the pair's page from the top-level index and write the pair's own index page."""
    index_path = pair_output_paths.index_path
    out_dir = pair_output_paths.output_paths.out_dir
    # Links in the top index are relative to the report root.
    rel_path = os.path.relpath(index_path, out_dir)
    report_index_file.write('<a href="%s">%s</a><br>\n' % (rel_path, pair_name))
    with open(index_path, 'w') as pair_index_file:
        _write_header(pair_index_file, pair_name)
        _add_run_pair_table(pair_index_file, pair_data_frame, pair_report_data_list)
        _add_run_pair_metric_pages(pair_index_file, pair_output_paths, pair_data_frame)
class QQP(Task):
    """GLUE QQP: decide whether two Quora questions ask the same thing."""

    VERSION = 0
    DATASET_PATH = 'glue'
    DATASET_NAME = 'qqp'

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        # Materialize the split lazily and cache it on first access.
        if self._training_docs is None:
            self._training_docs = list(self.dataset['train'])
        return self._training_docs

    def validation_docs(self):
        return self.dataset['validation']

    def doc_to_text(self, doc):
        return (
            f"Question 1: {doc['question1']}\n"
            f"Question 2: {doc['question2']}\n"
            "Question: Do both questions ask the same thing?\nAnswer:"
        )

    def doc_to_target(self, doc):
        return f" {yesno(doc['label'])}"

    def construct_requests(self, doc, ctx):
        # Request loglikelihoods for both answers; compared in process_results.
        ll_yes, _ = rf.loglikelihood(ctx, ' yes')
        ll_no, _ = rf.loglikelihood(ctx, ' no')
        return ll_yes, ll_no

    def process_results(self, doc, results):
        ll_yes, ll_no = results
        gold = doc['label']
        pred = ll_yes > ll_no
        return {'acc': pred == gold, 'f1': (gold, pred)}

    def higher_is_better(self):
        return {'acc': True, 'f1': True}

    def aggregation(self):
        return {'acc': mean, 'f1': f1_score}
def test_newtype_structure_hooks(converter: BaseConverter):
    """NewType structure hooks apply to the exact NewType, not its supertype chain.

    Hooks deliberately raise ZeroDivisionError (via 1/0) on rejected values so
    the test can distinguish "hook ran and rejected" from "hook not invoked".
    """
    # Without hooks, NewTypes fall back to plain int structuring.
    assert (converter.structure('0', int) == 0)
    assert (converter.structure('0', PositiveIntNewType) == 0)
    assert (converter.structure('0', BigPositiveIntNewType) == 0)
    # Hook on PositiveIntNewType: accept > 0, blow up otherwise.
    converter.register_structure_hook(PositiveIntNewType, (lambda v, _: (int(v) if (int(v) > 0) else (1 / 0))))
    with pytest.raises(ZeroDivisionError):
        converter.structure('0', PositiveIntNewType)
    assert (converter.structure('1', PositiveIntNewType) == 1)
    # BigPositiveIntNewType is derived from PositiveIntNewType, so it
    # inherits that hook until it registers its own below.
    with pytest.raises(ZeroDivisionError):
        converter.structure('0', BigPositiveIntNewType)
    # Hook on BigPositiveIntNewType: accept > 50 only.
    converter.register_structure_hook(BigPositiveIntNewType, (lambda v, _: (int(v) if (int(v) > 50) else (1 / 0))))
    with pytest.raises(ZeroDivisionError):
        converter.structure('1', BigPositiveIntNewType)
    # The base NewType's hook is unaffected by the derived registration.
    assert (converter.structure('1', PositiveIntNewType) == 1)
    assert (converter.structure('51', BigPositiveIntNewType) == 51)
def test_difference() -> None:
    """Version.difference removes any overlap with another constraint."""
    version = Version.parse('1.2.3')
    # Subtracting itself, or a range that contains it, leaves nothing.
    assert version.difference(version).is_empty()
    assert version.difference(VersionRange(Version.parse('1.1.4'), Version.parse('1.2.4'))).is_empty()
    # Subtracting a disjoint version or range leaves the version untouched.
    assert version.difference(Version.parse('0.8.0')) == version
    assert version.difference(VersionRange(Version.parse('1.4.0'), Version.parse('3.0.0'))) == version
class Effect6597(BaseEffect):
    """Passive Gallente-carrier hull bonus applied to command-burst modules."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Modules affected: anything requiring Skirmish or Armored Command.
        def affects_command_module(mod):
            return mod.item.requiresSkill('Skirmish Command') or mod.item.requiresSkill('Armored Command')

        # The same hull bonus boosts every buff-value slot plus the buff
        # duration, scaled per Gallente Carrier skill level.
        for boosted_attr in ('warfareBuff2Value', 'warfareBuff3Value', 'warfareBuff4Value', 'buffDuration', 'warfareBuff1Value'):
            fit.modules.filteredItemBoost(affects_command_module, boosted_attr, src.getModifiedItemAttr('shipBonusCarrierG4'), skill='Gallente Carrier', **kwargs)
# NOTE(review): the two lines below appear to be truncated decorators
# (e.g. @pytest.mark.unit and @pytest.mark.parametrize) — confirm against
# the original source.
.unit()
.parametrize(('outcome', 'outcome_enum', 'total_description'), ([(outcome, TaskOutcome, 'description') for outcome in TaskOutcome] + [(outcome, CollectionOutcome, 'description') for outcome in CollectionOutcome]))
def test_create_summary_panel(capsys, outcome, outcome_enum, total_description):
    """The rendered summary panel shows the header and the outcome's description.

    NOTE(review): the ''-in-captured assertions below look like stripped
    glyphs/markup characters — confirm against the original source.
    """
    # Exactly one task/collection carries the outcome under test.
    counts = {out: 0 for out in outcome_enum}
    counts[outcome] = 1
    panel = create_summary_panel(counts, outcome_enum, total_description)
    console.print(panel)
    captured = capsys.readouterr().out
    assert (' Summary ' in captured)
    assert (('' in captured) or ('' in captured))
    assert (('' in captured) or ('' in captured))
    assert (outcome.description in captured)
    assert ('description' in captured)
def setup(parser):
    """Attach squirrel selection/query options plus a --style choice to *parser*."""
    parser.add_squirrel_selection_arguments()
    # Time is handled elsewhere for this subcommand.
    parser.add_squirrel_query_arguments(without=['time'])
    choices = ['visual', 'summary', 'yaml']
    parser.add_argument(
        '--style',
        dest='style',
        choices=choices,
        default='visual',
        help='Set style of presentation. Choices: %s' % ldq(choices))
class BuildNoProvenanceUsageTests(CustomAssertions):
    """Tests for rendering usage examples of nodes that carry no provenance.

    NOTE(review): setUpClass/tearDownClass normally require @classmethod —
    decorators are not visible in this view; confirm against the original.
    """

    def setUpClass(cls):
        cls.das = DummyArtifacts()
        cls.tempdir = cls.das.tempdir
        cls.pm = PluginManager()

    def tearDownClass(cls):
        cls.das.free()

    def test_build_no_provenance_node_usage_w_complete_node(self):
        """A v0 node renders the warning block once and names the node/type."""
        ns = NamespaceCollections()
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        uuid = self.das.table_v0.uuid
        dag = self.das.table_v0.dag
        v0_node = dag.get_node_data(uuid)
        build_no_provenance_node_usage(v0_node, uuid, ns, cfg)
        out_var_name = '<feature_table_frequency_0>'
        self.assertEqual(ns.usg_var_namespace, {uuid: out_var_name})
        rendered = cfg.use.render()
        # Boilerplate warning and header should each appear exactly once.
        self.assertREAppearsOnlyOnce(rendered, 'nodes have no provenance')
        header = '# Original Node ID String Description'
        self.assertREAppearsOnlyOnce(rendered, header)
        exp_v0 = f'# {uuid} _feature_table_frequency_0_'
        self.assertRegex(rendered, exp_v0)

    def test_build_no_provenance_node_usage_uuid_only_node(self):
        """A node known only by uuid falls back to a generic variable name."""
        ns = NamespaceCollections()
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        uuid = 'some-uuid'
        node = None
        build_no_provenance_node_usage(node, uuid, ns, cfg)
        out_var_name = '<no-provenance-node_0>'
        self.assertEqual(ns.usg_var_namespace, {uuid: out_var_name})
        rendered = cfg.use.render()
        self.assertREAppearsOnlyOnce(rendered, 'nodes have no provenance')
        header = '# Original Node ID String Description'
        self.assertREAppearsOnlyOnce(rendered, header)
        exp_v0 = f'# {uuid} _no_provenance_node_0_'
        self.assertRegex(rendered, exp_v0)

    def test_build_no_provenance_node_usage_many(self):
        """Multiple no-provenance nodes share one warning block and get
        disambiguated (numbered) variable names."""
        ns = NamespaceCollections()
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        uuid = self.das.table_v0.uuid
        dag = self.das.table_v0.dag
        v0_node = dag.get_node_data(uuid)
        # Second node with identical data but a distinct uuid.
        dummy_node_uuid = (uuid + '-dummy')
        dummy_node = dag.get_node_data(uuid)
        build_no_provenance_node_usage(v0_node, uuid, ns, cfg)
        build_no_provenance_node_usage(dummy_node, dummy_node_uuid, ns, cfg)
        self.assertIn(uuid, ns.usg_var_namespace)
        self.assertIn(dummy_node_uuid, ns.usg_var_namespace)
        self.assertEqual(ns.usg_var_namespace[uuid], '<feature_table_frequency_0>')
        self.assertEqual(ns.usg_var_namespace[dummy_node_uuid], '<feature_table_frequency_1>')
        rendered = cfg.use.render()
        # Warning and header stay single even with two no-provenance nodes.
        self.assertREAppearsOnlyOnce(rendered, 'nodes have no provenance')
        header = '# Original Node ID String Description'
        self.assertREAppearsOnlyOnce(rendered, header)
        exp_og = f'# {uuid} _feature_table_frequency_0_'
        exp_dummy = f'# {uuid}-dummy _feature_table_frequency_1_'
        self.assertRegex(rendered, exp_og)
        self.assertRegex(rendered, exp_dummy)
class Project(_Project):
    """A rope project rooted at a filesystem directory.

    Creates the root directory if missing, loads preferences from the
    configured ropefolder, and tracks ignored resources.

    NOTE(review): `ropefolder` is read as an attribute elsewhere
    (`self.ropefolder is not None`), so it is presumably an @property — the
    decorator is not visible in this view. The bare string before
    `_init_source_folders` also looks like a truncated decorator or
    leftover note; confirm against the original source.
    """

    def __init__(self, projectroot, fscommands=None, ropefolder='.ropeproject', **prefs):
        # Normalize the root path unless it's the filesystem root itself.
        if (projectroot != '/'):
            projectroot = _realpath(projectroot).rstrip('/\\')
        assert isinstance(projectroot, str)
        self._address = projectroot
        self._ropefolder_name = ropefolder
        if (not os.path.exists(self._address)):
            os.mkdir(self._address)
        elif (not os.path.isdir(self._address)):
            raise exceptions.RopeError('Project root exists and is not a directory')
        if (fscommands is None):
            fscommands = rope.base.fscommands.create_fscommands(self._address)
        super().__init__(fscommands)
        self.ignored = _ResourceMatcher()
        self.file_list = _FileListCacher(self)
        self._init_prefs(prefs)
        # Never index rope's own metadata folder.
        if (ropefolder is not None):
            self.prefs.add('ignored_resources', ropefolder)
        self._init_source_folders()

    def __repr__(self):
        return '<{}.{} "{}">'.format(self.__class__.__module__, self.__class__.__name__, self.address)

    ('Delete once deprecated functions are gone')
    def _init_source_folders(self):
        # Register user-configured source folders as custom roots.
        for path in self.prefs.get('source_folders', []):
            folder = self.get_resource(path)
            self._custom_source_folders.append(folder)

    def get_files(self):
        return self.file_list.get_files()

    def get_python_files(self):
        """Return all project files recognized as Python sources."""
        return [resource for resource in self.get_files() if self.pycore.is_python_file(resource)]

    def _get_resource_path(self, name):
        # Resource names use '/' separators regardless of platform.
        return os.path.join(self._address, *name.split('/'))

    def _init_ropefolder(self):
        if ((self.ropefolder is not None) and (not self.ropefolder.exists())):
            self._create_recursively(self.ropefolder)

    def _create_recursively(self, folder):
        # Create missing parent folders first, then the folder itself.
        if ((folder.parent != self.root) and (not folder.parent.exists())):
            self._create_recursively(folder.parent)
        folder.create()

    def _init_prefs(self, prefs):
        """Load config from the ropefolder, then apply explicit overrides."""
        config = get_config(self.root, self.ropefolder).parse()
        self.prefs = config
        # Keep the ignore matcher in sync with pref changes.
        self.prefs.add_callback('ignored_resources', self.ignored.set_patterns)
        self.ignored.set_patterns(self.prefs.ignored_resources)
        for (key, value) in prefs.items():
            self.prefs.set(key, value)
        self._init_other_parts()
        self._init_ropefolder()
        if config.project_opened:
            config.project_opened(self)

    def _init_other_parts(self):
        # Touching pycore forces its lazy initialization.
        self.pycore

    def is_ignored(self, resource):
        return self.ignored.does_match(resource)

    def sync(self):
        self.close()

    def close(self):
        # Persist cached data on shutdown.
        self.data_files.write()

    def set(self, key, value):
        """Set the `key` preference to `value`."""
        self.prefs.set(key, value)

    def ropefolder(self):
        if (self._ropefolder_name is not None):
            return self.get_folder(self._ropefolder_name)

    def validate(self, folder=None):
        # Default to validating the whole project.
        if (folder is None):
            folder = self.root
        super().validate(folder)

    root = property((lambda self: self.get_resource('')))
    address = property((lambda self: self._address))
class EnrSpace(Space):
    """Hilbert space restricted to a bounded total excitation number (ENR)."""

    _stored_dims = {}

    def __init__(self, dims, excitations):
        self.dims = tuple(dims)
        self.n_excitations = excitations
        # Enumerate only the basis states whose total excitation count is
        # within the allowed bound; keeps forward/backward index maps.
        self.size, self.state2idx, self.idx2state = enr_state_dictionaries(dims, excitations)
        self.issuper = False
        self.superrep = None
        self._pure_dims = False

    def __eq__(self, other):
        if self is other:
            return True
        return (type(other) is type(self)
                and self.dims == other.dims
                and self.n_excitations == other.n_excitations)

    def __hash__(self):
        # Consistent with __eq__: equal spaces hash equally.
        return hash((self.dims, self.n_excitations))

    def __repr__(self):
        return f'EnrSpace({self.dims}, {self.n_excitations})'

    def as_list(self):
        return list(self.dims)

    def dims2idx(self, dims):
        """Map a per-mode excitation tuple to its flat state index."""
        return self.state2idx[tuple(dims)]

    def idx2dims(self, idx):
        """Map a flat state index back to its per-mode excitation tuple."""
        return self.idx2state[idx]
# NOTE(review): the line below appears to be a truncated decorator
# (e.g. @pytest.mark.parametrize) — confirm against the original source.
.parametrize('masked, secrets', [(_secrets, _secrets), ((re.compile('token.+?(?=\\s|$)'), re.compile('secret.+?(?=\\s|$)')), _secrets)])
def test_multiple_secrets_with_same_mask(masked, secrets):
    """Several masks registered under one name all redact to the same label."""
    masker = MaskingFilter(_use_named_masks=True)
    # Register every secret (literal or regex) under the single name 'ksam'.
    for mask in masked:
        masker.add_mask_for(mask, 'ksam')
    test_str = ' '.join(secrets)
    assert (masker.mask(test_str) == ' '.join(("<'ksam' (value removed)>" for _ in secrets)))
class QuantileReg(Glm):
    """GLM specialisation for quantile regression (pinball loss)."""

    GLM_LOSS_CLASS = Quantile

    def __init__(self, X, y, fit_intercept=True, sample_weight=None, offsets=None, quantile=0.5):
        super().__init__(X=X, y=y, fit_intercept=fit_intercept, sample_weight=sample_weight, offsets=offsets, quantile=quantile)

    def intercept_at_coef_eq0(self):
        # With every coefficient fixed at zero, the loss-minimising intercept
        # is the weighted quantile of the (offset-adjusted) response.
        if self.offsets is None:
            targets = self.y
        else:
            targets = self.y - self.offsets
        return weighted_quantile(values=targets, axis=0, sample_weight=self.sample_weight, q=self.loss_kws['quantile'])
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
    """Consume one marker comparison operator from *tokenizer*.

    Grammar: marker_op = version_cmp | "in" | "not" wsp+ "in"
    """
    if tokenizer.check('IN'):
        tokenizer.read()
        return Op('in')
    if tokenizer.check('NOT'):
        # 'not' must be followed by whitespace and then 'in'.
        tokenizer.read()
        tokenizer.expect('WS', expected="whitespace after 'not'")
        tokenizer.expect('IN', expected="'in' after 'not'")
        return Op('not in')
    if tokenizer.check('OP'):
        return Op(tokenizer.read().text)
    return tokenizer.raise_syntax_error('Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in')
class TestParseEntryPoints():
    """Tests for parse_entrypoints over entry_points.txt-style content.

    NOTE(review): the bare `.parametrize(...)` line below appears to be a
    truncated @pytest.mark.parametrize decorator — confirm against the
    original source.
    """
    .parametrize(('script', 'expected'), [pytest.param('', [], id='empty'), pytest.param('\n [foo]\n foo = foo.bar\n ', [], id='unrelated'), pytest.param('\n [console_scripts]\n package = package.__main__:package\n ', [('package', 'package.__main__', 'package', 'console')], id='cli'), pytest.param('\n [gui_scripts]\n package = package.__main__:package\n ', [('package', 'package.__main__', 'package', 'gui')], id='gui'), pytest.param('\n [console_scripts]\n magic-cli = magic.cli:main\n\n [gui_scripts]\n magic-gui = magic.gui:main\n ', [('magic-cli', 'magic.cli', 'main', 'console'), ('magic-gui', 'magic.gui', 'main', 'gui')], id='cli-and-gui')])
    def test_valid(self, script, expected):
        """Each (name, module, attr, kind) tuple is extracted from its section."""
        iterable = parse_entrypoints(textwrap.dedent(script))
        assert (list(iterable) == expected), expected
class PreparedBuild(object):
    """Mutable holder for the pieces of a build prepared from a trigger.

    Each field follows a write-once pattern: reading before it is set, or
    setting it twice, raises.

    NOTE(review): this view is visibly corrupted — fragments like
    `_manual.setter` / `_url.setter` / `_id.setter` / `_name.setter` and the
    duplicated method names (tags/subdirectory/context/metadata) indicate
    stripped @property and @<name>.setter decorators, and `get_display_name`
    lacking `self` suggests a stripped @staticmethod. Code kept byte-identical;
    confirm against the original source.
    """

    def __init__(self, trigger=None):
        self._dockerfile_id = None
        self._archive_url = None
        self._tags = None
        self._build_name = None
        self._subdirectory = None
        self._context = None
        self._metadata = None
        self._trigger = trigger
        self._is_manual = None

    def get_display_name(sha):
        # Short (7-char) form of a commit sha, used as a build name.
        return sha[0:7]

    def name_from_sha(self, sha):
        self.build_name = PreparedBuild.get_display_name(sha)

    def is_manual(self):
        if (self._is_manual is None):
            raise Exception('Property is_manual not set')
        return self._is_manual

    _manual.setter
    def is_manual(self, value):
        if (self._is_manual is not None):
            raise Exception('Property is_manual already set')
        self._is_manual = value

    def trigger(self):
        return self._trigger

    def archive_url(self):
        return self._archive_url

    _url.setter
    def archive_url(self, value):
        if self._archive_url:
            raise Exception('Property archive_url already set')
        self._archive_url = value

    def dockerfile_id(self):
        return self._dockerfile_id

    _id.setter
    def dockerfile_id(self, value):
        if self._dockerfile_id:
            raise Exception('Property dockerfile_id already set')
        self._dockerfile_id = value

    def tags(self):
        if (not self._tags):
            raise Exception('Missing property tags')
        return self._tags

    def tags(self, value):
        if self._tags:
            raise Exception('Property tags already set')
        # Sanitize every tag; empty/invalid tags fall back to 'latest'.
        self._tags = [escape_tag(tag, default='latest') for tag in value]

    def build_name(self):
        if (not self._build_name):
            raise Exception('Missing property build_name')
        return self._build_name

    _name.setter
    def build_name(self, value):
        if self._build_name:
            raise Exception('Property build_name already set')
        self._build_name = value

    def subdirectory(self):
        if (self._subdirectory is None):
            raise Exception('Missing property subdirectory')
        return self._subdirectory

    def subdirectory(self, value):
        if self._subdirectory:
            raise Exception('Property subdirectory already set')
        self._subdirectory = value

    def context(self):
        if (self._context is None):
            raise Exception('Missing property context')
        return self._context

    def context(self, value):
        if self._context:
            raise Exception('Property context already set')
        self._context = value

    def metadata(self):
        if (self._metadata is None):
            raise Exception('Missing property metadata')
        return self._metadata

    def metadata(self, value):
        if self._metadata:
            raise Exception('Property metadata already set')
        self._metadata = value
def run(settings):
    """Default train settings for DiMP (ResNet50 backbone) on RGB-D data.

    Configures datasets, processing, the DiMP network, losses and optimizer,
    then trains for 100 epochs.
    """
    settings.description = 'Default train settings for DiMP with ResNet50 as backbone.'
    # --- General training setup ---
    settings.batch_size = 10
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    # ImageNet normalization statistics.
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    # --- Crop/label geometry ---
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = (1 / 4)
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    # Input crop size = feature size times the backbone stride (16).
    settings.output_sz = (settings.feature_sz * 16)
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # --- Datasets (RGB + colormapped depth input) ---
    input_dtype = 'rgbcolormap'
    coco_train = MSCOCOSeq_depth(settings.env.cocodepth_dir, dtype=input_dtype)
    lasot_depth_train = Lasot_depth(root=settings.env.lasotdepth_dir, dtype=input_dtype)
    depthtrack_train = DepthTrack(root=settings.env.depthtrack_dir, split='train', dtype=input_dtype)
    depthtrack_val = DepthTrack(root=settings.env.depthtrack_dir, split='val', dtype=input_dtype)
    # --- Augmentation / processing pipelines ---
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
    transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
    output_sigma = (settings.output_sigma_factor / settings.search_area_factor)
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}
    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor, output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', proposal_params=proposal_params, label_function_params=label_params, transform=transform_train, joint_transform=transform_joint)
    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor, output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', proposal_params=proposal_params, label_function_params=label_params, transform=transform_val, joint_transform=transform_joint)
    # --- Samplers and loaders ---
    dataset_train = sampler.DiMPSampler([coco_train, lasot_depth_train, depthtrack_train], [1, 1, 1], samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3, processing=data_processing_train)
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1)
    dataset_val = sampler.DiMPSampler([depthtrack_val], [1], samples_per_epoch=5000, max_gap=30, num_test_frames=3, num_train_frames=3, processing=data_processing_val)
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)
    # --- Network, losses, optimizer ---
    net = dimpnet.dimp50_DeT(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5, clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512, optim_init_step=0.9, optim_init_reg=0.1, init_gauss_sigma=(output_sigma * settings.feature_sz), num_dist_bins=100, bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu', merge_type='max')
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)
    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}
    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}
    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)
    # Per-module learning rates; bb_regressor uses the base lr (2e-4).
    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-05}, {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 0.0005}, {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-05}, {'params': actor.net.bb_regressor.parameters()}, {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-05}, {'params': actor.net.feature_extractor_depth.parameters(), 'lr': 2e-05}], lr=0.0002)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)
    trainer.train(100, load_latest=True, fail_safe=True)
def Xception71(num_classes=None, global_pool=True, keep_prob=0.5, output_stride=None, regularize_depthwise=False, multi_grid=None, scope='xception_71'):
    """Build the Xception-71 variant by assembling its entry/middle/exit blocks.

    Args:
        num_classes: optional classifier size; None omits the head.
        global_pool: whether to globally pool before the classifier.
        keep_prob: dropout keep probability.
        output_stride: optional output stride for dense prediction.
        regularize_depthwise: apply weight regularization to depthwise convs.
        multi_grid: optional unit-rate multipliers for the final block.
        scope: variable scope name.

    Returns:
        An Xception model instance built from the block list below.
    """
    blocks = [xception_block('entry_flow/block1', in_channels=64, depth_list=[128, 128, 128], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), xception_block('entry_flow/block2', in_channels=128, depth_list=[256, 256, 256], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=1), xception_block('entry_flow/block3', in_channels=256, depth_list=[256, 256, 256], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), xception_block('entry_flow/block4', in_channels=256, depth_list=[728, 728, 728], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=1), xception_block('entry_flow/block5', in_channels=728, depth_list=[728, 728, 728], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), xception_block('middle_flow/block1', in_channels=728, depth_list=[728, 728, 728], skip_connection_type='sum', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=16, stride=1), xception_block('exit_flow/block1', in_channels=728, depth_list=[728, 1024, 1024], skip_connection_type='conv', activation_fn_in_separable_conv=False, regularize_depthwise=regularize_depthwise, num_units=1, stride=2), xception_block('exit_flow/block2', in_channels=1024, depth_list=[1536, 1536, 2048], skip_connection_type='none', activation_fn_in_separable_conv=True, regularize_depthwise=regularize_depthwise, num_units=1, stride=1, unit_rate_list=multi_grid)]
    return Xception(blocks=blocks, num_classes=num_classes, global_pool=global_pool, keep_prob=keep_prob, output_stride=output_stride, scope=scope)
class TestInferenceDropout(unittest.TestCase):
    """Tests for retaining dropout at inference time in Transformer models."""

    def setUp(self):
        # Small 2-encoder / 1-decoder model keeps construction fast.
        (self.task, self.parser) = get_dummy_task_and_parser()
        TransformerModel.add_args(self.parser)
        self.args = self.parser.parse_args([])
        self.args.encoder_layers = 2
        self.args.decoder_layers = 1
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_sets_inference_dropout_to_true(self):
        """retain_dropout=True enables dropout at inference everywhere."""
        self.args.retain_dropout = True
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        cfg = convert_namespace_to_omegaconf(self.args)
        self.transformer_model.prepare_for_inference_(cfg)
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.apply_during_inference

    def test_inference_dropout_false_by_default(self):
        """Without the flag, inference dropout stays off everywhere."""
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        cfg = convert_namespace_to_omegaconf(self.args)
        self.transformer_model.prepare_for_inference_(cfg)
        assert (not self.transformer_model.encoder.dropout_module.apply_during_inference)
        assert (not self.transformer_model.decoder.dropout_module.apply_during_inference)
        for layer in self.transformer_model.encoder.layers:
            assert (not layer.dropout_module.apply_during_inference)
        for layer in self.transformer_model.decoder.layers:
            assert (not layer.dropout_module.apply_during_inference)

    def test_applies_training_mode(self):
        """Dropout modules track the model's train()/eval() mode."""
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        assert self.transformer_model.encoder.dropout_module.training
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.training
        self.transformer_model.eval()
        assert (not self.transformer_model.decoder.dropout_module.training)
        for layer in self.transformer_model.encoder.layers:
            assert (not layer.dropout_module.training)

    def test_retain_modules(self):
        """retain_dropout_modules restricts retention to the named module classes."""
        self.args.retain_dropout = True
        self.args.retain_dropout_modules = ['TransformerEncoder', 'TransformerEncoderLayer']
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        cfg = convert_namespace_to_omegaconf(self.args)
        self.transformer_model.prepare_for_inference_(cfg)
        # Only encoder-side modules were named, so decoder dropout stays off.
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert (not self.transformer_model.decoder.dropout_module.apply_during_inference)
        for layer in self.transformer_model.decoder.layers:
            assert (not layer.dropout_module.apply_during_inference)
class BilmDataset(Dataset):
    """Builds a bidirectional-LM training shard from a slice of the corpus.

    Each worker serializes ``(src, tgt_forward, tgt_backward, seg)`` tuples
    for its assigned range of corpus lines into ``dataset-tmp-<proc_id>.pt``.
    """

    def worker(self, proc_id, start, end):
        """Tokenize the worker's corpus lines and pickle fixed-length examples.

        Args:
            proc_id: worker index; only used to name the temporary output file.
            start: first 0-based corpus line this worker is responsible for.
            end: exclusive upper bound of this worker's line range.

        Fixes over the original:
          * bare ``except: continue`` around ``readline()`` could spin forever
            on a persistent error; narrowed to ``UnicodeDecodeError`` (the only
            expected per-line failure for a utf-8 text read).
          * at EOF ``readline()`` returns ``''``; the empty token list then hit
            ``continue`` and skipped the stop check, looping forever — now we
            break explicitly on EOF.
          * a skipped (empty/short) line at the range boundary also skipped the
            stop check and made the worker read past ``end``; the stop
            condition is now part of the loop header.
          * the output file is context-managed so it is closed on error too.
        """
        print('Worker %d is building dataset ... ' % proc_id)
        set_seed(self.seed)
        pos = 0
        with open('dataset-tmp-' + str(proc_id) + '.pt', 'wb') as f_write, \
                open(self.corpus_path, mode='r', encoding='utf-8') as f:
            # Skip the lines belonging to earlier workers; undecodable lines
            # still count toward the position, as in the original.
            while pos < start:
                try:
                    f.readline()
                except UnicodeDecodeError:
                    pass
                pos += 1
            # Original stop condition was `break if pos >= end - 1` after the
            # post-read increment, i.e. lines are processed while the
            # pre-read position is < end - 1.  Preserved here.
            while pos < end - 1:
                try:
                    line = f.readline()
                except UnicodeDecodeError:
                    pos += 1
                    continue
                pos += 1
                if not line:
                    break  # EOF before `end`; don't spin forever
                src = [self.vocab.get(w) for w in self.tokenizer.tokenize(line)]
                if len(src) < 1:
                    continue
                # Forward LM predicts the next token, backward LM the previous.
                tgt_forward = src[1:] + [SEP_ID]
                tgt_backward = [CLS_ID] + src[:-1]
                seg = [1] * len(src)
                if len(src) >= self.seq_length:
                    src = src[:self.seq_length]
                    tgt_forward = tgt_forward[:self.seq_length]
                    tgt_backward = tgt_backward[:self.seq_length]
                    seg = seg[:self.seq_length]
                else:
                    pad = self.seq_length - len(src)
                    src += [PAD_ID] * pad
                    tgt_forward += [PAD_ID] * pad
                    tgt_backward += [PAD_ID] * pad
                    seg += [PAD_ID] * pad
                pickle.dump((src, tgt_forward, tgt_backward, seg), f_write)
def stderr_for_metric(metric, bootstrap_iters):
    """Return the standard-error function associated with *metric*.

    Metrics that support bootstrapping get a closure running
    ``bootstrap_stderr`` with the requested iteration count; a few analytic
    metrics map directly to closed-form stderr helpers.  Unknown metrics
    yield ``None``.
    """
    bootstrap_metrics = (median, matthews_corrcoef, f1_score, perplexity, bleu, chrf, ter)
    if metric in bootstrap_metrics:
        def bootstrapped_stderr(values):
            return bootstrap_stderr(metric, values, iters=bootstrap_iters)
        return bootstrapped_stderr
    analytic = {mean: mean_stderr, acc_all: acc_all_stderr}
    return analytic.get(metric)
def get_value_from_params_dir(params_dir, param_name):
    """Look up *param_name* in ``params.json`` or ``params.pkl`` under *params_dir*.

    The JSON file is tried first, then the pickle.  Returns ``None`` when
    neither file exists, or when the first file found lacks the key (matching
    the original: a KeyError stops the search rather than falling through).
    """
    def _read(filename, loader, mode):
        # Open via GFile so remote filesystems (gs://, etc.) work too.
        path = os.path.join(params_dir, filename)
        with tf.io.gfile.GFile(path, mode) as f:
            params = loader(f)
        logging.info('Found params file %s', path)
        return params[param_name]

    try:
        for filename, loader, mode in (('params.json', json.load, 'r'),
                                       ('params.pkl', pkl.load, 'rb')):
            try:
                return _read(filename, loader, mode)
            except tf.errors.NotFoundError:
                logging.info('%s does not exist in %s', filename, params_dir)
    except KeyError:
        logging.info('The params file does not have the key %s', param_name)
    return None
class InnerPrepareSingleFactorization(Bloq):
    """Inner state preparation for the single-factorization LCU.

    Attributes (presumably following the single-factorization construction —
    confirm against the surrounding module):
        num_aux: size of the auxiliary index register.
        num_spin_orb: number of spin orbitals.
        num_bits_state_prep: bits of precision for alias-sampling state prep.
        num_bits_rot_aa: bits of precision for amplitude-amplification rotations.
        adjoint: whether this bloq is the adjoint (uncomputation) of the prepare.
        kp1 / kp2: QROAM blocking factors.
    """

    num_aux: int
    num_spin_orb: int
    num_bits_state_prep: int
    num_bits_rot_aa: int = 8
    adjoint: bool = False
    kp1: int = 1
    kp2: int = 1

    def pretty_name(self) -> str:
        # NOTE(review): both branches were '' in the source — the dagger
        # marking the adjoint had been lost in extraction; restored here.
        dag = '†' if self.adjoint else ''
        return f'In-Prep{dag}'

    @property  # was a bare `_property` statement (garbled decorator) — fixed
    def signature(self) -> Signature:
        # Registers p, q each index half the spin orbitals; l indexes the aux.
        n = ((self.num_spin_orb // 2) - 1).bit_length()
        return Signature.build(l=self.num_aux.bit_length(), p=n, q=n, succ_pq=1)

    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        """Return the (sub-bloq, count) pairs making up this bloq's cost."""
        n = ((self.num_spin_orb // 2) - 1).bit_length()
        # Toffoli count for the upper-triangle / rotation bookkeeping.
        cost_up_tri = (Toffoli(), 6 * n + 2 * self.num_bits_rot_aa - 7)
        # Map (p, q) to a contiguous register for the QROAM lookup.
        cost_contg_indx = (ToContiguousIndex(n, 2 * n), 1)
        cost_qroam = (
            QROAMTwoRegs(
                self.num_aux + 1,
                self.num_spin_orb**2 // 8 + self.num_spin_orb // 4,
                self.kp1,
                self.kp2,
                2 * n + self.num_bits_state_prep + 2,
                adjoint=self.adjoint,
            ),
            1,
        )
        # Coherent comparison for alias sampling, then the controlled swap.
        cost_ineq = (LessThanEqual(self.num_bits_state_prep, self.num_bits_state_prep), 1)
        cost_swap = (CSwap(2 * n), 1)
        return {cost_up_tri, cost_contg_indx, cost_qroam, cost_ineq, cost_swap}
def test_autoload_commands(command_sets_app):
    """Auto-loaded CommandSets register their commands under their categories."""
    cmds_cats, cmds_doc, cmds_undoc, help_topics = command_sets_app._build_command_info()

    # Commands from the stand-alone set land in the 'Alone' category.
    assert 'Alone' in cmds_cats
    for cmd_name in ('elderberry', 'main'):
        assert cmd_name in cmds_cats['Alone']

    # The auto-registered 'main' command exposes a working subcommand.
    result = command_sets_app.app_cmd('main sub')
    assert 'Subcommand Ran' in result.stdout

    assert 'Also Alone' in cmds_cats
    assert 'durian' in cmds_cats['Also Alone']

    assert 'Fruits' in cmds_cats
    assert 'cranberry' in cmds_cats['Fruits']

    # Set B was not auto-loaded, so its category must be absent.
    assert 'Command Set B' not in cmds_cats
class AboutDialog(Gtk.AboutDialog):
    """Standard GTK about dialog populated from app metadata and constants."""

    def __init__(self, parent, app):
        super().__init__()
        self.set_transient_for(parent)
        # Application identity.
        self.set_program_name(app.name)
        self.set_version(quodlibet.get_build_description())
        self.set_logo_icon_name(app.icon_name)
        self.set_comments(app.description)
        # Credits and legal information from the constants module.
        self.set_authors(const.AUTHORS)
        self.set_artists(const.ARTISTS)
        self.set_translator_credits('\n'.join(const.TRANSLATORS))
        self.set_license_type(Gtk.License.GPL_2_0)
        self.set_website(const.WEBSITE)
        self.set_copyright(f'{const.COPYRIGHT}\n{const.SUPPORT_EMAIL}')
def test_instrument_before_after_run() -> None:
    """The runner fires before_run and after_run hooks exactly once, in order."""
    events: list = []

    class BeforeAfterRun(_abc.Instrument):
        def before_run(self) -> None:
            events.append('before_run')

        def after_run(self) -> None:
            events.append('after_run')

    async def main() -> None:
        pass  # nothing to do — only the lifecycle hooks matter here

    _core.run(main, instruments=[BeforeAfterRun()])
    assert events == ['before_run', 'after_run']
# NOTE(review): the two patch targets below appeared as bare string
# expressions (decorators with the leading ``@patch`` stripped), so the two
# mock parameters were never injected and the test could not run.  Restored;
# confirm `patch` is imported at the top of this file (unittest.mock).
@patch('pypyr.moduleloader.get_module')
@patch('pypyr.cache.loadercache.Loader.get_pipeline')
def test_get_parsed_context_parser_pass(mock_get_pipeline, mock_moduleloader):
    """Pipeline.run uses the pipeline's context_parser to seed the context."""
    contextparser_cache.clear()
    # The parser module resolved by the loader returns our arbitrary parser.
    mock_moduleloader.return_value.get_parsed_context = mock_parser_arb
    mock_get_pipeline.return_value = get_pipe_def({'context_parser': 'specifiedparserhere'})

    pipeline = Pipeline('arb', context_args='in arg here')
    context = Context()
    pipeline.run(context)

    mock_moduleloader.assert_called_once_with('specifiedparserhere')
    mock_get_pipeline.assert_called_once_with(name='arb', parent=None)

    # The parser's output plus the raw context args land in the context.
    assert isinstance(context, Context)
    assert len(context) == 2
    assert context['key1'] == 'created in mock parser'
    assert context['key2'] == 'in arg here'
def G_logistic_nonsaturating(G, D, opt, training_set, minibatch_size):
    """Non-saturating logistic generator loss.

    softplus(-x) == -log(sigmoid(x)), so the returned tensor is the standard
    non-saturating GAN generator loss evaluated on the discriminator's scores
    for freshly generated images.
    """
    latent_batch = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    cond_labels = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(latent_batch, cond_labels, is_training=True)
    fake_scores = fp32(D.get_output_for(fakes, cond_labels, is_training=True))
    return tf.nn.softplus(-fake_scores)
class LdjsonReaderListsTest(Ldjson, ReaderTest, TestCase):
    """LDJSON reader on input whose lines are JSON lists (not objects)."""

    input_data = '[1,2,3]\n[4,5,6]'

    # NOTE(review): the decorators below appeared as the bare expressions
    # ``()`` and ``(output_type=tuple)`` in the source — the decorator name
    # was stripped, leaving the second line a SyntaxError.  Restored as
    # ``@incontext`` per the testing helpers these ReaderTest cases use;
    # confirm against the project's testing utilities.

    @incontext()
    def test_nofields(self, context):
        # Each input line is emitted as a one-element row holding the list.
        context.write_sync(EMPTY)
        context.stop()
        assert context.get_buffer() == [([1, 2, 3],), ([4, 5, 6],)]

    @incontext(output_type=tuple)
    def test_output_type(self, context):
        # Output type tuple yields the same shape for list-per-line input.
        context.write_sync(EMPTY)
        context.stop()
        assert context.get_buffer() == [([1, 2, 3],), ([4, 5, 6],)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.