code stringlengths 281 23.7M |
|---|
_config
def test_tile_add_on_top(manager):
    """A window opened while a slave is focused is inserted right after it."""
    # Advance to the Tile layout (two steps from the default layout).
    manager.c.next_layout()
    manager.c.next_layout()
    for window_name in ('one', 'two', 'three'):
        manager.test_window(window_name)
    layout_info = manager.c.layout.info()
    assert layout_info['master'] == ['one']
    assert layout_info['slave'] == ['two', 'three']
    # Move focus back onto the first slave and open a new window there.
    manager.c.layout.previous()
    assert_focused(manager, 'two')
    manager.test_window('four')
    layout_info = manager.c.layout.info()
    assert layout_info['clients'] == ['one', 'two', 'four', 'three']
    assert layout_info['slave'] == ['two', 'four', 'three']
    assert_focus_path(manager, 'three', 'one', 'two', 'four')
def test_QobjEvo_step_coeff():
    """Step-function interpolation (order=0) picks the sample at the left
    edge of the interval containing t."""
    coeff1 = np.random.rand(6)
    coeff2 = np.random.rand(6) + np.random.rand(6) * 1j
    # Uniformly spaced time grid.
    tlist = np.array([2, 3, 4, 5, 6, 7], dtype=float)
    qobjevo = QobjEvo([[sigmaz(), coeff1], [sigmax(), coeff2]], tlist=tlist, order=0)
    for t, idx in ((2.0, 0), (7.0, 5), (5.0001, 3), (3.9999, 1)):
        assert qobjevo(t)[0, 0] == coeff1[idx]
    for t, idx in ((2.0, 0), (7.0, 5), (5.0001, 3), (3.9999, 1)):
        assert qobjevo(t)[0, 1] == coeff2[idx]
    # Non-uniformly spaced time grid.
    tlist = np.array([1, 2, 4, 5, 6, 8], dtype=float)
    qobjevo = QobjEvo([[sigmaz(), coeff1], [sigmax(), coeff2]], tlist=tlist, order=0)
    for t, idx in ((1.0, 0), (8.0, 5), (3.9999, 1), (4.23, 2), (1.23, 0)):
        assert qobjevo(t)[0, 0] == coeff1[idx]
    for t, idx in ((1.0, 0), (8.0, 5), (6.7, 4), (7.9999, 4), (3.9999, 1)):
        assert qobjevo(t)[0, 1] == coeff2[idx]
# NOTE(review): the line below looks like a decorator whose leading '@' was
# lost in extraction (registration of a Racket primitive) -- confirm against
# the original pycket source.
_procedure('default-error-display-handler', [values_string.W_String, values.W_Object], simple=False)
def default_error_display_handler(msg, exn_object, env, cont):
    """Write *msg* to the current error port and return void.

    msg        -- W_String error text.
    exn_object -- the raised value; may or may not be an exn struct.
    env, cont  -- interpreter environment/continuation, threaded to return_void.
    """
    from pycket.prims.input_output import current_error_param, return_void
    # The current error port is stored as a continuation-local parameter.
    port = current_error_param.get(cont)
    assert isinstance(port, values.W_OutputPort)
    if is_exn(exn_object):
        # Prefix the message with the exception struct's type name.
        port.write(('%s : %s\n' % (exn_object.struct_type().name.tostring(), msg.tostring())))
    else:
        port.write(('exception : %s\n' % msg.tostring()))
    if (not is_user_exn(exn_object)):
        # Non-user (internal) exceptions additionally get a stack trace.
        display_stack_trace(port, cont)
    return return_void(env, cont)
class TestGPUTorchConnector(QiskitMachineLearningTestCase, TestTorchConnector):
    """Runs the TorchConnector test-suite on a CUDA device; skipped when no
    GPU is available."""

    def setUp(self):
        super().setup_test()
        super().setUp()
        import torch

        if torch.cuda.is_available():
            self._device = torch.device('cuda')
        else:
            # skipTest raises, aborting the rest of setUp.
            self.skipTest('CUDA is not available')
class ConvNetwork(LayersPowered, Serializable):
    """Convolutional network: a stack of Conv2D layers followed by dense
    layers (or a spatial-expected-softmax head), built with the project's
    TF layer library ``L``.
    """

    def __init__(self, name, input_shape, output_dim, conv_filters, conv_filter_sizes, conv_strides, conv_pads, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, batch_normalization=False, weight_normalization=False):
        """Build the network graph inside a variable scope named *name*.

        input_shape: 3-tuple (assumed H, W, C -- TODO confirm) or 2-tuple
            (a leading channel dim of 1 is prepended); other lengths are
            passed through unreshaped.
        conv_filters/conv_filter_sizes/conv_strides/conv_pads: per-layer
            convolution settings, iterated in lockstep.
        """
        Serializable.quick_init(self, locals())
        with tf.variable_scope(name):
            if (input_layer is not None):
                # Caller supplied the input layer; use it as-is.
                l_in = input_layer
                l_hid = l_in
            elif (len(input_shape) == 3):
                # Flat input placeholder, reshaped to (batch,) + input_shape.
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name='input')
                l_hid = L.reshape(l_in, (([0],) + input_shape), name='reshape_input')
            elif (len(input_shape) == 2):
                # 2-D input: prepend a singleton channel dimension.
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)), input_var=input_var, name='input')
                input_shape = ((1,) + input_shape)
                l_hid = L.reshape(l_in, (([0],) + input_shape), name='reshape_input')
            else:
                l_in = L.InputLayer(shape=((None,) + input_shape), input_var=input_var, name='input')
                l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            # Convolutional stack.
            for (idx, conv_filter, filter_size, stride, pad) in zip(range(len(conv_filters)), conv_filters, conv_filter_sizes, conv_strides, conv_pads):
                l_hid = L.Conv2DLayer(l_hid, num_filters=conv_filter, filter_size=filter_size, stride=(stride, stride), pad=pad, nonlinearity=hidden_nonlinearity, name=('conv_hidden_%d' % idx), weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
            if (output_nonlinearity == L.spatial_expected_softmax):
                # Spatial soft-argmax head: outputs an (x, y) pair per filter,
                # hence output_dim must be 2 * last conv's filter count.
                assert (len(hidden_sizes) == 0)
                assert (output_dim == (conv_filters[(- 1)] * 2))
                l_hid.nonlinearity = tf.identity
                l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
            else:
                # Dense head on the flattened conv features.
                l_hid = L.flatten(l_hid, name='conv_flatten')
                for (idx, hidden_size) in enumerate(hidden_sizes):
                    l_hid = L.DenseLayer(l_hid, num_units=hidden_size, nonlinearity=hidden_nonlinearity, name=('hidden_%d' % idx), W=hidden_W_init, b=hidden_b_init, weight_normalization=weight_normalization)
                    if batch_normalization:
                        l_hid = L.batch_norm(l_hid)
                l_out = L.DenseLayer(l_hid, num_units=output_dim, nonlinearity=output_nonlinearity, name='output', W=output_W_init, b=output_b_init, weight_normalization=weight_normalization)
                if batch_normalization:
                    l_out = L.batch_norm(l_out)
            self._l_in = l_in
            self._l_out = l_out
            LayersPowered.__init__(self, l_out)

    # NOTE(review): the accessors below are plain methods here, but similar
    # networks in this codebase expose them as @property -- the decorators
    # may have been lost in extraction; confirm before changing call sites.
    def input_layer(self):
        return self._l_in

    def output_layer(self):
        return self._l_out

    def input_var(self):
        return self._l_in.input_var
def _check_shape_type(shape):
out = []
try:
shape = np.atleast_1d(shape)
for s in shape:
if (isinstance(s, np.ndarray) and (s.ndim > 0)):
raise TypeError(f'Value {s} is not a valid integer')
o = int(s)
if (o != s):
raise TypeError(f'Value {s} is not a valid integer')
out.append(o)
except Exception:
raise TypeError(f'Supplied value {shape} does not represent a valid shape')
return tuple(out) |
def cocofy_lvis(input_filename, output_filename):
    """Rewrite an LVIS annotation file so its categories use COCO ids.

    Keeps only annotations/categories whose synset maps to a COCO category
    (via COCO_SYNSET_CATEGORIES), remaps per-image category-id lists, and
    writes the result to *output_filename*.
    """
    with open(input_filename, 'r') as f:
        lvis_json = json.load(f)
    # Pop annotations BEFORE deepcopy so the (large) annotation list is not
    # copied; it is restored onto lvis_json right after.
    lvis_annos = lvis_json.pop('annotations')
    cocofied_lvis = copy.deepcopy(lvis_json)
    lvis_json['annotations'] = lvis_annos
    lvis_cat_id_to_synset = {cat['id']: cat['synset'] for cat in lvis_json['categories']}
    synset_to_coco_cat_id = {x['synset']: x['coco_cat_id'] for x in COCO_SYNSET_CATEGORIES}
    synsets_to_keep = set(synset_to_coco_cat_id.keys())
    # Counts of instances per COCO category (later reduced to a key set).
    coco_cat_id_with_instances = defaultdict(int)
    new_annos = []
    ann_id = 1  # annotations are renumbered from 1
    for ann in lvis_annos:
        lvis_cat_id = ann['category_id']
        synset = lvis_cat_id_to_synset[lvis_cat_id]
        if (synset not in synsets_to_keep):
            continue
        coco_cat_id = synset_to_coco_cat_id[synset]
        new_ann = copy.deepcopy(ann)
        new_ann['category_id'] = coco_cat_id
        new_ann['id'] = ann_id
        ann_id += 1
        new_annos.append(new_ann)
        coco_cat_id_with_instances[coco_cat_id] += 1
    cocofied_lvis['annotations'] = new_annos
    # Remap the per-image negative / non-exhaustive category id lists.
    for image in cocofied_lvis['images']:
        for key in ['not_exhaustive_category_ids', 'neg_category_ids']:
            new_category_list = []
            for lvis_cat_id in image[key]:
                synset = lvis_cat_id_to_synset[lvis_cat_id]
                if (synset not in synsets_to_keep):
                    continue
                coco_cat_id = synset_to_coco_cat_id[synset]
                new_category_list.append(coco_cat_id)
                coco_cat_id_with_instances[coco_cat_id] += 1
            image[key] = new_category_list
    # From here on only membership matters, so collapse counts to a set.
    coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())
    new_categories = []
    for cat in lvis_json['categories']:
        synset = cat['synset']
        if (synset not in synsets_to_keep):
            continue
        coco_cat_id = synset_to_coco_cat_id[synset]
        # Drop categories that ended up with no annotations or image refs.
        if (coco_cat_id not in coco_cat_id_with_instances):
            continue
        new_cat = copy.deepcopy(cat)
        new_cat['id'] = coco_cat_id
        new_categories.append(new_cat)
    cocofied_lvis['categories'] = new_categories
    with open(output_filename, 'w') as f:
        json.dump(cocofied_lvis, f)
    print('{} is COCOfied and stored in {}.'.format(input_filename, output_filename))
class SAFENC(BaseFileHandler):
    """File handler for Sentinel-1 SAFE NetCDF (owi* wind) products.

    Opens the file lazily with xarray and exposes the owi* variables with
    dimensions renamed to the (y, x) convention.
    """

    def __init__(self, filename, filename_info, filetype_info):
        super(SAFENC, self).__init__(filename, filename_info, filetype_info)
        # Timestamps and polarization parsed from the file name pattern.
        self._start_time = filename_info['start_time']
        self._end_time = filename_info['end_time']
        self._fstart_time = filename_info['fstart_time']
        self._fend_time = filename_info['fend_time']
        self._polarization = filename_info['polarization']
        # Lazily populated caches.
        self.lats = None
        self.lons = None
        self._shape = None
        self.area = None
        # mask_and_scale=False keeps raw values; fill values are replaced
        # manually in _get_data_channels.
        self.nc = xr.open_dataset(filename, decode_cf=True, mask_and_scale=False, chunks={'owiAzSize': CHUNK_SIZE, 'owiRaSize': CHUNK_SIZE})
        # Normalise dimension names to y/x.
        self.nc = self.nc.rename({'owiAzSize': 'y'})
        self.nc = self.nc.rename({'owiRaSize': 'x'})
        self.filename = filename

    def get_dataset(self, key, info):
        """Load the dataset named by *key* (lat/lon or an owi* channel)."""
        if (key['name'] in ['owiLat', 'owiLon']):
            # Cache the coordinate arrays on first access.
            if ((self.lons is None) or (self.lats is None)):
                self.lons = self.nc['owiLon']
                self.lats = self.nc['owiLat']
            if (key['name'] == 'owiLat'):
                res = self.lats
            else:
                res = self.lons
            res.attrs = info
        else:
            res = self._get_data_channels(key, info)
        if ('missionName' in self.nc.attrs):
            res.attrs.update({'platform_name': self.nc.attrs['missionName']})
        res.attrs.update({'fstart_time': self._fstart_time})
        res.attrs.update({'fend_time': self._fend_time})
        # Remember the first dataset's shape.
        if (not self._shape):
            self._shape = res.shape
        return res

    def _get_data_channels(self, key, info):
        """Wrap variable *key* in a DataArray with canonical dimension names."""
        res = self.nc[key['name']]
        if (key['name'] in ['owiHs', 'owiWl', 'owiDirmet']):
            res = xr.DataArray(res, dims=['y', 'x', 'oswPartitions'])
        elif (key['name'] in ['owiNrcs', 'owiNesz', 'owiNrcsNeszCorr']):
            res = xr.DataArray(res, dims=['y', 'x', 'oswPolarisation'])
        elif (key['name'] in ['owiPolarisationName']):
            res = xr.DataArray(res, dims=['owiPolarisation'])
        elif (key['name'] in ['owiCalConstObsi', 'owiCalConstInci']):
            res = xr.DataArray(res, dims=['owiIncSize'])
        elif key['name'].startswith('owi'):
            res = xr.DataArray(res, dims=['y', 'x'])
        else:
            # Non-owi variables default to 2-D y/x as well.
            res = xr.DataArray(res, dims=['y', 'x'])
        res.attrs.update(info)
        if ('_FillValue' in res.attrs):
            # Replace fill values with NaN (decoding was disabled above).
            res = res.where((res != res.attrs['_FillValue']))
            res.attrs['_FillValue'] = np.nan
        return res

    # NOTE(review): the accessors below are plain methods here, but satpy
    # file handlers normally declare start_time/end_time as @property --
    # the decorators may have been lost in extraction; confirm.
    def start_time(self):
        return self._start_time

    def end_time(self):
        return self._end_time

    def fstart_time(self):
        return self._fstart_time

    def fend_time(self):
        return self._fend_time
def translate(context, text, disambiguation=None):
    """Translate *text* through Qt and wrap the result in a Translation.

    The returned object carries the translated main text, the original
    untranslated text, the tooltip part, and a lookup key derived from
    the untranslated main part.
    """
    translated = QtCore.QCoreApplication.translate(context, text, disambiguation)
    main_text, tooltip = _splitMainAndTt(translated)
    result = Translation(main_text)
    result.original = text
    result.tt = tooltip
    # Key comes from the *untranslated* text so it is locale-independent.
    result.key = _splitMainAndTt(text)[0].strip()
    return result
def _decorator_ignore_request_apikey(func):
(func)
def wrapper(self, request, spider):
url = urlparse(request.url)
query_args = parse_qs(url.query)
apikey = query_args.get('apikey', list())
if (len(apikey) != 0):
del query_args['apikey']
token = query_args.get('token', list())
if (len(token) != 0):
del query_args['token']
_url = '?'.join([('%s://%s%s' % (url.scheme, url.netloc, url.path)), urlencode({k: (v[0] if (len(v) > 0) else '') for (k, v) in query_args.items()})])
_request = request.replace(url=_url)
rlt = func(self, _request, spider)
return rlt
return wrapper |
# NOTE(review): the line below appears to be a @pytest.mark.parametrize
# decorator whose '@pytest.mark' prefix was lost in extraction -- confirm
# against the original file before running.
.parametrize('source, expected', [("html.div(dict(camelCase='test'))", "html.div(dict(camel_case='test'))"), ("reactpy.html.button({'onClick': block_forever})", "reactpy.html.button({'on_click': block_forever})"), ("html.div(dict(style={'testThing': test}))", "html.div(dict(style={'test_thing': test}))"), ('html.div(dict(style=dict(testThing=test)))', 'html.div(dict(style=dict(test_thing=test)))'), ("vdom('tag', dict(camelCase='test'))", "vdom('tag', dict(camel_case='test'))"), ("vdom('tag', dict(camelCase='test', **props))", "vdom('tag', dict(camel_case='test', **props))"), ("html.div({'camelCase': test, 'data-thing': test})", "html.div({'camel_case': test, 'data-thing': test})"), ("html.div({'camelCase': test, ignore: this})", "html.div({'camel_case': test, ignore: this})"), ("html.div({'snake_case': test})", None), ("html.div({'data-case': test})", None), ("html.div(dict(snake_case='test'))", None), ('html.div()', None), ("vdom('tag')", None), ("html.div('child')", None), ("vdom('tag', 'child')", None)], ids=(lambda item: (' '.join(map(str.strip, item.split())) if isinstance(item, str) else item)))
def test_generate_rewrite(source, expected):
    """Each (source, expected) pair checks camelCase -> snake_case prop
    rewriting; expected=None means generate_rewrite proposes no change."""
    actual = generate_rewrite(Path('test.py'), dedent(source).strip())
    if isinstance(expected, str):
        expected = dedent(expected).strip()
    assert (actual == expected)
class alexnet(torch.nn.Module):
    """AlexNet feature extractor split into five slices, one per ReLU stage.

    forward() returns a namedtuple holding the activation after each slice.
    """

    def __init__(self, requires_grad=False, pretrained=True):
        super(alexnet, self).__init__()
        features = models.alexnet(pretrained=pretrained).features
        # Layer index ranges delimiting the five feature slices.
        stage_bounds = ((0, 2), (2, 5), (5, 8), (8, 10), (10, 12))
        self.N_slices = len(stage_bounds)
        for stage, (start, stop) in enumerate(stage_bounds, start=1):
            stage_seq = torch.nn.Sequential()
            for layer_idx in range(start, stop):
                stage_seq.add_module(str(layer_idx), features[layer_idx])
            setattr(self, 'slice%d' % stage, stage_seq)
        if not requires_grad:
            # Freeze all weights when used purely as a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        """Return the per-stage activations as an AlexnetOutputs namedtuple."""
        activations = []
        h = X
        for stage in range(1, self.N_slices + 1):
            h = getattr(self, 'slice%d' % stage)(h)
            activations.append(h)
        alexnet_outputs = namedtuple('AlexnetOutputs', ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
        return alexnet_outputs(*activations)
def test_packed_array_port_array(do_test):
    """A struct port containing a 3x2 packed array of Bits32 flattens to
    one scalar port per element in yosys-compatible mode."""
    class struct():
        # Scalar field plus a 3x2 packed array field.
        bar: Bits32
        foo: ([([Bits32] * 2)] * 3)
    class A(Component):
        def construct(s):
            s.in_ = [InPort(struct) for _ in range(2)]
    a = A()
    foo = rdt.PackedArray([3, 2], rdt.Vector(32))
    st = rdt.Struct(struct, {'bar': rdt.Vector(32), 'foo': foo})
    # Expected ports when struct/array types are kept intact.
    a._ref_ports = [(['clk'], 'clk', rt.Port('input', rdt.Vector(1)), 0), (['in_'], 'in_', rt.Array([2], rt.Port('input', st)), 0), (['reset'], 'reset', rt.Port('input', rdt.Vector(1)), 0)]
    # Expected fully-flattened port names for the yosys backend.
    a._ref_ports_yosys = [(['clk'], 'clk', rt.Port('input', rdt.Vector(1)), 0), (['in_[0].bar'], 'in___0__bar', rt.Port('input', rdt.Vector(32)), 0), (['in_[0].foo[0][0]'], 'in___0__foo__0__0', rt.Port('input', rdt.Vector(32)), 0), (['in_[0].foo[0][1]'], 'in___0__foo__0__1', rt.Port('input', rdt.Vector(32)), 0), (['in_[0].foo[1][0]'], 'in___0__foo__1__0', rt.Port('input', rdt.Vector(32)), 0), (['in_[0].foo[1][1]'], 'in___0__foo__1__1', rt.Port('input', rdt.Vector(32)), 0), (['in_[0].foo[2][0]'], 'in___0__foo__2__0', rt.Port('input', rdt.Vector(32)), 0), (['in_[0].foo[2][1]'], 'in___0__foo__2__1', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].bar'], 'in___1__bar', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].foo[0][0]'], 'in___1__foo__0__0', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].foo[0][1]'], 'in___1__foo__0__1', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].foo[1][0]'], 'in___1__foo__1__0', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].foo[1][1]'], 'in___1__foo__1__1', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].foo[2][0]'], 'in___1__foo__2__0', rt.Port('input', rdt.Vector(32)), 0), (['in_[1].foo[2][1]'], 'in___1__foo__2__1', rt.Port('input', rdt.Vector(32)), 0), (['reset'], 'reset', rt.Port('input', rdt.Vector(1)), 0)]
    do_test(a)
def test_axles():
    """Axles equality, XML round-trip and per-version validation."""
    front_axle = OSC.Axle(2, 2, 2, 1, 1)
    rear_axle = OSC.Axle(1, 1, 2, 1, 1)
    extra_axle = OSC.Axle(1, 1, 2, 1, 1)
    extra_axle2 = OSC.Axle(2, 3, 1, 3, 2)

    def build_full():
        # An Axles with both additional axles attached.
        built = OSC.Axles(front_axle, rear_axle)
        built.add_axle(extra_axle)
        built.add_axle(extra_axle2)
        return built

    axles = build_full()
    prettyprint(axles.get_element())
    axles2 = build_full()
    axles3 = OSC.Axles(front_axle, rear_axle)
    assert axles == axles2
    assert axles != axles3
    # Serialising to XML and parsing back must preserve equality.
    axles4 = OSC.Axles.parse(axles.get_element())
    assert axles == axles4
    for version in (0, 1, 2):
        assert version_validation('Axles', axles, version) == ValidationResponse.OK
class GraspNetBaseLine():
    """Inference wrapper around a pretrained GraspNet model.

    Loads a checkpoint once at construction, then turns Open3D point clouds
    into (optionally collision-filtered) GraspGroup predictions.
    """

    def __init__(self, checkpoint_path, num_point=20000, num_view=300, collision_thresh=0.001, empty_thresh=0.15, voxel_size=0.01):
        """Store configuration and load the network.

        checkpoint_path: path to a saved GraspNet checkpoint.
        num_point: number of points sampled from the input cloud.
        collision_thresh: <= 0 disables collision filtering in inference().
        voxel_size: voxel size used by the collision detector.
        """
        self.checkpoint_path = checkpoint_path
        self.num_point = num_point
        self.num_view = num_view
        self.collision_thresh = collision_thresh
        self.empty_thresh = empty_thresh
        self.voxel_size = voxel_size
        self.net = self.get_net()

    def get_net(self):
        """Build GraspNet, load the checkpoint, and return it in eval mode."""
        net = GraspNet(input_feature_dim=0, num_view=self.num_view, num_angle=12, num_depth=4, cylinder_radius=0.05, hmin=(-0.02), hmax_list=[0.01, 0.02, 0.03, 0.04], is_training=False)
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        net.to(device)
        checkpoint = torch.load(self.checkpoint_path)
        net.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        print('-> loaded checkpoint %s (epoch: %d)' % (self.checkpoint_path, start_epoch))
        net.eval()
        return net

    def get_and_process_data(self, cloud):
        """Downsample *cloud* and pack exactly num_point points for the net.

        Returns (end_points, cloud) where end_points holds the sampled point
        tensor (on the net's device) and sampled colors, and cloud is a new
        Open3D point cloud of the full downsampled points.
        """
        cloud = cloud.voxel_down_sample(0.001)
        cloud_masked = np.asarray(cloud.points)
        color_masked = np.asarray(cloud.colors)
        # Sample exactly num_point points: without replacement if we have
        # enough, otherwise pad by re-sampling with replacement.
        if (len(cloud_masked) >= self.num_point):
            idxs = np.random.choice(len(cloud_masked), self.num_point, replace=False)
        else:
            idxs1 = np.arange(len(cloud_masked))
            idxs2 = np.random.choice(len(cloud_masked), (self.num_point - len(cloud_masked)), replace=True)
            idxs = np.concatenate([idxs1, idxs2], axis=0)
        cloud_sampled = cloud_masked[idxs]
        color_sampled = color_masked[idxs]
        # Rebuild a full-resolution cloud for later collision checking.
        cloud = o3d.geometry.PointCloud()
        cloud.points = o3d.utility.Vector3dVector(cloud_masked.astype(np.float32))
        cloud.colors = o3d.utility.Vector3dVector(color_masked.astype(np.float32))
        end_points = dict()
        cloud_sampled = torch.from_numpy(cloud_sampled[np.newaxis].astype(np.float32))
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        cloud_sampled = cloud_sampled.to(device)
        end_points['point_clouds'] = cloud_sampled
        end_points['cloud_colors'] = color_sampled
        return (end_points, cloud)

    def get_grasps(self, end_points):
        """Run the network and decode its output into a GraspGroup."""
        with torch.no_grad():
            end_points = self.net(end_points)
            grasp_preds = pred_decode(end_points)
        # (Removed an unused matplotlib import and an unused
        # objectness_score read that were dead code here.)
        gg_array = grasp_preds[0].detach().cpu().numpy()
        gg = GraspGroup(gg_array)
        return gg

    def inference(self, o3d_pcd):
        """Predict grasps for an Open3D point cloud, filtering collisions
        when collision_thresh > 0."""
        (end_points, cloud) = self.get_and_process_data(o3d_pcd)
        gg = self.get_grasps(end_points)
        if (self.collision_thresh > 0):
            gg = self.collision_detection(gg, np.array(cloud.points))
        return gg

    def collision_detection(self, gg, cloud):
        """Drop grasps that collide with *cloud* (Nx3 points array)."""
        mfcdetector = ModelFreeCollisionDetector(cloud, voxel_size=self.voxel_size)
        collision_mask = mfcdetector.detect(gg, approach_dist=0.05, collision_thresh=self.collision_thresh, empty_thresh=self.empty_thresh)
        gg = gg[(~ collision_mask)]
        return gg
# NOTE(review): the line below appears to be a @pytest.mark.parametrize
# decorator whose '@pytest.mark' prefix was lost in extraction -- confirm
# against the original file before running.
.parametrize('namespace,repository,uuid,expected_code', [('devtable', 'simple', 'exists', 200), ('devtable', 'simple', 'not found', 404)])
def test_get_repo_notification(namespace, repository, uuid, expected_code, authd_client, monkeypatch):
    """GET on a repo notification returns 200 for an existing uuid, 404 otherwise."""
    # Patch the model lookup so the endpoint sees a canned notification.
    monkeypatch.setattr('endpoints.api.repositorynotification.model.get_repo_notification', mock_get_notification(uuid))
    params = {'repository': ((namespace + '/') + repository), 'uuid': uuid}
    conduct_api_call(authd_client, RepositoryNotification, 'GET', params, expected_code=expected_code)
class PickupExporterSolo(PickupExporter):
    """Pickup exporter for single-player sessions: every pickup belongs to
    the local player, so collection text always comes from the local memo
    data and other_player is always False."""

    def __init__(self, memo_data: dict[(str, str)], game: RandovaniaGame):
        self.memo_data = memo_data
        super().__init__(game)

    def create_details(self, original_index: PickupIndex, pickup_target: PickupTarget, visual_pickup: PickupEntry, model_pickup: PickupEntry, model_style: PickupModelStyle, name: str, description: str) -> ExportedPickupDetails:
        pickup = pickup_target.pickup
        return ExportedPickupDetails(
            index=original_index,
            name=name,
            description=description,
            collection_text=_calculate_collection_text(pickup, visual_pickup, model_style, self.memo_data),
            conditional_resources=_conditional_resources_for_pickup(pickup),
            conversion=list(pickup.convert_resources),
            model=self.get_model(model_pickup),
            original_model=model_pickup.model,
            other_player=False,
            original_pickup=pickup,
        )
def test():
    """Bring up the ILI9341 display, start the demo, and idle until Ctrl-C."""
    # NOTE(review): the original line read ``baudrate=`` with the value
    # missing (a syntax error). 10 MHz is a conservative SPI clock for this
    # display -- confirm against the hardware's intended setting.
    spi1 = SPI(1, baudrate=10000000, sck=Pin(14), mosi=Pin(13))
    display = Display(spi1, dc=Pin(4), cs=Pin(16), rst=Pin(17))
    spi2 = SPI(2, baudrate=1000000, sck=Pin(18), mosi=Pin(23), miso=Pin(19))
    Demo(display, spi2)
    try:
        # Idle in a loop so interrupt handlers keep driving the demo.
        while True:
            idle()
    except KeyboardInterrupt:
        print('\nCtrl-C pressed. Cleaning up and exiting...')
    finally:
        # Always release the display, even on error or interrupt.
        display.cleanup()
class BaseCorr3dMM(OpenMPOp, _NoPythonOp):
    """Base Op for CPU 3-D correlation implemented via GEMM.

    Subclasses choose a direction (forward pass, gradient w.r.t. weights,
    or gradient w.r.t. inputs) via the ``_direction`` class attribute; the
    computation itself is done by the C template in ``c_code/corr3d_gemm.c``,
    specialised by ``c_code_helper``.
    """

    check_broadcast = False
    __props__ = ('border_mode', 'subsample', 'filter_dilation', 'num_groups')
    # Set by subclasses to 'forward', 'backprop weights' or 'backprop inputs';
    # validated in __init__.
    _direction: Optional[str] = None
    params_type = ParamsType(direction=EnumList(('DIRECTION_FORWARD', 'forward'), ('DIRECTION_BACKPROP_WEIGHTS', 'backprop weights'), ('DIRECTION_BACKPROP_INPUTS', 'backprop inputs')), dH=int64, dW=int64, dD=int64, dilH=int64, dilW=int64, dilD=int64, padH=int64, padW=int64, padD=int64, num_groups=int64)

    def __init__(self, border_mode='valid', subsample=(1, 1, 1), filter_dilation=(1, 1, 1), openmp=None, num_groups=1):
        """Validate and store the convolution geometry.

        border_mode: 'valid' | 'full' | 'half' | int | 3-tuple of ints.
        subsample, filter_dilation: 3-tuples (strides / dilations).
        num_groups: number of filter groups (grouped convolution), >= 1.
        """
        super().__init__(openmp=openmp)
        if isinstance(border_mode, int):
            if (border_mode < 0):
                raise ValueError('invalid border_mode {}, which must be a non-negative integer'.format(border_mode))
            # A single int pads all three spatial dimensions equally.
            border_mode = (border_mode, border_mode, border_mode)
        if isinstance(border_mode, tuple):
            if ((len(border_mode) != 3) or (min(border_mode) < 0)):
                raise ValueError('invalid border_mode {}, which must be a tuple of three non-negative integers'.format(border_mode))
            (pad_h, pad_w, pad_d) = map(int, border_mode)
            border_mode = (pad_h, pad_w, pad_d)
        if (not ((isinstance(border_mode, tuple) and (min(border_mode) >= 0)) or (border_mode in ('valid', 'full', 'half')))):
            raise ValueError('invalid border_mode {}, which must be either "valid", "full", "half", an integer or a tuple of three integers'.format(border_mode))
        self.border_mode = border_mode
        if (len(subsample) != 3):
            raise ValueError('subsample must have three elements')
        if (len(filter_dilation) != 3):
            raise ValueError('filter_dilation must have three elements')
        self.subsample = tuple(subsample)
        self.filter_dilation = tuple(filter_dilation)
        if (num_groups < 1):
            raise ValueError('Number of groups should be greater than 0')
        self.num_groups = num_groups
        # Detect the BLAS flavour from the configured link flags; this only
        # selects which thread-control helpers the C code uses.
        if (not config.blas__ldflags):
            self.blas_type = ''
        elif ('openblas' in config.blas__ldflags):
            self.blas_type = 'openblas'
        elif ('mkl' in config.blas__ldflags):
            self.blas_type = 'mkl'
        else:
            self.blas_type = ''
        if (self._direction not in ('forward', 'backprop weights', 'backprop inputs')):
            raise ValueError("_direction must be one of 'forward', 'backprop weights', 'backprop inputs'")

    # NOTE(review): padH/padW/padD below subscript ``self.pad`` -- that only
    # works if ``pad`` is a property, so a ``@property`` decorator appears to
    # have been lost in extraction; confirm against the original source.
    def pad(self):
        # Encode symbolic border modes as sentinel pads for the C code:
        # -1 means 'half', -2 means 'full'; explicit tuples pass through.
        if (self.border_mode == 'half'):
            return ((- 1), (- 1), (- 1))
        elif (self.border_mode == 'full'):
            return ((- 2), (- 2), (- 2))
        elif isinstance(self.border_mode, tuple):
            return self.border_mode
        else:
            assert (self.border_mode == 'valid')
            return (0, 0, 0)

    # Convenience accessors used when filling the C-code template.
    direction = property((lambda self: self.params_type.enum_from_alias(self._direction)))
    dH = property((lambda self: self.subsample[0]))
    dW = property((lambda self: self.subsample[1]))
    dD = property((lambda self: self.subsample[2]))
    dilH = property((lambda self: self.filter_dilation[0]))
    dilW = property((lambda self: self.filter_dilation[1]))
    dilD = property((lambda self: self.filter_dilation[2]))
    padH = property((lambda self: self.pad[0]))
    padW = property((lambda self: self.pad[1]))
    padD = property((lambda self: self.pad[2]))

    def __str__(self):
        # e.g. "Corr3dMM{valid, (1, 1, 1), (1, 1, 1), 1}"
        return '{}{{{}, {}, {}, {}}}'.format(self.__class__.__name__, self.border_mode, str(self.subsample), str(self.filter_dilation), str(self.num_groups))

    # NOTE(review): no ``self`` parameter -- this was presumably a
    # ``@staticmethod`` whose decorator was lost in extraction; confirm.
    def as_common_dtype(in1, in2):
        """Upcast both inputs to their common dtype."""
        dtype = pytensor.scalar.upcast(in1.dtype, in2.dtype)
        return (in1.astype(dtype), in2.astype(dtype))

    def __setstate__(self, d):
        # Backward compatibility: older pickles predate ``num_groups``.
        self.__dict__.update(d)
        if (not hasattr(self, 'num_groups')):
            self.num_groups = 1

    def c_support_code(self, **kwargs):
        """Return BLAS headers, plus thread helpers for OpenBLAS/MKL."""
        ccodes = blas_headers.blas_header_text()
        if (self.blas_type == 'openblas'):
            ccodes += blas_headers.openblas_threads_text()
        elif (self.blas_type == 'mkl'):
            ccodes += blas_headers.mkl_threads_text()
        return ccodes

    def c_libraries(self, **kwargs):
        return ldflags()

    def c_compile_args(self, **kwargs):
        compile_args = ldflags(libs=False, flags=True)
        compile_args += super().c_compile_args(**kwargs)
        return compile_args

    def c_lib_dirs(self, **kwargs):
        return ldflags(libs=False, libs_dir=True)

    def c_header_dirs(self, **kwargs):
        return ldflags(libs=False, include_dir=True)

    def c_headers(self, **kwargs):
        headers = ['<stdio.h>']
        headers += super().c_headers(**kwargs)
        return headers

    def c_code_cache_version(self):
        # Bump the leading number whenever the C code changes.
        return (8, self.openmp, blas_header_version())

    def c_support_code_apply(self, node, nodename):
        """Load corr3d_gemm.c and specialise it for dtype, OpenMP and BLAS."""
        sub = {}
        dtype = str(node.__dict__['inputs'][0].dtype)
        assert (dtype in ('float32', 'float64'))
        # dtype-specific GEMM routine and C types.
        if (dtype == 'float32'):
            sub['gemm'] = 'sgemm_'
            sub['float_type'] = 'npy_float'
            sub['float_typenum'] = 'NPY_FLOAT'
            sub['n_bytes'] = 4
            sub['c_float_type'] = 'float'
        else:
            sub['gemm'] = 'dgemm_'
            sub['float_type'] = 'npy_double'
            sub['float_typenum'] = 'NPY_DOUBLE'
            sub['n_bytes'] = 8
            sub['c_float_type'] = 'double'
        # OpenMP / BLAS threading hooks (no-ops when disabled).
        if self.openmp:
            sub['omp_flags'] = '#pragma omp parallel for schedule(static)'
            sub['omp_get_max_threads'] = 'omp_get_max_threads()'
            sub['omp_get_thread_num'] = 'omp_get_thread_num()'
            if (self.blas_type == 'openblas'):
                sub['blas_set_num_threads'] = 'openblas_set_num_threads'
                sub['blas_get_num_threads'] = 'openblas_get_num_threads()'
            elif (self.blas_type == 'mkl'):
                sub['blas_set_num_threads'] = 'mkl_set_num_threads'
                sub['blas_get_num_threads'] = 'mkl_get_max_threads()'
            else:
                sub['blas_set_num_threads'] = ''
                sub['blas_get_num_threads'] = '0'
        else:
            sub['omp_flags'] = ''
            sub['omp_get_max_threads'] = '1'
            sub['omp_get_thread_num'] = '0'
            sub['blas_set_num_threads'] = ''
            sub['blas_get_num_threads'] = '0'
        final_code = ''
        with open(os.path.join(os.path.split(__file__)[0], os.path.join('c_code', 'corr3d_gemm.c'))) as f:
            code = f.read()
        final_code += code
        return (final_code % sub)

    def c_code_helper(self, bottom, weights, top, sub, height=None, width=None, depth=None):
        """Emit the C driver code shared by all three directions.

        bottom/weights/top: C variable names of the three arrays.
        height/width/depth: optional C variable names holding explicit
            output sizes; required for some backprop configurations.
        """
        # For each spatial size: either read it from the given variable, or
        # require that it is inferable (-1 lets the C code infer it).
        if height:
            height = f'(*(npy_int64 *)(PyArray_DATA({height})))'
        else:
            if (((self.direction != 0) and (self.dH != 1)) or ((self.direction == 1) and (self.padH == (- 1)))):
                raise ValueError("height must be given for backprop with vertical sampling or border_mode='half'")
            height = '-1'
        if width:
            width = f'(*(npy_int64 *)(PyArray_DATA({width})))'
        else:
            if (((self.direction != 0) and (self.dW != 1)) or ((self.direction == 1) and (self.padW == (- 1)))):
                raise ValueError("width must be given for backprop with horizontal sampling or border_mode='half'")
            width = '-1'
        if depth:
            depth = f'(*(npy_int64 *)(PyArray_DATA({depth})))'
        else:
            if (((self.direction != 0) and (self.dD != 1)) or ((self.direction == 1) and (self.padD == (- 1)))):
                raise ValueError("depth must be given for backprop with depth sampling or border_mode='half'")
            depth = '-1'
        # The template below selects the output array per direction, infers
        # kernel/output shapes (handling half/full auto-padding), allocates
        # the output, and calls corr3dMM from corr3d_gemm.c.
        # NOTE(review): in the "Prepare output array" section the reuse check
        # tests PyArray_NDIM(*out)==4 yet indexes dims [0]..[4] of a 5-D
        # array -- the 4 looks like it should be 5; verify before relying on
        # output-buffer reuse.
        return ('\n // Mandatory args\n int direction = %(params)s->direction; // forward, bprop weights, bprop inputs\n\n // Optional args\n int dH = %(params)s->dH;\n int dW = %(params)s->dW;\n int dD = %(params)s->dD;\n int dilH = %(params)s->dilH;\n int dilW = %(params)s->dilW;\n int dilD = %(params)s->dilD;\n int padH = %(params)s->padH;\n int padW = %(params)s->padW;\n int padD = %(params)s->padD;\n int numgroups = %(params)s->num_groups;\n\n PyArrayObject * bottom = %(bottom)s;\n PyArrayObject * weights = %(weights)s;\n PyArrayObject * top = %(top)s;\n PyArrayObject * out2 = NULL;\n PyArrayObject **out = NULL;\n\n switch(%(params)s->direction) {\n case DIRECTION_FORWARD:\n out = &%(top)s;\n break;\n case DIRECTION_BACKPROP_WEIGHTS:\n out = &%(weights)s;\n break;\n case DIRECTION_BACKPROP_INPUTS:\n out = &%(bottom)s;\n break;\n default:\n PyErr_SetString(PyExc_ValueError, "CPU Corr3dMM: Invalid direction.");\n {%(fail)s}\n break;\n }\n\n // Obtain or infer kernel width, height and depth\n // (we need to know it early to be able to handle auto-padding)\n int kH, kW, kD, dil_kH, dil_kW, dil_kD;\n if (direction != 1) {\n // weight is an input variable, we can just read its shape\n kH = PyArray_DIMS(weights)[2];\n kW = PyArray_DIMS(weights)[3];\n kD = PyArray_DIMS(weights)[4];\n }\n else {\n if (%(height)s != -1) {\n // kernel height is specified (perhaps vertical subsampling or half padding)\n kH = %(height)s;\n }\n else if (padH == -2) {\n // vertical full padding, we can infer the kernel height\n kH = (2 - PyArray_DIMS(bottom)[2] + (PyArray_DIMS(top)[2] - 1) * dH - 1)/ dilH + 1;\n }\n else {\n // explicit padding, we can infer the kernel height\n kH = (PyArray_DIMS(bottom)[2] + 2*padH - (PyArray_DIMS(top)[2] - 1) * dH - 1) / dilH +1;\n }\n if (%(width)s != -1) {\n kW = %(width)s;\n }\n else if (padW == -2) {\n kW = (2 - PyArray_DIMS(bottom)[3] + (PyArray_DIMS(top)[3] - 1) * dW - 1) / dilW + 1;\n }\n else {\n kW = (PyArray_DIMS(bottom)[3] + 2*padW - (PyArray_DIMS(top)[3] - 1) * dW - 1) / dilW + 1;\n }\n if (%(depth)s != -1) {\n kD = %(depth)s;\n }\n else if (padD == -2) {\n kD = (2 - PyArray_DIMS(bottom)[4] + (PyArray_DIMS(top)[4] - 1) * dD - 1) / dilD + 1;\n }\n else {\n kD = (PyArray_DIMS(bottom)[4] + 2*padD - (PyArray_DIMS(top)[4] - 1) * dD - 1) / dilD + 1;\n }\n }\n\n // Implicit dilated kernel size\n dil_kH = (kH - 1) * dilH + 1;\n dil_kW = (kW - 1) * dilW + 1;\n dil_kD = (kD - 1) * dilD + 1;\n\n // Auto-padding if requested\n if (padH == -1) { // vertical half padding\n padH = dil_kH / 2;\n }\n else if (padH == -2) { // vertical full padding\n padH = dil_kH - 1;\n }\n else if (padH < 0) {\n PyErr_SetString(PyExc_ValueError, "BaseCorr3dMM: padH must be >= -2");\n %(fail)s\n }\n if (padW == -1) { // horizontal half padding\n padW = dil_kW / 2;\n }\n else if (padW == -2) { // horizontal full padding\n padW = dil_kW - 1;\n }\n else if (padW < 0) {\n PyErr_SetString(PyExc_ValueError, "BaseCorr3dMM: padW must be >= -2");\n %(fail)s\n }\n if (padD == -1) { // depth half padding\n padD = dil_kD / 2;\n }\n else if (padD == -2) { // depth full padding\n padD = dil_kD - 1;\n }\n else if (padD < 0) {\n PyErr_SetString(PyExc_ValueError, "BaseCorr3dMM: padD must be >= -2");\n %(fail)s\n }\n\n // Infer output shape\n npy_intp out_dim[5];\n switch(direction) {\n case 0: // forward pass\n // output is top: (batchsize, num_filters, height, width, depth)\n // height and width: top = (bottom + 2*pad - ((weight-1)*dil + 1)) / sample + 1\n out_dim[0] = (npy_intp)PyArray_DIMS(bottom)[0];\n out_dim[1] = (npy_intp)PyArray_DIMS(weights)[0];\n out_dim[2] = (npy_intp)((PyArray_DIMS(bottom)[2] + 2*padH - ((PyArray_DIMS(weights)[2]-1)*dilH + 1)) / dH + 1);\n out_dim[3] = (npy_intp)((PyArray_DIMS(bottom)[3] + 2*padW - ((PyArray_DIMS(weights)[3]-1)*dilW + 1)) / dW + 1);\n out_dim[4] = (npy_intp)((PyArray_DIMS(bottom)[4] + 2*padD - ((PyArray_DIMS(weights)[4]-1)*dilD + 1)) / dD + 1);\n if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0 || out_dim[4] <= 0)\n {\n PyErr_Format(PyExc_ValueError,\n "Corr3dMM: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n",\n (long int)PyArray_DIMS(bottom)[0], (long int)PyArray_DIMS(bottom)[1],\n (long int)PyArray_DIMS(bottom)[2], (long int)PyArray_DIMS(bottom)[3],\n (long int)PyArray_DIMS(bottom)[4],\n (long int)PyArray_DIMS(weights)[0], (long int)PyArray_DIMS(weights)[1],\n (long int)PyArray_DIMS(weights)[2], (long int)PyArray_DIMS(weights)[3],\n (long int)PyArray_DIMS(weights)[4],\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3], (long int)out_dim[4]);\n %(fail)s\n }\n break;\n case 1: // backprop wrt. weights\n // output is weights: (num_filters, num_channels, height, width, depth)\n // height and width: weights = (bottom + 2*pad - (top - 1) * sample - 1) / dil + 1\n out_dim[0] = (npy_intp)PyArray_DIMS(top)[1];\n out_dim[1] = (npy_intp)PyArray_DIMS(bottom)[1] / numgroups;\n out_dim[2] = (npy_intp)kH; // already inferred further above\n out_dim[3] = (npy_intp)kW; // how convenient\n out_dim[4] = (npy_intp)kD;\n if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0 || out_dim[4] <= 0)\n {\n PyErr_Format(PyExc_ValueError,\n "Corr3dMM backprop wrt. weights: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n",\n (long int)PyArray_DIMS(bottom)[0], (long int)PyArray_DIMS(bottom)[1],\n (long int)PyArray_DIMS(bottom)[2], (long int)PyArray_DIMS(bottom)[3],\n (long int)PyArray_DIMS(bottom)[4],\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3], (long int)out_dim[4],\n (long int)PyArray_DIMS(top)[0], (long int)PyArray_DIMS(top)[1],\n (long int)PyArray_DIMS(top)[2], (long int)PyArray_DIMS(top)[3],\n (long int)PyArray_DIMS(top)[4]);\n %(fail)s\n }\n break;\n case 2: // backprop wrt. inputs\n // output is bottom: (batchsize, num_channels, height, width, depth)\n // height and width: bottom = (top - 1) * sample + (weights-1)*dil + 1 - 2*pad\n out_dim[0] = (npy_intp)PyArray_DIMS(top)[0];\n out_dim[1] = (npy_intp)PyArray_DIMS(weights)[1] * numgroups;\n out_dim[2] = (npy_intp)((%(height)s != -1) ? %(height)s : (PyArray_DIMS(top)[2] - 1) * dH + (PyArray_DIMS(weights)[2]-1)*dilH + 1 - 2*padH);\n out_dim[3] = (npy_intp)((%(width)s != -1) ? %(width)s : (PyArray_DIMS(top)[3] - 1) * dW + (PyArray_DIMS(weights)[3]-1)*dilW + 1 - 2*padW);\n out_dim[4] = (npy_intp)((%(depth)s != -1) ? %(depth)s : (PyArray_DIMS(top)[4] - 1) * dD + (PyArray_DIMS(weights)[4]-1)*dilD + 1 - 2*padD);\n if (out_dim[0] < 0 || out_dim[1] < 0 || out_dim[2] <= 0 || out_dim[3] <= 0 || out_dim[4] <= 0)\n {\n PyErr_Format(PyExc_ValueError,\n "Corr3dMM backprop wrt. inputs: impossible output shape\\n"\n " bottom shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " weights shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n"\n " top shape: %%ld x %%ld x %%ld x %%ld x %%ld\\n",\n (long int)out_dim[0], (long int)out_dim[1], (long int)out_dim[2],\n (long int)out_dim[3], (long int)out_dim[4],\n (long int)PyArray_DIMS(weights)[0], (long int)PyArray_DIMS(weights)[1],\n (long int)PyArray_DIMS(weights)[2], (long int)PyArray_DIMS(weights)[3],\n (long int)PyArray_DIMS(weights)[4],\n (long int)PyArray_DIMS(top)[0], (long int)PyArray_DIMS(top)[1],\n (long int)PyArray_DIMS(top)[2], (long int)PyArray_DIMS(top)[3],\n (long int)PyArray_DIMS(top)[4]);\n %(fail)s\n }\n break;\n default:\n PyErr_SetString(PyExc_ValueError, "BaseCorr3dMM: direction must be 0, 1, or 2\\n");\n %(fail)s\n }\n\n // Prepare output array\n int typenum;\n if ( !(*out\n && PyArray_NDIM(*out)==4\n && PyArray_IS_C_CONTIGUOUS(*out)\n && PyArray_DIMS(*out)[0]==out_dim[0]\n && PyArray_DIMS(*out)[1]==out_dim[1]\n && PyArray_DIMS(*out)[2]==out_dim[2]\n && PyArray_DIMS(*out)[3]==out_dim[3]\n && PyArray_DIMS(*out)[4]==out_dim[4]))\n {\n Py_XDECREF(*out);\n if (direction != 1) {\n typenum = PyArray_TYPE(weights);\n }\n else {\n typenum = PyArray_TYPE(bottom);\n }\n //Change to PyArray_ZEROS which is faster than PyArray_EMPTY.\n *out = (PyArrayObject*)PyArray_ZEROS(5,\n out_dim,\n typenum,\n 0);\n if (NULL == *out)\n {\n PyErr_Format(PyExc_RuntimeError,\n "BaseCorr3dMM: Failed to allocate output of %%lld x %%lld x %%lld x %%lld x %%lld",\n (long long)out_dim[0], (long long)out_dim[1],\n (long long)out_dim[2], (long long)out_dim[3], (long long)out_dim[4]);\n %(fail)s\n }\n }\n\n // Call corr3dMM code\n out2 = corr3dMM(%(bottom)s, %(weights)s, %(top)s, direction,\n dH, dW, dD, dilH, dilW, dilD, padH, padW, padD,\n numgroups);\n if (out2==NULL){\n %(fail)s\n }\n assert (out2 == *out);\n\n' % dict(bottom=bottom, weights=weights, top=top, height=height, width=width, depth=depth, fail=sub['fail'], params=sub['params']))
def test_class_smoothing():
    """The tracker's class_id should follow the majority of recent detections."""
    bbox = np.array([0, 0, 10, 10])
    tracker = MultiObjectTracker(dt=0.1)
    # Two detections of class 2 outweigh the single class-1 detection.
    for cls in (1, 2, 2):
        tracker.step([Detection(box=bbox, class_id=cls)])
    assert tracker.trackers[0].class_id == 2
    # Two more class-1 detections flip the smoothed class back.
    for cls in (1, 1):
        tracker.step([Detection(box=bbox, class_id=cls)])
    assert tracker.trackers[0].class_id == 1
def _test_sharding_and_remapping(tables: List[EmbeddingBagConfig], rank: int, world_size: int, kjt_input_per_rank: List[KeyedJaggedTensor], kjt_out_per_iter_per_rank: List[List[KeyedJaggedTensor]], sharder: ModuleSharder[nn.Module], backend: str, local_size: Optional[int]=None, mch_size: Optional[int]=None) -> None:
    """Per-process test body: shard a managed-collision EBC row-wise and check
    that remapped ids match the expected outputs for two consecutive steps.

    Runs inside one spawned process per rank; failures surface as assertions.
    """
    with MultiProcessContext(rank, world_size, backend, local_size) as ctx:
        # Each rank has its own input and its own expected per-iteration output.
        kjt_input = kjt_input_per_rank[rank].to(ctx.device)
        kjt_out_per_iter = [kjt[rank].to(ctx.device) for kjt in kjt_out_per_iter_per_rank]
        return_remapped: bool = True
        sparse_arch = SparseArch(tables, torch.device('meta'), return_remapped=return_remapped, mch_size=mch_size)
        # Fuse a row-wise Adagrad optimizer into the backward pass of both tables.
        apply_optimizer_in_backward(RowWiseAdagrad, [sparse_arch._mc_ebc._embedding_bag_collection.embedding_bags['table_0'].weight, sparse_arch._mc_ebc._embedding_bag_collection.embedding_bags['table_1'].weight], {'lr': 0.01})
        module_sharding_plan = construct_module_sharding_plan(sparse_arch._mc_ebc, per_param_sharding={'table_0': row_wise(), 'table_1': row_wise()}, local_size=local_size, world_size=world_size, device_type=('cuda' if torch.cuda.is_available() else 'cpu'), sharder=sharder)
        sharded_sparse_arch = _shard_modules(module=copy.deepcopy(sparse_arch), plan=ShardingPlan({'_mc_ebc': module_sharding_plan}), env=ShardingEnv.from_process_group(ctx.pg), sharders=[sharder], device=ctx.device)
        # Sanity-check the sharded module types and that input dists were not
        # initialized eagerly.
        assert isinstance(sharded_sparse_arch._mc_ebc, ShardedManagedCollisionEmbeddingBagCollection)
        assert isinstance(sharded_sparse_arch._mc_ebc._embedding_bag_collection, ShardedEmbeddingBagCollection)
        assert (sharded_sparse_arch._mc_ebc._embedding_bag_collection._has_uninitialized_input_dist is False)
        assert ((not hasattr(sharded_sparse_arch._mc_ebc._embedding_bag_collection, '_input_dists')) or (len(sharded_sparse_arch._mc_ebc._embedding_bag_collection._input_dists) == 0))
        assert isinstance(sharded_sparse_arch._mc_ebc._managed_collision_collection, ShardedManagedCollisionCollection)
        # State dict must round-trip through load_state_dict.
        test_state_dict = sharded_sparse_arch.state_dict()
        sharded_sparse_arch.load_state_dict(test_state_dict)
        # Two forward/backward passes on the same input: the managed-collision
        # module updates its state, so remappings may differ per iteration.
        (loss1, remapped_ids1) = sharded_sparse_arch(kjt_input)
        loss1.backward()
        (loss2, remapped_ids2) = sharded_sparse_arch(kjt_input)
        loss2.backward()
        remapped_ids = [remapped_ids1, remapped_ids2]
        for key in kjt_input.keys():
            for (i, kjt_out) in enumerate(kjt_out_per_iter):
                assert torch.equal(remapped_ids[i][key].values(), kjt_out[key].values()), f'feature {key} on {ctx.rank} iteration {i} does not match, got {remapped_ids[i][key].values()}, expect {kjt_out[key].values()}'
def train(cfg: ModelSettings) -> None:
    """Train (and optionally test/profile) a DNATransformer driven by *cfg*.

    Builds the model (fresh, or resumed from a PyTorch/DeepSpeed checkpoint),
    wires up logging and callback objects from config flags, and runs a
    DeepSpeed-backed PyTorch Lightning trainer.
    """
    # --- Model construction / checkpoint resume ---
    if (cfg.load_pt_checkpoint is not None):
        load_strategy = LoadPTCheckpointStrategy(cfg.load_pt_checkpoint, cfg=cfg, generation_flag=True)
        model = load_strategy.get_model(DNATransformer)
    elif (cfg.load_ds_checkpoint is not None):
        load_strategy = LoadDeepSpeedStrategy(cfg.load_ds_checkpoint, cfg=cfg)
        model = load_strategy.get_model(DNATransformer)
        print(f'Loaded existing model at checkpoint {cfg.load_ds_checkpoint}....')
    else:
        model = DNATransformer(cfg)
    callbacks: List[Callback] = []
    print(f'Number of model parameters: {sum((p.numel() for p in model.parameters()))}')
    # --- Weights & Biases logging (only on the global-zero process, or when
    # forced via the PERLMUTTER_WANDB environment variable) ---
    wandb_logger = None
    if cfg.wandb_active:
        node_rank = os.environ.get('NODE_RANK')
        rank = os.environ.get('RANK')
        local_rank = os.environ.get('LOCAL_RANK')
        slurm_procid = os.environ.get('SLURM_PROCID')
        jsm_namespace = os.environ.get('JSM_NAMESPACE_RANK')
        wandb_active_env = os.environ.get('PERLMUTTER_WANDB')
        print(f'rank={rank!r}, local_rank={local_rank!r}, slurm_procid={slurm_procid!r}, jsm_namespace={jsm_namespace!r}, node_rank={node_rank!r}')
        if (rank is not None):
            rank = int(rank)
        if (((rank == 0) and (local_rank is None)) or bool(wandb_active_env)):
            wandb_logger = WandbLogger(project=cfg.wandb_project_name, entity=cfg.wandb_entity_name, name=cfg.wandb_model_tag, id=cfg.wandb_model_tag)
            callbacks.append(LearningRateMonitor(logging_interval='step'))
    # --- Optional callbacks driven by config flags ---
    if (cfg.checkpoint_dir is not None):
        callbacks.append(ModelCheckpoint(dirpath=cfg.checkpoint_dir, save_last=True, verbose=True, monitor='val/loss', auto_insert_metric_name=False, filename='model-epoch{epoch:02d}-val_loss{val/loss:.2f}', save_top_k=3, every_n_train_steps=cfg.checkpoint_every_n_train_steps, every_n_epochs=cfg.checkpoint_every_n_epochs))
    if cfg.enable_blast:
        assert (cfg.checkpoint_dir is not None)
        callbacks.append(BLASTCallback(block_size=cfg.block_size, database_file=cfg.blast_validation_file, output_dir=(cfg.checkpoint_dir / 'blast'), blast_exe_path=cfg.blast_exe_path, num_blast_seqs_per_gpu=cfg.num_blast_seqs_per_gpu, node_local_path=cfg.node_local_path))
    if cfg.num_test_seqs_per_gpu:
        assert (cfg.checkpoint_dir is not None)
        callbacks.append(SequenceGenerationCallback(block_size=cfg.block_size, num_test_seqs_per_gpu=cfg.num_test_seqs_per_gpu, output_dir=(cfg.checkpoint_dir / 'generated'), custom_seq_name=cfg.custom_seq_name))
    if cfg.enable_perplexity:
        callbacks.append(PerplexityCallback(log_steps=cfg.log_every_n_steps))
    # Throughput mode replaces ALL other callbacks with the monitor alone.
    if cfg.compute_throughput:
        callbacks = [ThroughputMonitor(cfg.batch_size, cfg.num_nodes, cfg.wandb_active)]
    profiler = None
    if cfg.profiling_path:
        profiler = PyTorchProfiler(dirpath=cfg.profiling_path, profiler_kwargs={'activities': [torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], 'schedule': torch.profiler.schedule(wait=0, warmup=1, active=3), 'on_trace_ready': torch.profiler.tensorboard_trace_handler('./')})
    # A short fixed run is enough for the DeepSpeed FLOPs profiler.
    if cfg.deepspeed_flops_profile:
        max_steps = 7
    else:
        max_steps = cfg.max_steps
    # NOTE(review): allgather_bucket_size=.0 / reduce_bucket_size=.0 (zero)
    # look like stripped constants - confirm against the original config.
    trainer = pl.Trainer(gpus=(- 1), default_root_dir=str(cfg.checkpoint_dir), strategy=DeepSpeedStrategy(stage=cfg.deepspeed_stage, offload_optimizer=cfg.offload_optimizer, offload_parameters=cfg.offload_parameters, remote_device=cfg.offload_device, offload_params_device=cfg.offload_device, offload_optimizer_device=cfg.offload_device, nvme_path=cfg.nvme_path, logging_batch_size_per_gpu=cfg.batch_size, partition_activations=cfg.partition_activations, cpu_checkpointing=True, allgather_bucket_size=.0, reduce_bucket_size=.0, pin_memory=True, contiguous_memory_optimization=False), callbacks=callbacks, logger=wandb_logger, profiler=profiler, accumulate_grad_batches=cfg.accumulate_grad_batches, num_sanity_val_steps=2, precision=cfg.precision, max_epochs=cfg.epochs, num_nodes=cfg.num_nodes, check_val_every_n_epoch=cfg.check_val_every_n_epoch, val_check_interval=cfg.val_check_interval, log_every_n_steps=cfg.log_every_n_steps, limit_val_batches=cfg.limit_val_batches, max_steps=max_steps, gradient_clip_val=cfg.gradient_clip_value)
    trainer.fit(model)
    # FLOPs-profiling runs just dump the profile and stop.
    if (cfg.deepspeed_flops_profile and trainer.is_global_zero):
        flops = model.flops_profiler.get_total_flops()
        macs = model.flops_profiler.get_total_macs()
        params = model.flops_profiler.get_total_params()
        print('Flops: {}, macs: {}, params: {}'.format(flops, macs, params))
        model.flops_profiler.print_model_profile(profile_step=5)
        model.flops_profiler.end_profile()
        return
    # Throughput measurement runs skip the test pass.
    if cfg.compute_throughput:
        return
    trainer.test(model)
    if trainer.is_global_zero:
        print('Completed training.')
def test_main_with_list_actions(tmpfolder, capsys, isolated_logger):
    """`--list-actions` should print the action plan without creating anything."""
    opts = ['my-project', '--no-tox', '--list-actions']
    cli.main(opts)
    out, _ = capsys.readouterr()
    expected_fragments = (
        'Planned Actions',
        'pyscaffold.actions:get_default_options',
        'pyscaffold.structure:define_structure',
        'pyscaffold.extensions.no_tox:remove_files',
        'pyscaffold.structure:create_structure',
        'pyscaffold.actions:init_git',
    )
    for fragment in expected_fragments:
        assert fragment in out
    # Listing actions must be a dry run: no project directory is created.
    assert not os.path.exists(opts[0])
def repartition(table, outdir, npartitions=None, chunksize=None, compression='snappy'):
    """Rewrite a CSV *table* as parquet partitions under *outdir*."""
    size = get_size_gb(table)
    if npartitions is None:
        # Default to roughly one output file per GB, but at least one.
        npartitions = max(1, size)
    print(f'Converting {table} of {size} GB to {npartitions} parquet files, chunksize: {chunksize}')
    frame = read_csv_table(table, chunksize)
    frame.repartition(npartitions=npartitions).to_parquet(
        outdir + table, compression=compression, index=False
    )
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand + residual.

    ``fake_relu`` routes the output through FakeReLU instead of the final
    ReLU (same forward values, different backward).
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        expanded = planes * self.expansion
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, expanded)
        self.bn3 = nn.BatchNorm2d(expanded)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x, fake_relu=False):
        # Shortcut path (projected when the dims change).
        shortcut = self.downsample(x) if self.downsample is not None else x
        # Main path: two activated convs, then the expansion conv (no ReLU yet).
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        h = h + shortcut
        if fake_relu:
            return FakeReLU.apply(h)
        return self.relu(h)
def settings_processor(request):
    """Template context processor exposing ad-server settings to templates."""
    context = {
        'adserver_ethicalads_branding': settings.ADSERVER_ETHICALADS_BRANDING,
        'adserver_privacy_policy': settings.ADSERVER_PRIVACY_POLICY_URL,
        'adserver_publisher_policy': settings.ADSERVER_PUBLISHER_POLICY_URL,
        'adserver_version': settings.ADSERVER_VERSION,
        'plausible_domain': settings.PLAUSIBLE_DOMAIN,
    }
    return context
def inspect_font(filename):
    """Print family/bold/italic info for a TrueType/OpenType font file.

    Best-effort diagnostic: any parse failure is reported, never raised.
    """
    try:
        info = ttf.TruetypeInfo(filename)
        print('{0}:'.format(filename))
        print(info.get_name('family'))
        print(('bold=%r' % info.is_bold()))
        print(('italic=%r' % info.is_italic()))
    except Exception:
        # A bare `except:` here would also swallow KeyboardInterrupt and
        # SystemExit; catching Exception keeps the best-effort behavior
        # without hiding interpreter-level signals.
        print(('%s could not be identified. It is probably not a TrueType or OpenType font. However, pyglet may still be able to load it on some platforms.' % filename))
class BaseProxyView(View):
    """Base view for ad click/view proxy endpoints.

    Subclasses set ``impression_type`` and implement ``get_response``; this
    class decides whether an impression is billable (``ignore_tracking_reason``)
    and records/credits it when it is.
    """
    log_level = logging.DEBUG  # level for routine ignore reasons
    log_security_level = logging.WARNING  # level for suspicious mismatches
    impression_type = VIEWS
    success_message = 'Billed impression'
    def ignore_tracking_reason(self, request, advertisement, offer):
        """Return a short reason NOT to bill this impression, or None to bill."""
        reason = None
        ip_address = get_client_ip(request)
        user_agent = get_client_user_agent(request)
        parsed_ua = parse_user_agent(user_agent)
        referrer = request.headers.get('referer')
        geo_data = get_geolocation(request)
        # Each branch both logs and records the first matching reject reason.
        if (not offer):
            log.log(self.log_level, 'Ad impression for unknown offer')
            reason = 'Unknown offer'
        elif (not advertisement.is_valid_offer(self.impression_type, offer)):
            log.log(self.log_level, 'Old or nonexistent impression nonce')
            reason = 'Old/Invalid nonce'
        elif parsed_ua.is_bot:
            log.log(self.log_level, 'Bot impression. User Agent: [%s]', user_agent)
            reason = 'Bot impression'
        elif ((not settings.DEBUG) and (ip_address in settings.INTERNAL_IPS)):
            log.log(self.log_level, 'Internal IP impression. User Agent: [%s]', user_agent)
            reason = 'Internal IP'
        elif ((parsed_ua.os.family == 'Other') or (parsed_ua.browser.family == 'Other')):
            log.log(self.log_level, 'Unknown user agent impression [%s]', user_agent)
            reason = 'Unrecognized user agent'
        elif (not request.user.is_anonymous):
            log.log(self.log_level, 'Ignored known user ad impression')
            reason = 'Known user impression'
        elif is_blocklisted_user_agent(user_agent):
            log.log(self.log_level, 'Blocked user agent impression [%s]', user_agent)
            reason = 'Blocked UA impression'
        elif is_blocklisted_referrer(referrer):
            log.log(self.log_level, 'Blocklisted referrer [%s], Publisher: [%s], UA: [%s]', referrer, offer.publisher, user_agent)
            reason = 'Blocked referrer impression'
        elif is_blocklisted_ip(ip_address):
            log.log(self.log_level, 'Blocked IP impression, Publisher: [%s]', offer.publisher)
            reason = 'Blocked IP impression'
        elif (not offer.publisher):
            log.log(self.log_level, 'Ad impression for unknown publisher')
            reason = 'Unknown publisher'
        elif (not advertisement.flight.show_to_geo(geo_data)):
            log.log(self.log_security_level, 'Invalid geo targeting for ad [%s]. Country: [%s], Region: [%s], Metro: [%s]', advertisement, geo_data.country, geo_data.region, geo_data.metro)
            reason = 'Invalid targeting impression'
        elif ((self.impression_type == CLICKS) and is_click_ratelimited(request)):
            log.log(self.log_level, 'User has clicked too many ads recently, Publisher: [%s], UA: [%s]', offer.publisher, user_agent)
            reason = 'Ratelimited click impression'
        elif ((self.impression_type == VIEWS) and is_view_ratelimited(request)):
            log.log(self.log_level, 'User has viewed too many ads recently, Publisher: [%s], UA: [%s]', offer.publisher, user_agent)
            reason = 'Ratelimited view impression'
        elif (offer and (offer.os_family != parsed_ua.os.family)):
            log.log(self.log_security_level, 'Mismatched OS between offer and impression. Publisher: [%s], Offer OS: [%s], User agent: [%s]', offer.publisher, offer.os_family, user_agent)
            reason = 'Mismatched OS'
        elif (offer and (offer.browser_family != parsed_ua.browser.family)):
            log.log(self.log_security_level, 'Mismatched browser between offer and impression. Publisher: [%s], Offer Browser: [%s], User agent: [%s]', offer.publisher, offer.browser_family, user_agent)
            reason = 'Mismatched browser'
        elif (offer.publisher.allowed_domains and (not is_allowed_domain(offer.url, offer.publisher.allowed_domains_as_list()))):
            # NOTE(review): unlike every other branch, this one only logs and
            # never sets `reason`, so the impression is still billed -
            # confirm whether this is intentional (monitoring only).
            log.log(self.log_security_level, 'Offer URL is not on the allowed domain list. Publisher: [%s], Offer URL: [%s]', offer.publisher, offer.url)
        # Log-only check: an IP mismatch is recorded but never blocks billing.
        if (offer and (offer.ip != anonymize_ip_address(ip_address))):
            log.log(self.log_level, 'Mismatched IP between offer and impression. Publisher: [%s], Offer IP (anon): [%s]', offer.publisher, offer.ip)
        return reason
    def get_offer(self, nonce):
        """Look up the Offer for *nonce*; return None when missing or invalid."""
        try:
            offer = Offer.objects.get(id=nonce)
        except (ValidationError, Offer.DoesNotExist) as exception:
            log.debug('Invalid Offer. exception=%s', exception)
            offer = None
        return offer
    def handle_action(self, request, advertisement, offer, publisher):
        """Bill the impression unless an ignore reason applies.

        Returns the ignore reason, or None when the impression was billed.
        """
        ignore_reason = self.ignore_tracking_reason(request, advertisement, offer)
        if (not ignore_reason):
            log.log(self.log_level, self.success_message)
            # Nonces are single-use: invalidate before counting the impression.
            advertisement.invalidate_nonce(self.impression_type, offer.pk)
            advertisement.track_impression(request, self.impression_type, publisher=publisher, offer=offer)
            # Credit the publisher: CPC per click, CPM per thousand views.
            if ((self.impression_type == CLICKS) and advertisement.flight.cpc):
                publisher.increment_daily_earn(float(advertisement.flight.cpc))
            if ((self.impression_type == VIEWS) and advertisement.flight.cpm):
                publisher.increment_daily_earn((float(advertisement.flight.cpm) / 1000))
        return ignore_reason
    def get(self, request, advertisement_id, nonce):
        """Record the tracked action and return the subclass-built response.

        The billing decision is exposed via the X-Adserver-Reason header for
        staff users and in debug/testing modes.
        """
        advertisement = get_object_or_404(Advertisement, pk=advertisement_id)
        offer = self.get_offer(nonce)
        publisher = None
        if offer:
            publisher = offer.publisher
        ignore_reason = self.handle_action(request, advertisement, offer, publisher)
        message = (ignore_reason or self.success_message)
        response = self.get_response(request, advertisement, publisher)
        if (settings.DEBUG or settings.TESTING or request.user.is_staff):
            response['X-Adserver-Reason'] = message
        return response
    def get_response(self, request, advertisement, publisher):
        """Build the HTTP response (e.g. redirect or pixel); subclass hook."""
        raise NotImplementedError
class AveragerAcrossThresholds():
    """Average a CAM evaluation metric across several percentile thresholds.

    ``imputer`` is a factory: called with a percentile, it must return a
    metric callable with the ``(input_tensor, cams, targets, model)``
    signature.  The result is the element-wise mean of the metric scores
    over all configured percentiles.
    """

    def __init__(self, imputer, percentiles=(10, 20, 30, 40, 50, 60, 70, 80, 90)):
        # The original default was a mutable list, which is shared across all
        # instances; an immutable tuple avoids that pitfall and callers may
        # still pass any iterable of percentiles.
        self.imputer = imputer
        self.percentiles = percentiles

    def __call__(self, input_tensor: torch.Tensor, cams: np.ndarray, targets: List[Callable], model: torch.nn.Module):
        """Evaluate the metric at every percentile and return the mean score."""
        scores = []
        for percentile in self.percentiles:
            imputer = self.imputer(percentile)
            scores.append(imputer(input_tensor, cams, targets, model))
        return np.mean(np.float32(scores), axis=0)
class PushNegatives(SKCMatrixAndWeightTransformerABC):
    """Transformer that shifts values so no negatives remain.

    NOTE(review): the bare ``_inherit(...)`` calls below look like mangled
    decorators (likely ``@doc_inherit(...)``) - as written their return
    values are discarded; confirm against the upstream source.
    """
    _inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        # Shift all weights together (single global offset).
        return push_negatives(weights, axis=None)
    _inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        # Shift each criterion (column) independently.
        return push_negatives(matrix, axis=0)
def GetTableList(glb):
    """Return all user tables/views plus the engine's schema metadata tables."""
    query = QSqlQuery(glb.db)
    # Pick the catalog query for the backend in use.
    if glb.dbref.is_sqlite3:
        sql = "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name"
    else:
        sql = "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name"
    QueryExec(query, sql)
    tables = []
    while query.next():
        tables.append(query.value(0))
    # Expose the schema catalog itself so it can be browsed too.
    if glb.dbref.is_sqlite3:
        tables.append('sqlite_master')
    else:
        tables.extend(('information_schema.tables',
                       'information_schema.views',
                       'information_schema.columns'))
    return tables
class FromPackageLoader():
    """Resolve grammar files inside a Python package via ``pkgutil``.

    Each configured search path is tried in order; the first grammar found
    is returned along with its PackageResource identity.
    """
    pkg_name: str
    search_paths: Sequence[str]

    def __init__(self, pkg_name: str, search_paths: Sequence[str]=('',)) -> None:
        self.pkg_name = pkg_name
        self.search_paths = search_paths

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.pkg_name, self.search_paths)

    def __call__(self, base_path: Union[(None, str, PackageResource)], grammar_path: str) -> Tuple[(PackageResource, str)]:
        if base_path is None:
            candidates = self.search_paths
        elif isinstance(base_path, PackageResource) and base_path.pkg_name == self.pkg_name:
            # Relative import: look only next to the importing grammar.
            candidates = [base_path.path]
        else:
            # Relative references only make sense within the same package.
            raise IOError()
        last_err = None
        for candidate in candidates:
            full_path = os.path.join(candidate, grammar_path)
            try:
                text: Optional[bytes] = pkgutil.get_data(self.pkg_name, full_path)
            except IOError as e:
                last_err = e
                continue
            return (PackageResource(self.pkg_name, full_path), text.decode() if text else '')
        raise IOError('Cannot find grammar in given paths') from last_err
def len_to_system(fil, item=None):
    """Parse an OSLO ``.len`` lens description stream into a System.

    One Spheroid is assembled per surface; it is flushed into the system
    when a NXT/END command is seen.  *item* is accepted but unused here.
    """
    s = System()
    e = Spheroid()  # surface currently being assembled
    th = 0.0  # pending thickness, applied to the NEXT surface on flush
    for line in fil.readlines():
        p = line.split()
        if (not p):
            continue
        (cmd, args) = (p[0], p[1:])
        if (cmd == 'LEN'):
            # Quoted description; the last two tokens are dropped.
            s.description = ' '.join(args[1:(- 2)]).strip('"')
        elif (cmd == 'UNI'):
            # Unit scale; the 0.001 factor presumably converts mm to m - confirm.
            s.scale = (float(args[0]) * 0.001)
        elif (cmd == 'AIR'):
            e.material = air
        elif (cmd == 'TH'):
            th = float(args[0])
            # Very large thickness is treated as infinite spacing.
            if (th > 100.0):
                th = np.inf
        elif (cmd == 'AP'):
            # Aperture radius; an optional CHK flag may precede the value.
            if (args[0] == 'CHK'):
                del args[0]
            e.radius = float(args[0])
        elif (cmd == 'GLA'):
            e.material = Material.make(args[0])
        elif (cmd == 'AST'):
            # Mark this surface as the aperture stop.
            e.stop = True
        elif (cmd == 'RD'):
            # Radius of curvature -> curvature.
            e.curvature = (1 / float(args[0]))
        elif (cmd in ('NXT', 'END')):
            # Flush the finished surface; the pending thickness becomes the
            # distance of the next surface.
            s.append(e)
            e = Spheroid()
            e.distance = th
        elif (cmd in ('//', 'DES', 'EBR', 'GIH', 'DLRS', 'WW', 'WV')):
            # Known commands that are intentionally ignored.
            pass
        else:
            print(cmd, 'not handled', args)
    return s
class SignInBot():
    """Mixin providing bot-token sign-in for pyrogram.Client."""
    async def sign_in_bot(self: 'pyrogram.Client', bot_token: str) -> 'types.User':
        """Authorize this client as a bot and return the bot's User.

        On a UserMigrate error the session is re-created against the
        datacenter Telegram indicated, then the import is retried.
        """
        while True:
            try:
                r = (await self.invoke(raw.functions.auth.ImportBotAuthorization(flags=0, api_id=self.api_id, api_hash=self.api_hash, bot_auth_token=bot_token)))
            except UserMigrate as e:
                # Wrong datacenter: stop the session, persist the new DC id,
                # create a fresh auth key there, and retry the loop.
                (await self.session.stop())
                (await self.storage.dc_id(e.value))
                (await self.storage.auth_key((await Auth(self, (await self.storage.dc_id()), (await self.storage.test_mode())).create())))
                self.session = Session(self, (await self.storage.dc_id()), (await self.storage.auth_key()), (await self.storage.test_mode()))
                (await self.session.start())
            else:
                # Success: persist the bot identity and return the parsed user.
                (await self.storage.user_id(r.user.id))
                (await self.storage.is_bot(True))
                return types.User._parse(self, r.user)
def convnet_arg_scope(is_training=True, weight_decay=5e-05, stddev=0.05):
    """Build a TF-slim arg_scope for conv layers.

    Configures random-normal weight init, lrelu activation, batch norm
    (with the given training flag) and L2 weight decay; returns the
    innermost scope for use as ``with convnet_arg_scope(...):``.
    """
    batch_norm_params = {'is_training': is_training, 'center': True, 'scale': True, 'decay': 0.9999, 'epsilon': 0.001, 'zero_debias_moving_mean': True}
    weights_init = tf.random_normal_initializer(0, stddev)
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    # Nested scopes: base conv config, then batch-norm params, then the
    # regularizer; only the innermost combined scope is returned.
    with slim.arg_scope([slim.conv2d], weights_initializer=weights_init, activation_fn=lrelu, normalizer_fn=slim.batch_norm):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer) as sc:
                return sc
def parse_response(resp):
    """Normalize an HTTP response object into a uniform ``Response`` tuple.

    NOTE(review): the original signature was truncated to ``def parse_`` in
    this copy of the file; the name ``parse_response`` is reconstructed from
    context - confirm against version control.

    Accepts either a requests-style response (has ``.text``) or a
    urllib/http.client-style response (has ``.geturl()``/``.status``).
    ``ok`` is True only for a 200 status; ``error`` always carries the
    "<status> (<reason>)" summary.
    """
    Response = collections.namedtuple('Response', ['ok', 'url', 'text', 'headers', 'status_code', 'reason', 'error'])
    if hasattr(resp, 'text'):
        # requests-style response object.
        text = resp.text
        url = resp.url
        status_code = resp.status_code
        reason = resp.reason
        headers = resp.headers
    else:
        # http.client / urllib response object.
        text = unescape_html(resp)
        url = resp.geturl()
        status_code = resp.status
        reason = resp.reason
        headers = dict(resp.info())
    ok = bool(200 == status_code)
    error_msg = f'{status_code} ({reason})'
    return Response(ok=ok, url=url, text=text, headers=headers, status_code=status_code, reason=reason, error=error_msg)
def test_cw_rx():
    """Validate the default CW receiver configuration."""
    print('#### CW receiver ####')
    cw = cw_rx()
    print('# CW receiver parameters #')
    bb = cw.bb_prop
    rf = cw.rf_prop
    assert bb['fs'] == 20
    assert rf['noise_figure'] == 12
    assert rf['rf_gain'] == 20
    assert bb['load_resistor'] == 1000
    assert bb['baseband_gain'] == 50
    # Noise bandwidth defaults to the sampling rate.
    assert bb['noise_bandwidth'] == bb['fs']
    print('# CW receiver channel #')
    channel = cw.rxchannel_prop
    assert channel['size'] == 1
    assert np.array_equal(channel['locations'], np.array([[0, 0, 0]]))
    angles = np.arange(-90, 91, 180)
    flat_pattern = np.zeros(2)
    assert np.array_equal(channel['az_angles'], [angles])
    assert np.array_equal(channel['az_patterns'], [flat_pattern])
    assert np.array_equal(channel['el_angles'], [angles])
    assert np.array_equal(channel['el_patterns'], [flat_pattern])
def test_admin_session_duplicate_session(clean_database, mock_emit_session_update, flask_app, mock_audit):
    """DUPLICATE_SESSION copies worlds/presets into a new session owned only
    by the requesting admin, audits both sessions, and emits no update."""
    # Setup - a visible session with two worlds and two admin members.
    user1 = database.User.create(id=1234, name='The Name')
    user2 = database.User.create(id=2345, name='Other Name')
    session = database.MultiplayerSession.create(id=1, name='Debug', state=MultiplayerSessionVisibility.VISIBLE, creator=user1)
    database.World.create(session=session, name='W1', preset='{}')
    database.World.create(session=session, name='W2', preset='{"foo": 5}')
    database.MultiplayerMembership.create(user=user1, session=session, admin=True)
    database.MultiplayerMembership.create(user=user2, session=session, admin=True)
    sa = MagicMock()
    sa.get_current_user.return_value = user1
    # Run
    with flask_app.test_request_context():
        session_admin.admin_session(sa, 1, SessionAdminGlobalAction.DUPLICATE_SESSION.value, 'new_name')
    # Assert - worlds/presets are copied, only user1 is a member of the copy,
    # the copy's audit log references the source, and no associations exist.
    mock_emit_session_update.assert_not_called()
    mock_audit.assert_called_once_with(sa, session, 'Duplicated session as new_name')
    new_session = database.MultiplayerSession.get_by_id(2)
    assert (new_session.name == 'new_name')
    assert ([w.name for w in new_session.worlds] == ['W1', 'W2'])
    assert ([w.preset for w in new_session.worlds] == ['{}', '{"foo": 5}'])
    assert ([mem.user.name for mem in new_session.members] == ['The Name'])
    assert ([a.message for a in new_session.audit_log] == ['Duplicated from Debug'])
    assert (list(itertools.chain.from_iterable((w.associations for w in new_session.worlds))) == [])
('torch.distributed._broadcast_coalesced', mock)
('torch.distributed.broadcast', mock)
('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_module_wrapper():
    """is_module_wrapper: False for plain modules, True for the stock
    (MM)DataParallel/DistributedDataParallel wrappers and for classes
    registered in the _WRAPPERS registry."""
    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)
        def forward(self, x):
            return self.conv(x)
    # Stub out cross-rank model verification so DDP can be constructed with a
    # mocked process group and no real distributed initialization.
    if hasattr(torch.distributed, '_verify_model_across_ranks'):
        torch.distributed._verify_model_across_ranks = mock
    model = Model()
    assert (not is_module_wrapper(model))
    dp = DataParallel(model)
    assert is_module_wrapper(dp)
    mmdp = MMDataParallel(model)
    assert is_module_wrapper(mmdp)
    ddp = DistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(ddp)
    mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(mmddp)
    deprecated_mmddp = DeprecatedMMDDP(model)
    assert is_module_wrapper(deprecated_mmddp)
    # NOTE(review): this bare registry call reads like a stripped decorator
    # (`@_WRAPPERS.register_module()` on the class below) - as written it
    # registers nothing; confirm against upstream.
    _WRAPPERS.register_module()
    class ModuleWrapper(object):
        def __init__(self, module):
            self.module = module
        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)
    module_wraper = ModuleWrapper(model)
    assert is_module_wrapper(module_wraper)
class MyghtyLexer(RegexLexer):
    """Pygments lexer for Myghty templates (Python-embedding markup):
    <%def>/<%method> blocks, component calls (<& ... &>), %-prefixed
    control lines and #-comment lines, with Python sub-lexing."""
    name = 'Myghty'
    # NOTE(review): the url value is truncated in this copy of the file
    # (unterminated string literal) - restore it from upstream pygments.
    url = '
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']
    version_added = '0.6'
    tokens = {'root': [('\\s+', Text), ('(?s)(<%(?:def|method))(\\s*)(.*?)(>)(.*?)(</%\\2\\s*>)', bygroups(Name.Tag, Text, Name.Function, Name.Tag, using(this), Name.Tag)), ('(?s)(<%\\w+)(.*?)(>)(.*?)(</%\\2\\s*>)', bygroups(Name.Tag, Name.Function, Name.Tag, using(PythonLexer), Name.Tag)), ('(<&[^|])(.*?)(,.*?)?(&>)', bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)), ('(?s)(<&\\|)(.*?)(,.*?)?(&>)', bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)), ('</&>', Name.Tag), ('(?s)(<%!?)(.*?)(%>)', bygroups(Name.Tag, using(PythonLexer), Name.Tag)), ('(?<=^)#[^\\n]*(\\n|\\Z)', Comment), ('(?<=^)(%)([^\\n]*)(\\n|\\Z)', bygroups(Name.Tag, using(PythonLexer), Other)), ("(?sx)\n (.+?) # anything, followed by:\n (?:\n (?<=\\n)(?=[%#]) | # an eval or comment line\n (?=</?[%&]) | # a substitution or block or\n # call start or end\n # - don't consume\n (\\\\\\n) | # an escaped newline\n \\Z # end of string\n )", bygroups(Other, Operator))]}
def test_source_show_simple(tester: CommandTester) -> None:
    """`source show` with no arguments lists every configured source."""
    tester.execute('')
    # NOTE(review): the "url : : primary" lines look garbled - the repository
    # URLs appear to have been stripped from this copy of the fixture; confirm
    # against the original test.
    expected = 'name : existing\nurl : : primary\n\nname : one\nurl : : primary\n\nname : two\nurl : : primary\n'.splitlines()
    assert ([line.strip() for line in tester.io.fetch_output().strip().splitlines()] == expected)
    assert (tester.status_code == 0)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab-style).

    Five parallel branches (1x1 conv, three dilated separable convs, global
    pooling) are concatenated and fused by a 1x1 conv + BN + ReLU.
    """

    def __init__(self, in_channels=2048, out_channels=256, output_stride=8):
        super().__init__()
        # Dilation rates depend on the backbone's output stride.
        if output_stride == 16:
            dilations = [6, 12, 18]
        elif output_stride == 8:
            dilations = [12, 24, 36]
        else:
            raise NotImplementedError
        self.aspp0 = nn.Sequential(OrderedDict([
            ('conv', nn.Conv2d(in_channels, out_channels, 1, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
            ('relu', nn.ReLU(inplace=True)),
        ]))
        self.aspp1 = SeparableConv2d(in_channels, out_channels, dilation=dilations[0], relu_first=False)
        self.aspp2 = SeparableConv2d(in_channels, out_channels, dilation=dilations[1], relu_first=False)
        self.aspp3 = SeparableConv2d(in_channels, out_channels, dilation=dilations[2], relu_first=False)
        self.image_pooling = nn.Sequential(OrderedDict([
            ('gap', nn.AdaptiveAvgPool2d((1, 1))),
            ('conv', nn.Conv2d(in_channels, out_channels, 1, bias=False)),
            ('bn', nn.BatchNorm2d(out_channels)),
            ('relu', nn.ReLU(inplace=True)),
        ]))
        self.conv = nn.Conv2d(out_channels * 5, out_channels, 1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        # NOTE: defined but not applied in forward() in the original - kept as-is.
        self.dropout = nn.Dropout2d(p=0.1)

    def forward(self, x):
        # Global-context branch, upsampled back to the input's spatial size.
        pooled = self.image_pooling(x)
        pooled = F.interpolate(pooled, size=x.shape[2:], mode='bilinear', align_corners=False)
        branches = [pooled, self.aspp0(x), self.aspp1(x), self.aspp2(x), self.aspp3(x)]
        fused = torch.cat(branches, dim=1)
        fused = self.conv(fused)
        fused = self.bn(fused)
        return self.relu(fused)
class Syncer(abc.ABC):
    """Base class for guild synchronisers.

    NOTE(review): ``name``/``_get_diff``/``_sync`` take no ``self`` and
    ``sync`` takes ``cls`` - decorators (@property/@staticmethod/
    @abstractmethod/@classmethod) appear to have been stripped from this
    copy; confirm against upstream before relying on call semantics.
    """
    def name() -> str:
        # Human-readable syncer name used in log and Discord messages.
        raise NotImplementedError
    async def _get_diff(guild: Guild) -> _Diff:
        # Compute the difference between guild state and the site's cache.
        raise NotImplementedError
    async def _sync(diff: _Diff) -> None:
        # Apply the computed diff to the site.
        raise NotImplementedError
    async def sync(cls, guild: Guild, ctx: (Context | None)=None) -> None:
        """Run the syncer, optionally reporting progress/result via *ctx*."""
        log.info(f'Starting {cls.name} syncer.')
        if ctx:
            message = (await ctx.send(f' Synchronising {cls.name}s.'))
        else:
            message = None
        diff = (await cls._get_diff(guild))
        try:
            (await cls._sync(diff))
        except ResponseCodeError as e:
            log.exception(f'{cls.name} syncer failed!')
            results = f'''status {e.status}
            ```{(e.response_json or 'See log output for details')}```'''
            content = f':x: Synchronisation of {cls.name}s failed: {results}'
        else:
            # Summarise only the non-None diff fields, e.g. "created `3`".
            diff_dict = diff._asdict()
            results = (f'{name} `{len(val)}`' for (name, val) in diff_dict.items() if (val is not None))
            results = ', '.join(results)
            log.info(f'{cls.name} syncer finished: {results}.')
            content = f':ok_hand: Synchronisation of {cls.name}s complete: {results}'
        if message:
            (await message.edit(content=content))
class ews_input_addsubsec(unittest.TestCase):
    """timeconvert with a fullweek/zcount input plus add (-a) and subtract (-s)
    second offsets; NOTE(review): whitespace in the expected string may have
    been collapsed in this copy - verify against the original fixture."""
    def test(self):
        # -o '01 1379 500': full GPS week 1379, zcount 500; add 1+2 s, subtract 5+6 s.
        run_test(self, ['-o 01 1379 500', '-a', '1', '2', '-s', '5', '6'], ' Month/Day/Year H:M:S 06/11/2006 00:08:12 GPS\n Modified Julian Date 53897. GPS\n GPSweek DayOfWeek SecOfWeek 355 0 492.000000\n FullGPSweek Zcount 1379 328\n Year DayOfYear SecondOfDay 2006 162 492.000000\n Unix: Second Microsecond 0\n Zcount: 29-bit (32-bit) ()\n')
def rtn_strcasecmp(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    """Symbolic model of libc strcasecmp(s1, s2).

    Builds an AST counting case-insensitively mismatching byte positions,
    so the result is 0 iff the prefixes compare equal; the sign/ordering
    semantics of the real strcasecmp are not modeled.
    """
    logger.debug('strcasecmp hooked')
    s1 = pstate.get_argument_value(0)
    s2 = pstate.get_argument_value(1)
    # NOTE(review): the bound is asymmetric (len(s1) vs len(s2)+1) - confirm
    # this matches the intended comparison length.
    size = min(len(pstate.memory.read_string(s1)), (len(pstate.memory.read_string(s2)) + 1))
    ptr_bit_size = pstate.ptr_bit_size
    ast = pstate.actx
    res = ast.bv(0, pstate.ptr_bit_size)
    for index in range(size):
        cells1 = pstate.read_symbolic_memory_byte((s1 + index)).getAst()
        cells2 = pstate.read_symbolic_memory_byte((s2 + index)).getAst()
        # Case-fold lowercase ASCII letters to uppercase (subtract 0x20).
        cells1 = ast.ite(ast.land([(cells1 >= ord('a')), (cells1 <= ord('z'))]), (cells1 - 32), cells1)
        cells2 = ast.ite(ast.land([(cells2 >= ord('a')), (cells2 <= ord('z'))]), (cells2 - 32), cells2)
        # Add 1 for every position where the folded bytes differ.
        res = (res + ast.ite((cells1 == cells2), ast.bv(0, ptr_bit_size), ast.bv(1, ptr_bit_size)))
    return res
_start_docstrings('\n XLM-RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.\n for Named-Entity-Recognition (NER) tasks.\n ', XLM_ROBERTA_START_DOCSTRING)
class TFXLMRobertaForTokenClassification(TFRobertaForTokenClassification):
    """TF XLM-RoBERTa token-classification model: reuses the RoBERTa
    implementation entirely, overriding only the config class."""
    config_class = XLMRobertaConfig
def filter_model(desc, model_filter, protocol_filter=None):
    """Print and return supported-device fixtures matching the given filters."""
    if protocol_filter is None:
        protocol_filter = {'IOT', 'SMART'}
    filtered = []
    for file, protocol in SUPPORTED_DEVICES:
        if protocol not in protocol_filter:
            continue
        # Fixture names look like "<model>(<region>)_<version>.json".
        model_region = basename(file).split('_')[0]
        model_name = model_region.split('(')[0]
        for wanted in model_filter:
            if wanted == model_name:
                filtered.append((file, protocol))
    filtered_basenames = [basename(f) + '-' + p for f, p in filtered]
    print(f'# {desc}')
    for file in filtered_basenames:
        print(f'  {file}')
    return filtered
class Solution():
    """LeetCode 1022: sum of all root-to-leaf binary numbers.

    Tree nodes only need ``val`` (0 or 1), ``left`` and ``right`` attributes.
    Instances accumulate state, so use a fresh Solution per tree.
    """

    def __init__(self):
        # Bits along the current root-to-node path, and the running total.
        self.temp = []
        self.res = 0

    def sumRootToLeaf(self, root: 'TreeNode', level=0) -> int:
        """Accumulate the integer value of every root-to-leaf bit path.

        ``level`` is the depth of *root* and is used internally by the
        recursion; callers normally omit it.  Returns the running total
        (the original annotated the return as ``List[str]``, but an int
        is returned; None for an empty tree).
        """
        if root is None:
            return
        # Discard stale bits left over from a previously explored branch.
        while len(self.temp) > level:
            self.temp.pop()
        self.temp.append(root.val)
        level += 1
        if not root.left and not root.right:
            # Leaf reached: the path bits spell a binary number.
            bits = ''.join(str(bit) for bit in self.temp)
            self.res += int(bits, 2)
        else:
            self.sumRootToLeaf(root.left, level)
            self.sumRootToLeaf(root.right, level)
        return self.res
def import_class(import_str):
    """Import and return the class (or attribute) named by dotted *import_str*.

    e.g. ``import_class('collections.OrderedDict')``.  Raises ImportError
    when the module cannot be imported or lacks the requested attribute.
    """
    (mod_str, _sep, class_str) = import_str.rpartition('.')
    __import__(mod_str)
    try:
        return getattr(sys.modules[mod_str], class_str)
    except AttributeError as e:
        # Chain the original error (PEP 3134) so the real cause isn't lost.
        raise ImportError(('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info())))) from e
def func_attention(query, context, gamma1):
    """Cross attention between query vectors and context feature-map locations.

    From the indexing below: query is (batch, C, queryL) and context is
    (batch, C, ih, iw); the channel dims are assumed to match - confirm at
    call sites.  gamma1 sharpens the second softmax.  Returns the per-query
    weighted context (batch, C, queryL) and attention (batch, queryL, ih, iw).
    """
    (batch_size, queryL) = (query.size(0), query.size(2))
    (ih, iw) = (context.size(2), context.size(3))
    sourceL = (ih * iw)
    # Flatten spatial dims: (batch, C, sourceL); transpose for the matmul.
    context = context.view(batch_size, (- 1), sourceL)
    contextT = torch.transpose(context, 1, 2).contiguous()
    # Similarity of every source location with every query: (batch, sourceL, queryL).
    attn = torch.bmm(contextT, query)
    # First softmax: over queries for each source location.
    # NOTE(review): nn.Softmax() without dim relies on a deprecated default.
    attn = attn.view((batch_size * sourceL), queryL)
    attn = nn.Softmax()(attn)
    attn = attn.view(batch_size, sourceL, queryL)
    attn = torch.transpose(attn, 1, 2).contiguous()
    attn = attn.view((batch_size * queryL), sourceL)
    # Second softmax: temperature-scaled, over source locations per query.
    attn = (attn * gamma1)
    attn = nn.Softmax()(attn)
    attn = attn.view(batch_size, queryL, sourceL)
    attnT = torch.transpose(attn, 1, 2).contiguous()
    # Weighted sum of context features per query: (batch, C, queryL).
    weightedContext = torch.bmm(context, attnT)
    return (weightedContext, attn.view(batch_size, (- 1), ih, iw))
class AttributeNestedSerializer(AttributeListSerializer):
    """List serializer variant that also embeds each attribute's children
    recursively under an ``elements`` key."""
    elements = serializers.SerializerMethodField()
    class Meta(AttributeListSerializer.Meta):
        fields = (*AttributeListSerializer.Meta.fields, 'elements')
    def get_elements(self, obj):
        # Recurse into the attribute tree, propagating the serializer context.
        return AttributeNestedSerializer(obj.get_children(), many=True, read_only=True, context=self.context).data
class InverseHammerTest(unittest.TestCase):
    """Round-trip a lon/lat pair through the Hammer projection."""

    @classmethod
    def setUpClass(cls):
        # The original declared ``setUpClass(self)`` without @classmethod;
        # unittest invokes setUpClass on the class with no arguments, so
        # that version failed with a missing-argument TypeError before any
        # test could run.  Class attributes are readable via self below.
        cls.p = Proj(proj='hammer')
        (cls.x, cls.y) = cls.p((- 30), 40)

    def test_forward(self):
        self.assertAlmostEqual(self.x, (- 2711575.083), places=3)
        self.assertAlmostEqual(self.y, 4395506.619, places=3)

    def test_inverse(self):
        (lon, lat) = self.p(self.x, self.y, inverse=True)
        self.assertAlmostEqual(lon, (- 30.0), places=3)
        self.assertAlmostEqual(lat, 40.0, places=3)
def get_auth_credentials(service, site, url, majorversion, token, timeout):
    """Exchange *token* for a ``(user, password)`` pair via the auth endpoint.

    Raises CannotGetCredentialsFromAuthRequest when the response is not of
    the form ``user:password``.
    """
    url = fillurl(service, site, url, majorversion, 'auth')
    f = _request(url, timeout=timeout, post=token)
    s = f.read().decode()
    try:
        # BUG FIX: split on the FIRST colon only — the password itself may
        # legitimately contain ':' characters.
        (user, passwd) = s.strip().split(':', 1)
    except ValueError:
        raise CannotGetCredentialsFromAuthRequest(('data="%s"' % s))
    return (user, passwd)
def test_field_without_parameters():
    """An InputShape field that matches no constructor parameter is rejected."""
    expected = full_match_regex_str("Fields {'a'} do not bound to any parameter")
    with pytest.raises(ValueError, match=expected):
        unbound_field = InputField(id='a', type=int, default=NoDefault(), is_required=True, metadata={}, original=None)
        InputShape(
            constructor=stub_constructor,
            kwargs=None,
            fields=(unbound_field,),
            params=(),
            overriden_types=frozenset({'a'}),
        )
class QuantizableMobileHairNet(MobileHairNet):
    """MobileHairNet variant prepared for PyTorch quantization (QAT/static)."""
    def __init__(self):
        super(QuantizableMobileHairNet, self).__init__(encode_block=QuantizableLayerDepwiseEncode, decode_block=QuantizableLayerDepwiseDecode)
        # Quant/DeQuant stubs mark the float<->quantized boundaries.
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()
        # FloatFunctional so the skip-connection adds can be quantized.
        self.f_add = FloatFunctional()
    def forward(self, x):
        """Quantize the input, run the network, dequantize the output."""
        x = self.quant(x)
        x = self._forward_implement(x)
        x = self.dequant(x)
        return x
    def _forward_implement(self, x):
        """U-Net-style pass: 5 encoder stages, then 5 decoder stages with
        quantization-safe additive skip connections."""
        encode_layer1 = self.encode_layer1(x)
        encode_layer2 = self.encode_layer2(encode_layer1)
        encode_layer3 = self.encode_layer3(encode_layer2)
        encode_layer4 = self.encode_layer4(encode_layer3)
        encode_layer5 = self.encode_layer5(encode_layer4)
        # Project encoder features to the decoder channel widths.
        encode_layer4 = self.encode_to_decoder4(encode_layer4)
        encode_layer3 = self.encode_to_decoder3(encode_layer3)
        encode_layer2 = self.encode_to_decoder2(encode_layer2)
        encode_layer1 = self.encode_to_decoder1(encode_layer1)
        # f_add.add replaces '+' so the adds get observed/quantized.
        decode_layer1 = self.f_add.add(self.decode_layer1(encode_layer5), encode_layer4)
        decode_layer2 = self.f_add.add(self.decode_layer2(decode_layer1), encode_layer3)
        decode_layer3 = self.f_add.add(self.decode_layer3(decode_layer2), encode_layer2)
        decode_layer4 = self.f_add.add(self.decode_layer4(decode_layer3), encode_layer1)
        decode_layer5 = self.decode_layer5(decode_layer4)
        out = decode_layer5
        return out
    def fuse_model(self) -> None:
        """Fuse conv/norm/activation triples and delegate to quantizable blocks."""
        for m in self.modules():
            if (type(m) is ConvNormActivation):
                fuse_modules(m, ['0', '1', '2'], inplace=True)
            if ((type(m) is QuantizableInvertedResidual) or (type(m) is QuantizableLayerDepwiseDecode)):
                m.fuse_model()
    def quantize(self) -> None:
        """Prepare for QAT with the fbgemm backend and convert in place."""
        self.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
        # Fusion requires eval mode; QAT preparation requires train mode.
        self.eval()
        self.fuse_model()
        self.train()
        torch.quantization.prepare_qat(self, inplace=True)
        torch.quantization.convert(self, inplace=True)
def test_struct_type():
    """from_dict builds a StructType whose int/string fields keep their attrs."""
    payload = {
        'type': 'struct',
        'fields': [
            {'type': 'int', 'bits': 32},
            {'type': 'string', 'bytes': 32},
        ],
    }
    recap_type = from_dict(payload)
    assert isinstance(recap_type, StructType)
    assert recap_type.type_ == 'struct'
    for member in recap_type.fields:
        if isinstance(member, IntType):
            assert member.type_ == 'int'
            assert member.bits == 32
        elif isinstance(member, StringType):
            assert member.type_ == 'string'
            assert member.bytes_ == 32
def convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch(logbase, dataset, loadpath, epoch, device):
    """Port weights from an original trajectory-transformer GPT checkpoint into
    a TrajectoryTransformerModel and save them to ``pytorch_model.bin``."""
    gpt, gpt_epoch = utils.load_model(logbase, dataset, loadpath, epoch=epoch, device=device)
    model = TrajectoryTransformerModel(gpt.config)
    # Top-level modules map one-to-one.
    model.tok_emb.load_state_dict(gpt.tok_emb.state_dict())
    model.pos_emb = gpt.pos_emb
    model.drop.load_state_dict(gpt.drop.state_dict())
    model.ln_f.load_state_dict(gpt.ln_f.state_dict())
    model.head.load_state_dict(gpt.head.state_dict())
    for i, src_block in enumerate(gpt.blocks):
        dst_block = model.blocks[i]
        dst_block.ln1.load_state_dict(src_block.ln1.state_dict())
        dst_block.ln2.load_state_dict(src_block.ln2.state_dict())
        dst_block.attn.load_state_dict(src_block.attn.state_dict())
        # The source MLP indices 0..3 map to l1 / act / l2 / drop respectively.
        dst_block.l1.load_state_dict(src_block.mlp[0].state_dict())
        dst_block.act.load_state_dict(src_block.mlp[1].state_dict())
        dst_block.l2.load_state_dict(src_block.mlp[2].state_dict())
        dst_block.drop.load_state_dict(src_block.mlp[3].state_dict())
    torch.save(model.state_dict(), 'pytorch_model.bin')
class KgCVAEConfig(object):
    """Hyper-parameter bundle for the KgCVAE dialog model.

    All values are plain class attributes so they can be read (and overridden)
    without instantiating the class.  When ``with_word_weights`` is enabled the
    class body loads the reversed vocabulary and up-weights slot/value tokens.
    """
    description = None
    use_hcf = True  # use dialog-act (human conversational feature) labels
    update_limit = 3000
    api_dir = 'data/cambridge_data/api_cambridge.pkl'
    rev_vocab_dir = 'data/cambridge_data/rev_vocab.pkl'
    n_state = 10
    cell_type = 'lstm'
    encoding_cell_size = 400
    state_cell_size = n_state
    embed_size = 300
    max_utt_len = 40
    max_dialog_len = 10
    num_layer = 1
    op = 'adam'
    grad_clip = 5.0
    init_w = 0.08
    batch_size = 16
    init_lr = 0.001
    lr_hold = 1
    lr_decay = 0.6
    keep_prob = 0.6
    improve_threshold = 0.996
    patient_increase = 2.0
    early_stop = True
    max_epoch = 60
    grad_noise = 0.0
    with_bow_loss = True
    bow_loss_weight = 0.4
    n_epoch = 10
    with_label_loss = False
    with_BPR = True
    with_direct_transition = False
    with_word_weights = False
    if with_word_weights:
        # BUG FIX: pickle files must be opened in binary mode ('rb'); text
        # mode breaks pickle.load under Python 3.
        with open(rev_vocab_dir, 'rb') as fh:
            rev_vocab = pkl.load(fh)
        # Collect ids of slot_/value_ tokens; these get boosted weights.
        slot_value_id_list = []
        for (k, v) in rev_vocab.items():
            if (('slot_' in k) or ('value_' in k)):
                slot_value_id_list.append(v)
        multiply_factor = 3
        one_weight = (1.0 / (len(rev_vocab) + ((multiply_factor - 1) * len(slot_value_id_list))))
        word_weights = ([one_weight] * len(rev_vocab))
        for i in slot_value_id_list:
            word_weights[i] = (multiply_factor * word_weights[i])
        sum_word_weights = np.sum(word_weights)
        # BUG FIX: exact float equality is unreliable after repeated float
        # arithmetic; compare within tolerance instead.
        assert np.isclose(sum_word_weights, 1.0)
        # Rescale so the mean weight is 1.0.
        word_weights = list((len(rev_vocab) * np.array(word_weights)))
    else:
        word_weights = None
def finish_perf_region(label):
    """Close the innermost linklet-perf timing region and record its stats.

    Only active when the PLT_LINKLET_TIMES environment variable is set.
    """
    from pycket.prims.general import current_gc_time
    if os_check_env_var('PLT_LINKLET_TIMES'):
        assert (len(linklet_perf.current_start_time) > 0)
        # Elapsed wall-clock and GC time since the matching region start.
        delta = (rtime.time() - linklet_perf.current_start_time[(- 1)])
        delta_gc = (current_gc_time() - linklet_perf.current_gc_start_time[(- 1)])
        table_add(linklet_perf.region_times, label, delta)
        table_add(linklet_perf.region_gc_times, label, delta_gc)
        table_add(linklet_perf.region_counts, label, 1)
        linklet_perf.current_start_time.pop()
        linklet_perf.current_gc_start_time.pop()
        # Shift the start times of all still-open enclosing regions forward by
        # this region's duration, so nested time is not counted twice.
        for i in range(len(linklet_perf.current_start_time)):
            linklet_perf.current_start_time[i] += delta
            linklet_perf.current_gc_start_time[i] += delta_gc
def main():
    """Regenerate .pot files from the CPython docs and sync the .po tree."""
    args = parse_args()
    setup_repo(args.cpython_repo, args.branch)
    # Build the gettext templates into <repo>/pot.
    run('sphinx-build', '-jauto', '-QDgettext_compact=0', '-bgettext', '.', '../pot',
        cwd=args.cpython_repo / 'Doc')
    pot_path = args.cpython_repo / 'pot'
    upstream = {
        pot.relative_to(pot_path).with_suffix('.po')
        for pot in pot_path.glob('**/*.pot')
    }
    tracked = run('git', 'ls-files', '*.po', stdout=PIPE).stdout.splitlines()
    downstream = {Path(po) for po in tracked}
    new = upstream - downstream
    known = upstream & downstream
    obsolete = downstream - upstream
    copy_new_files(new, pot_path=pot_path)
    update_known_files(known, pot_path=pot_path)
    remove_old_files(obsolete)
    clean_paths(new | known)
    shutil.rmtree(pot_path)
    run('powrap', '-m')
    update_makefile(args.cpython_repo)
    git_add_relevant_files()
@given(everythings(min_int=-(2 ** 63), max_int=(2 ** 64) - 1))
def test_msgpack(everything: Everything):
    """Round-trip an Everything instance through the msgpack preconf converter.

    NOTE(review): the @given decorator line was garbled in the source
    ('(everythings(min_int=(- ), max_int=))'); the int bounds are restored to
    msgpack's representable range (signed/unsigned 64-bit) — confirm upstream.
    """
    from msgpack import dumps as msgpack_dumps
    from msgpack import loads as msgpack_loads
    converter = msgpack_make_converter()
    raw = msgpack_dumps(converter.unstructure(everything))
    # strict_map_key=False: msgpack maps may use non-str keys here.
    assert (converter.structure(msgpack_loads(raw, strict_map_key=False), Everything) == everything)
@pytest.mark.skipif((not (torch.cuda.device_count() >= 2)), reason='not enough cuda devices')
class TestFSDP:
    """Check that an FSDP-wrapped module can be persisted via TensorDict.memmap.

    NOTE(review): the class decorator line was garbled in the source
    ('.skipif(...)'); restored as @pytest.mark.skipif — confirm upstream.
    """

    class MyDModule(nn.Module):
        """Tiny two-branch linear module with every weight set to 1.0."""

        def __init__(self):
            super().__init__()
            self.fc1 = nn.Linear(8, 8, bias=False)
            self.fc2 = nn.Linear(8, 8, bias=False)
            self.relu = nn.ReLU()
            # All-ones weights make the memmapped result easy to verify.
            for p in self.parameters():
                p.data.fill_(1.0)

        def forward(self, input):
            return self.relu((self.fc1(input) + self.fc2(input)))

    @classmethod
    def make_module(cls, device=None):
        # BUG FIX: restored @classmethod (the method takes ``cls``).
        with (torch.device(f'cuda:{device}') if (device is not None) else torch.device('cuda')):
            my_module = cls.MyDModule()
            my_sharded_module = FSDP(my_module, device_id=device)
            return my_sharded_module

    @classmethod
    def worker(cls, rank, path):
        """Per-process entry: init NCCL, build the FSDP module; rank 0 memmaps."""
        # BUG FIX: restored @classmethod (the method takes ``cls``).
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '10017'
        torch.distributed.init_process_group('nccl', rank=rank, world_size=2, init_method='tcp://localhost:10017')
        torch.cuda.set_device(rank)
        module = cls.make_module(rank)
        dist.barrier()
        td = TensorDict.from_module(module, use_state_dict=True)
        if (rank == 0):
            td.memmap(path)
        dist.destroy_process_group()

    def test_fsdp_module(self, tmpdir):
        """Spawn two workers; the saved memmap must contain the all-ones weights."""
        try:
            mp.set_start_method('spawn')
        except Exception:
            print('start method already set to', mp.get_start_method())
        proc0 = mp.Process(target=self.worker, args=(0, tmpdir))
        proc1 = mp.Process(target=self.worker, args=(1, tmpdir))
        proc0.start()
        proc1.start()
        proc0.join(timeout=TIMEOUT)
        proc1.join(timeout=TIMEOUT)
        assert (TensorDict.load_memmap(tmpdir) == 1).all()
class InaccessibleSysPath(fixtures.OnSysPath, ffs.TestCase):
    """Distribution discovery must tolerate an unreadable sys.path entry."""
    # Fake directory placed on sys.path; created with no permission bits.
    site_dir = '/access-denied'
    def setUp(self):
        """Activate the fake filesystem and create site_dir as unreadable."""
        super().setUp()
        self.setUpPyfakefs()
        # perm_bits=0: listing the directory will fail with a permission error.
        self.fs.create_dir(self.site_dir, perm_bits=0)
    def test_discovery(self):
        # Should complete without raising despite the inaccessible path entry.
        list(importlib_metadata.distributions())
class RSAPublicKey(PublicKey):
    """Wrapper around an RSA public key with verify/serialize helpers."""

    def __init__(self, public_key: rsa.RSAPublicKey):
        self._public_key = public_key

    def verify(self, data: bytes, signature: bytes) -> bool:
        """Return True iff *signature* (base64-encoded) is valid for *data*."""
        try:
            signature = base64.b64decode(signature)
            self._public_key.verify(signature, data, _RSA_PADDING, _RSA_HASH_ALGORITHM)
            return True
        except (ValueError, exceptions.InvalidSignature):
            # Bad base64 or an invalid signature both mean "not verified".
            return False

    def to_bytes(self) -> bytes:
        """Serialize the key in OpenSSH public-key format."""
        return self._public_key.public_bytes(encoding=serialization.Encoding.OpenSSH, format=serialization.PublicFormat.OpenSSH)

    @classmethod
    def from_bytes(cls, key: bytes) -> RSAPublicKey:
        """Parse an OpenSSH-format public key; reject non-RSA keys.

        BUG FIX: was missing @classmethod, so ``RSAPublicKey.from_bytes(data)``
        bound *data* to ``cls`` and failed.
        """
        key = serialization.load_ssh_public_key(key)
        if (not isinstance(key, rsa.RSAPublicKey)):
            raise ValueError(f'Expected an RSA public key, got {key}')
        return cls(key)
def get_parser():
    """Build the CLI parser for the dataset-split / metrics pipeline."""
    cli = argparse.ArgumentParser()
    # Dataset location and subset options.
    cli.add_argument('--dir', type=str, default='./data', help='Directory for splitted dataset')
    cli.add_argument('--no_subset', action='store_true', help='Do not create subsets for training and testing')
    cli.add_argument('--train_size', type=int, help='Size of training dataset')
    cli.add_argument('--test_size', type=int, help='Size of testing dataset')
    cli.add_argument('--seed', type=int, default=0, help='Random state')
    # Computation options.
    cli.add_argument('--precompute', type=str2bool, default=True, help='Precompute intermediate statistics')
    cli.add_argument('--n_jobs', type=int, default=1, help='Number of workers')
    cli.add_argument('--device', type=str, default='cpu', help='GPU device id')
    cli.add_argument('--batch_size', type=int, default=512, help='Batch size for FCD calculation')
    return cli
@pytest.mark.parametrize(
    'endpoint, params',
    [
        (UserRobot, {'robot_shortname': 'dtrobot'}),
        (OrgRobot, {'orgname': 'buynlarge', 'robot_shortname': 'coolrobot'}),
    ],
)
def test_retrieve_robot(endpoint, params, app):
    """GETting a user or org robot must return a non-null token.

    NOTE(review): the decorator line was garbled in the source
    ('.parametrize(...)'); restored as @pytest.mark.parametrize.
    """
    with client_with_identity('devtable', app) as cl:
        result = conduct_api_call(cl, endpoint, 'GET', params, None)
        assert (result.json['token'] is not None)
class F12_UserData(F8_UserData):
    """F12 ``user`` command data: extends F8 with the --gecos option."""
    removedKeywords = F8_UserData.removedKeywords
    removedAttrs = F8_UserData.removedAttrs

    def __init__(self, *args, **kwargs):
        F8_UserData.__init__(self, *args, **kwargs)
        # GECOS (comment) field for the user entry; empty by default.
        self.gecos = kwargs.get('gecos', '')

    def _getArgsAsStr(self):
        """Append --gecos to the inherited argument string when set."""
        retval = F8_UserData._getArgsAsStr(self)
        if not self.gecos:
            return retval
        return retval + (' --gecos="%s"' % (self.gecos,))
class Transformer(nn.Module):
    """Twins-style transformer stack interleaving local and global attention.

    Each depth step holds four sub-layers: (optional) local attention + its
    feed-forward, then global attention + its feed-forward. When ``has_local``
    is False the local pair degrades to identity modules.
    """

    def __init__(self, dim, depth, heads=8, dim_head=64, mlp_mult=4, local_patch_size=7, global_k=7, dropout=0.0, has_local=True):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            if has_local:
                local_block = Residual(PreNorm(dim, LocalAttention(dim, heads=heads, dim_head=dim_head, dropout=dropout, patch_size=local_patch_size)))
                local_ff = Residual(PreNorm(dim, FeedForward(dim, mlp_mult, dropout=dropout)))
            else:
                local_block = nn.Identity()
                local_ff = nn.Identity()
            global_block = Residual(PreNorm(dim, GlobalAttention(dim, heads=heads, dim_head=dim_head, dropout=dropout, k=global_k)))
            global_ff = Residual(PreNorm(dim, FeedForward(dim, mlp_mult, dropout=dropout)))
            self.layers.append(nn.ModuleList([local_block, local_ff, global_block, global_ff]))

    def forward(self, x):
        for local_attn, ff1, global_attn, ff2 in self.layers:
            x = ff1(local_attn(x))
            x = ff2(global_attn(x))
        return x
def getattribute_from_module(module, attr):
    """Resolve *attr* on *module*, falling back to the `pixel` package and then
    the `transformers` package.

    Accepts None (returned unchanged) and tuples (resolved element-wise).
    Raises ValueError when the attribute cannot be found anywhere.
    """
    if attr is None:
        return None
    if isinstance(attr, tuple):
        return tuple(getattribute_from_module(module, a) for a in attr)
    if hasattr(module, attr):
        return getattr(module, attr)
    pixel_module = importlib.import_module('pixel')
    if hasattr(pixel_module, attr):
        return getattr(pixel_module, attr)
    transformers_module = importlib.import_module('transformers')
    # BUG FIX: only recurse when we are not already looking at `transformers`;
    # the old code recursed unconditionally, so a missing attribute caused
    # infinite recursion (RecursionError) instead of a clear error.
    if module is not transformers_module:
        return getattribute_from_module(transformers_module, attr)
    raise ValueError(f'Could not find {attr} in {module}!')
class VectorStrategy(object):
    """Abstract storage strategy for vectors.

    Concrete subclasses store elements in a specialised representation; when a
    value of the wrong type is written, the vector is dehomogenized to the
    generic object strategy.
    """
    __metaclass__ = SingletonMeta
    def is_correct_type(self, w_vector, w_obj):
        """Return whether w_obj can be stored by this strategy."""
        raise NotImplementedError('abstract base class')
    def immutable(self):
        # Strategies are mutable by default; subclasses may override.
        return False
    def ref(self, w_vector, i, check=True):
        """Read element i, bounds-checking unless check is False."""
        if check:
            self.indexcheck(w_vector, i)
        return self._ref(w_vector, i)
    def set(self, w_vector, i, w_val, check=True):
        """Write element i; fall back to the object strategy on type mismatch."""
        if check:
            self.indexcheck(w_vector, i)
        if (not self.is_correct_type(w_vector, w_val)):
            # w_val cannot be stored in the specialised representation:
            # switch to the generic strategy before writing.
            self.dehomogenize(w_vector, hint=w_val)
            w_vector.unsafe_set(i, w_val)
        else:
            self._set(w_vector, i, w_val)
    def indexcheck(self, w_vector, i):
        assert (0 <= i < w_vector.length())
    def _ref(self, w_vector, i):
        raise NotImplementedError('abstract base class')
    def _set(self, w_vector, i, w_val):
        raise NotImplementedError('abstract base class')
    def _length(self, w_vector):
        return w_vector.get_len()
    def ref_all(self, w_vector):
        """Return every element of the vector."""
        raise NotImplementedError('abstract base class')
    def create_storage_for_element(self, element, times):
        """Create storage holding `element` repeated `times` times."""
        raise NotImplementedError('abstract base class')
    def create_storage_for_elements(self, elements):
        raise NotImplementedError('abstract base class')
    def dehomogenize(self, w_vector, hint):
        """Switch w_vector to the generic object strategy."""
        w_vector.change_strategy(ObjectVectorStrategy.singleton)
class MegatronTrainer(Trainer):
    """Trainer variant aware of Megatron model parallelism.

    NOTE(review): decorators appear to have been stripped from this file; the
    four data-parallel accessors below are properties on the fairseq Trainer
    base class, so @property is restored here — confirm against upstream.
    """

    def __init__(self, args, task, model, criterion):
        if (not has_megatron_submodule):
            raise ImportError('\n\nPlease install the megatron submodule:\n\n git submodule update --init fairseq/model_parallel/megatron')
        super().__init__(args, task, model, criterion)

    @property
    def data_parallel_world_size(self):
        return get_data_parallel_world_size()

    @property
    def data_parallel_process_group(self):
        return get_data_parallel_group()

    @property
    def data_parallel_rank(self):
        return get_data_parallel_rank()

    @property
    def is_data_parallel_master(self):
        # Master of the data-parallel group is the model-parallel source rank.
        return (get_model_parallel_src_rank() == 0)

    def clip_grad_norm(self, clip_norm):
        """Clip gradients, aggregating the norm across model-parallel workers."""
        def _aggregate_model_parallel_grad_norm(total_norm):
            # Sum the squared norms over the model-parallel group, then sqrt.
            total_norm = (total_norm ** 2)
            distributed_utils.all_reduce(total_norm, group=get_model_parallel_group())
            total_norm = (total_norm ** 0.5)
            return total_norm
        return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=_aggregate_model_parallel_grad_norm)
class MainWindow(QMainWindow):
    """PyQtConfig demo window wiring several widgets to a ConfigManager."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowTitle('PyQtConfig Demo')
        self.config = ConfigManager()
        # Values stored in the config for each combo-box label.
        CHOICE_A = 1
        CHOICE_B = 2
        CHOICE_C = 3
        CHOICE_D = 4
        map_dict = {'Choice A': CHOICE_A, 'Choice B': CHOICE_B, 'Choice C': CHOICE_C, 'Choice D': CHOICE_D}
        self.config.set_defaults({'number': 13, 'text': 'hello', 'active': True, 'combo': CHOICE_C})
        gd = QGridLayout()
        # Spin box bound to 'number'.
        sb = QSpinBox()
        gd.addWidget(sb, 0, 1)
        self.config.add_handler('number', sb)
        # Line edit bound to 'text'.
        te = QLineEdit()
        gd.addWidget(te, 1, 1)
        self.config.add_handler('text', te)
        # Check box bound to 'active'.
        cb = QCheckBox()
        gd.addWidget(cb, 2, 1)
        self.config.add_handler('active', cb)
        # Combo box bound to 'combo', with labels mapped to int values.
        cmb = QComboBox()
        cmb.addItems(map_dict.keys())
        gd.addWidget(cmb, 3, 1)
        self.config.add_handler('combo', cmb, mapper=map_dict)
        # Read-only text area mirroring the live config dict.
        self.current_config_output = QTextEdit()
        gd.addWidget(self.current_config_output, 0, 3, 3, 1)
        self.config.updated.connect(self.show_config)
        self.show_config()
        self.window = QWidget()
        self.window.setLayout(gd)
        self.setCentralWidget(self.window)
    def show_config(self):
        """Dump the current configuration into the output text area."""
        self.current_config_output.setText(str(self.config.as_dict()))
def setup_sphinx_tabs(app, config):
    """Disconnect sphinx-tabs' html-page-context listeners from the app."""
    registered = app.events.listeners.get('html-page-context')
    if sphinx.version_info < (3, 0, 0):
        # Pre-Sphinx-3: listeners are stored as a {listener_id: handler} dict.
        pairs = list(registered.items())
    else:
        # Sphinx >= 3: listeners are EventListener records with id/handler.
        pairs = [(entry.id, entry.handler) for entry in registered]
    for listener_id, handler in pairs:
        if inspect.getmodule(handler).__name__ == 'sphinx_tabs.tabs':
            app.disconnect(listener_id)
class ModuleUnloadedBreakpoint():
    """Installs an lldb breakpoint on the enclave module-unload debug hook."""
    def __init__(self, target):
        breakpoint = target.BreakpointCreateByName('oe_debug_module_unloaded_hook')
        breakpoint.SetScriptCallbackFunction('lldb_sgx_plugin.ModuleUnloadedBreakpoint.onHit')
    def onHit(frame, bp_loc, dict):
        # lldb script callback: invoked as a plain function with lldb's fixed
        # (frame, bp_loc, dict) signature, hence no ``self``; ``dict`` is
        # lldb's session dictionary and intentionally shadows the builtin.
        # The module structure address is passed in rdi (first SysV argument).
        library_image_addr = frame.FindValue('rdi', lldb.eValueTypeRegister).signed
        library_image = oe_debug_module_t(library_image_addr)
        unload_enclave_symbol(library_image.path, library_image.base_address)
        # False tells lldb not to stop execution at this breakpoint.
        return False
class TestTrainingExtensionsSvd(unittest.TestCase):
    def test_pick_compression_layers_top_x_percent(self):
        """top_x_percent selection: 100% picks fc1/conv2/fc2 (memory-cost
        order); 80% drops the most expensive layer."""
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        # Patch out automatic selection so the picker can be driven directly.
        with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated._perform_layer_selection'):
            layer_selector = ls.LayerSelectorDeprecated(aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, None, layer_db, percent_thresh=None)
            picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, percent_thresh=100)
            self.assertEqual(model.fc1, picked_layers[0].module)
            self.assertEqual(model.conv2, picked_layers[1].module)
            self.assertEqual(model.fc2, picked_layers[2].module)
            self.assertEqual(3, len(picked_layers))
            # At 80% the costliest layer (fc1) is no longer selected.
            picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, percent_thresh=80)
            self.assertEqual(model.conv2, picked_layers[0].module)
            self.assertEqual(model.fc2, picked_layers[1].module)
            self.assertEqual(2, len(picked_layers))
    def test_pick_compression_layers_top_n_layers(self):
        """top_n_layers selection: the ordering of the two picked layers flips
        between the memory and mac cost metrics."""
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated._perform_layer_selection'):
            layer_selector = ls.LayerSelectorDeprecated(aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, None, layer_db, num_layers=2)
            # By memory cost: fc1 first, then conv2.
            picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
            self.assertEqual(picked_layers[0].module, model.fc1)
            self.assertEqual(picked_layers[1].module, model.conv2)
            self.assertEqual(2, len(picked_layers))
            # By mac cost: conv2 first, then fc1.
            picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.mac, layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
            self.assertEqual(picked_layers[0].module, model.conv2)
            self.assertEqual(picked_layers[1].module, model.fc1)
            self.assertEqual(2, len(picked_layers))
    def test_pick_compression_layers_manual(self):
        """Manual selection returns exactly the explicitly listed layers."""
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated._perform_layer_selection'):
            layer_selector = ls.LayerSelectorDeprecated(aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, None, layer_db, layers_to_compress=[model.conv2])
            # Only conv2 was requested, so only conv2 may be picked.
            picked_layers = layer_selector._pick_compression_layers(run_model=mnist_model.evaluate, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_select_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=[model.conv2])
            self.assertEqual(1, len(picked_layers))
            self.assertEqual(picked_layers[0].module, model.conv2)
    def test_split_conv_layer_with_mo(self):
        """Splitting conv2 at rank 28 must yield a 1x1 conv_a into a rank-28
        intermediate followed by a 5x5 conv_b, with stride/padding preserved."""
        logger.debug(self.id())
        model = mnist_model.Net().to('cpu')
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
            svd = s.SvdImpl(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
            conv2 = layer_db.find_layer_by_module(model.conv2)
            pymo_utils.PymoSvdUtils.configure_layers_in_pymo_svd([conv2], aimet_common.defs.CostMetric.mac, svd._svd_lib_ref)
            split_layer = svd_pruner_deprecated.DeprecatedSvdPruner
            (seq, conv_a, conv_b) = split_layer.prune_layer(conv2, 28, svd._svd_lib_ref)
            print('\n')
            # Peek at a few of conv_a's weights for manual inspection.
            weight_arr = conv_a.module.weight.detach().numpy().flatten()
            weight_arr = weight_arr[0:10]
            print(weight_arr)
            # conv_a: 1x1 conv down to the rank-28 intermediate; conv_b keeps
            # the original 5x5 kernel and output channel count.
            self.assertEqual((28, model.conv2.in_channels, 1, 1), conv_a.module.weight.shape)
            self.assertEqual([28], list(conv_a.module.bias.shape))
            self.assertEqual((model.conv2.out_channels, 28, 5, 5), conv_b.module.weight.shape)
            self.assertEqual([model.conv2.out_channels], list(conv_b.module.bias.shape))
            self.assertEqual(model.conv2.stride, conv_a.module.stride)
            self.assertEqual(model.conv2.stride, conv_b.module.stride)
            self.assertEqual((0, 0), conv_a.module.padding)
            self.assertEqual(model.conv2.padding, conv_b.module.padding)
            self.assertEqual((1, 1), conv_a.module.kernel_size)
            self.assertEqual(model.conv2.kernel_size, conv_b.module.kernel_size)
    def test_split_fc_layer_without_mo(self):
        """Splitting fc1 at rank 400 with a mocked SVD library must produce two
        linear layers with the expected shapes and attribute links."""
        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        with unittest.mock.patch('aimet_torch.layer_database.LayerDatabase'):
            with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
                svd = s.SvdImpl(model=model, run_model=None, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
                layer_attr = Layer(model.fc1, id(model.fc1), [3136, 1024, 1, 1])
                # Mock the SVD library so prune_layer receives zero-filled splits.
                svd._svd_lib_ref = create_autospec(pymo.Svd, instance=True)
                split_weights = [np.zeros((400, model.fc1.in_features)).flatten().tolist(), np.zeros((model.fc1.out_features, 400)).flatten().tolist()]
                svd._svd_lib_ref.SplitLayerWeights.return_value = split_weights
                split_biases = [np.zeros(400).flatten().tolist(), np.zeros(model.fc1.out_features).flatten().tolist()]
                svd._svd_lib_ref.SplitLayerBiases.return_value = split_biases
                split_layer = svd_pruner_deprecated.DeprecatedSvdPruner
                (seq, layer_a_attr, layer_b_attr) = split_layer.prune_layer(layer_attr, 400, svd_lib_ref=svd._svd_lib_ref)
                # seq[0]: in_features -> 400; seq[1]: 400 -> out_features.
                self.assertEqual((400, model.fc1.in_features), seq[0].weight.shape)
                self.assertEqual([400], list(seq[0].bias.shape))
                self.assertEqual((model.fc1.out_features, 400), seq[1].weight.shape)
                self.assertEqual([model.fc1.out_features], list(seq[1].bias.shape))
                self.assertEqual(layer_a_attr.module, seq[0])
                self.assertEqual(layer_b_attr.module, seq[1])
    def test_create_compressed_model(self):
        """_create_compressed_model must deep-copy the model and replace the
        selected conv2/fc2 layers with rank-truncated sequential pairs."""
        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        with unittest.mock.patch('aimet_torch.svd.layer_database.LayerDatabase'):
            with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
                svd = s.SvdImpl(model=model, run_model=None, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
                # Fake the layer-selection result: compress fc2 and conv2.
                ls.LayerSelectorDeprecated._pick_compression_layers = create_autospec(ls.LayerSelectorDeprecated._pick_compression_layers)
                layer_attr1 = Layer(model.fc2, id(model.fc2), model.fc2.weight.shape)
                layer_attr1.parent_module = model
                layer_attr1.var_name_of_module_in_parent = 'fc2'
                layer_attr1.output_shape = [0, 0, 1, 1]
                layer_attr1.name = 'fc2'
                layer_attr2 = Layer(model.conv2, id(model.conv2), model.conv2.weight.shape)
                layer_attr2.parent_module = model
                layer_attr2.var_name_of_module_in_parent = 'conv2'
                layer_attr2.name = 'conv2'
                layer_attr1.output_shape = [0, 0, 14, 14]
                ls.LayerSelectorDeprecated._pick_compression_layers.return_value = [layer_attr1, layer_attr2]
                svd._compressible_layers = {id(model.conv2): layer_attr2, id(model.fc2): layer_attr1}
                ls.LayerSelectorDeprecated._perform_layer_selection(model)
                svd._select_candidate_ranks(20)
                # Requested ranks: 31 for conv2, 9 for fc2.
                svd_rank_pair_dict = {'conv2': (31, 0), 'fc2': (9, 0)}
                (c_model, c_layer_attr, _) = svd._create_compressed_model(svd_rank_pair_dict)
                # The compressed model is a distinct copy of the original.
                self.assertTrue((c_model is not model))
                self.assertTrue((c_model.conv1 is not model.conv1))
                self.assertTrue((c_model.conv2 is not model.conv2))
                self.assertFalse(isinstance(svd._model, nn.Sequential))
                # fc2 becomes a two-layer linear pair truncated at rank 9.
                self.assertEqual((9, 1024), c_model.fc2[0].weight.shape)
                self.assertEqual([9], list(c_model.fc2[0].bias.shape))
                self.assertEqual((10, 9), c_model.fc2[1].weight.shape)
                self.assertEqual([10], list(c_model.fc2[1].bias.shape))
                # conv2 becomes a 1x1 + 5x5 conv pair truncated at rank 31.
                self.assertEqual((31, 32, 1, 1), c_model.conv2[0].weight.shape)
                self.assertEqual([31], list(c_model.conv2[0].bias.shape))
                self.assertEqual((64, 31, 5, 5), c_model.conv2[1].weight.shape)
                self.assertEqual([64], list(c_model.conv2[1].bias.shape))
                # Untouched layers keep their original shapes.
                self.assertEqual(svd._model.conv1.weight.shape, c_model.conv1.weight.shape)
                self.assertEqual(svd._model.fc1.weight.shape, c_model.fc1.weight.shape)
                self.assertEqual(4, len(c_layer_attr))
    def test_svd_with_mo(self):
        """End-to-end auto-rank SVD compression on the MNIST model."""
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        svd = s.SvdImpl(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, percent_thresh=60)
        (c_model, svd_stats) = svd.compress_net(rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, num_rank_indices=20, error_margin=10)
        svd_stats.pretty_print(logger=logger)
    def test_svd_sequential_with_mo(self):
        """Same end-to-end SVD flow on the nn.Sequential MNIST variant."""
        logger.debug(self.id())
        model = MnistSequentialModel().to('cpu')
        svd = s.SvdImpl(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, percent_thresh=60)
        (c_model, svd_stats) = svd.compress_net(rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, num_rank_indices=20, error_margin=10)
        # logger=None exercises the print fallback of pretty_print.
        svd_stats.pretty_print(logger=None)
    def test_set_parent_attribute_two_deep(self):
        """set_reference_to_parent_module must resolve the owning submodule for
        layers nested one level deep, and the model itself for top-level layers."""
        class SubNet(nn.Module):
            def __init__(self):
                super(SubNet, self).__init__()
                self.conv1 = nn.Conv2d(30, 40, 5)
                self.conv2 = nn.Conv2d(40, 50, kernel_size=5)
            def forward(self, *inputs):
                pass
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, 5)
                self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                self.conv2_drop = nn.Dropout2d()
                self.subnet1 = SubNet()
                self.fc1 = nn.Linear(320, 50)
                self.subnet2 = SubNet()
                self.fc2 = nn.Linear(50, 10)
            def forward(self, *inputs):
                pass
        net = Net()
        model = net.to('cpu')
        output_activation_shape = None
        # Track one layer from each subnet plus a top-level layer.
        layers = {id(model.subnet1.conv2): Layer(model.subnet1.conv2, id(model.subnet1.conv2), output_activation_shape), id(model.subnet2.conv1): Layer(model.subnet2.conv1, id(model.subnet2.conv1), output_activation_shape), id(model.fc2): Layer(model.fc2, id(model.fc2), output_activation_shape)}
        LayerDatabase.set_reference_to_parent_module(model, layers)
        self.assertEqual(model.subnet1, layers[id(model.subnet1.conv2)].parent_module)
        self.assertEqual(model.subnet2, layers[id(model.subnet2.conv1)].parent_module)
        self.assertEqual(model, layers[id(model.fc2)].parent_module)
    def test_set_attributes_with_sequentials(self):
        """Parent resolution must also work for layers inside nn.Sequential
        containers: the Sequential is the parent, not the model."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, 5)
                self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                self.conv2_drop = nn.Dropout2d()
                self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(10, 20, 5))
                self.fc1 = nn.Linear(320, 50)
                self.subnet2 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(1, 10, 5))
                self.fc2 = nn.Linear(50, 10)
            def forward(self, *inputs):
                pass
        net = Net()
        model = net.to('cpu')
        output_activation_shape = None
        # Track one layer from each Sequential plus a top-level layer.
        layers = {id(model.subnet1[2]): Layer(model.subnet1[2], id(model.subnet1[2]), output_activation_shape), id(model.subnet2[0]): Layer(model.subnet2[0], id(model.subnet2[0]), output_activation_shape), id(model.fc2): Layer(model.fc2, id(model.fc2), output_activation_shape)}
        LayerDatabase.set_reference_to_parent_module(model, layers)
        self.assertEqual(model.subnet1, layers[id(model.subnet1[2])].parent_module)
        self.assertEqual(model.subnet2, layers[id(model.subnet2[0])].parent_module)
        self.assertEqual(model, layers[id(model.fc2)].parent_module)
    def test_set_parent_attribute_with_sequential_two_deep(self):
        """Parent resolution must handle an nn.Sequential nested inside another
        nn.Sequential: the innermost container is the parent."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(1, 10, 5)
                self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
                self.conv2_drop = nn.Dropout2d()
                self.subnet1 = nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Sequential(nn.Conv2d(1, 10, 5), nn.ReLU(), nn.Conv2d(20, 50, 5)), nn.Conv2d(1, 10, 5))
                self.fc1 = nn.Linear(320, 50)
                self.fc2 = nn.Linear(50, 10)
            def forward(self, *inputs):
                pass
        net = Net()
        model = net.to('cpu')
        output_activation_shape = None
        # One layer from the outer Sequential, two from the inner one.
        layers = {id(model.subnet1[0]): Layer(model.subnet1[0], None, output_activation_shape), id(model.subnet1[2][0]): Layer(model.subnet1[2][0], None, output_activation_shape), id(model.subnet1[2][2]): Layer(model.subnet1[2][2], None, output_activation_shape)}
        LayerDatabase.set_reference_to_parent_module(model, layers)
        self.assertEqual(model.subnet1, layers[id(model.subnet1[0])].parent_module)
        self.assertEqual(model.subnet1[2], layers[id(model.subnet1[2][0])].parent_module)
        self.assertEqual(model.subnet1[2], layers[id(model.subnet1[2][2])].parent_module)
    def test_choose_best_ranks(self):
        """choose_best_rank() should iterate over all rank indices with mocked
        compression ratios and model evaluations without raising."""
        model = MnistModel().to('cpu')
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        # Mocked evaluation returns a fresh "accuracy" value for each of the
        # 20 candidate rank indices.
        run_model_return_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
        run_model = unittest.mock.Mock(side_effect=run_model_return_values)
        with unittest.mock.patch('aimet_torch.layer_database.LayerDatabase'):
            with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
                svd = s.SvdImpl(model=model, run_model=run_model, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
                # Stub out the expensive pieces: network cost and the MO SVD
                # library reference.
                svd._network_cost = (500, 500)
                svd._svd_lib_ref = create_autospec(pymo.Svd, instance=True)
                with unittest.mock.patch('aimet_torch.svd.model_stats_calculator.ModelStats.compute_compression_ratio') as compute_compression_ratio:
                    with unittest.mock.patch('aimet_torch.svd.svd_pruner_deprecated.ModelPruner.create_compressed_model') as create_compressed_model:
                        with unittest.mock.patch('aimet_torch.svd.rank_selector.RankSelector._select_candidate_ranks') as select_candidate_ranks:
                            select_candidate_ranks.return_value = 20
                            compute_compression_ratio.side_effect = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
                            create_compressed_model.return_value = (None, None, None)
                            rank_selector = rank_select.RankSelector(svd_lib_ref=svd._svd_lib_ref)
                            rank_selector.choose_best_rank(model=model, run_model=run_model, run_model_iterations=1, use_cuda=False, metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, error_margin=1, baseline_perf=0.5, num_rank_indices=20, database=layer_db)
    def test_validate_params(self):
        """_validate_layer_rank_params() should accept valid layer/rank scheme
        combinations and raise ValueError for each invalid one."""
        si = svd_intf
        model = MnistModel()
        # Valid combinations -- must not raise.
        si.Svd._validate_layer_rank_params(model, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=[], rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.manual, layer_rank_list=[])
        si.Svd._validate_layer_rank_params(model, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=[], rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        si.Svd._validate_layer_rank_params(model, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, percent_thresh=0, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        si.Svd._validate_layer_rank_params(model, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=1, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        # Invalid combinations -- each call must raise ValueError.
        with pytest.raises(ValueError):
            # Manual rank selection without a layer_rank_list.
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.manual)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.manual, error_margin=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.manual, num_rank_indices=None)
        with pytest.raises(ValueError):
            # Auto rank selection requires error_margin and num_rank_indices.
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, num_rank_indices=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, layers_to_compress=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, layer_rank_list=None)
        with pytest.raises(ValueError):
            # top_n_layers requires num_layers, not percent_thresh.
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, percent_thresh=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        with pytest.raises(ValueError):
            # num_layers out of range: negative, zero, and beyond model depth.
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=(- 1), rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=0, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=6, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        with pytest.raises(ValueError):
            # top_x_percent requires percent_thresh within [0, 100].
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, num_layers=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, percent_thresh=(- 1), rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_x_percent, percent_thresh=101, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
        with pytest.raises(ValueError):
            si.Svd._validate_layer_rank_params(model, aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=None, percent_thresh=None, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, error_margin=None, num_rank_indices=None)
    def test_compress_model_no_iterations(self):
        """compress_model() must reject run_model_iterations=0 with ValueError."""
        model = MnistModel().to('cpu')
        with pytest.raises(ValueError):
            (_, _) = svd_intf.Svd.compress_model(model=model, run_model=mnist_model.evaluate, run_model_iterations=0, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.mac, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, layers_to_compress=[model.conv2, model.fc2], num_rank_indices=20, error_margin=100)
    def test_compress_model(self):
        """End-to-end SVD compression of conv2 and fc2: each selected layer is
        replaced by a two-layer split that retains its bias terms."""
        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        (c_model, stats) = svd_intf.Svd.compress_model(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.mac, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, layers_to_compress=[model.conv2, model.fc2], num_rank_indices=20, error_margin=100)
        # Compressed layers become two-element splits; biases must survive.
        self.assertTrue((c_model.conv2[0].bias is not None))
        self.assertTrue((c_model.conv2[1].bias is not None))
        self.assertTrue((c_model.fc2[0].bias is not None))
        self.assertTrue((c_model.fc2[1].bias is not None))
        # Stats must record exactly the two selected layers, in order.
        self.assertEqual(2, len(stats.per_rank_index[0].per_selected_layer))
        self.assertEqual('conv2', stats.per_rank_index[0].per_selected_layer[0].layer_name)
        self.assertEqual('fc2', stats.per_rank_index[0].per_selected_layer[1].layer_name)
    def test_compress_model_no_bias(self):
        """SVD compression of bias-free layers must produce bias-free splits."""
        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        # Strip the biases from the layers to be compressed.
        model.conv2.bias = None
        model.fc2.bias = None
        (c_model, stats) = svd_intf.Svd.compress_model(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.mac, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, layers_to_compress=[model.conv2, model.fc2], num_rank_indices=20, error_margin=100)
        # No bias should be re-introduced by the split.
        self.assertTrue((c_model.conv2[0].bias is None))
        self.assertTrue((c_model.conv2[1].bias is None))
        self.assertTrue((c_model.fc2[0].bias is None))
        self.assertTrue((c_model.fc2[1].bias is None))
        self.assertEqual(2, len(stats.per_rank_index[0].per_selected_layer))
        self.assertEqual('conv2', stats.per_rank_index[0].per_selected_layer[0].layer_name)
        self.assertEqual('fc2', stats.per_rank_index[0].per_selected_layer[1].layer_name)
    def test_compress_model_with_stride(self):
        """SVD compression must handle a conv layer with stride > 1."""
        AimetLogger.set_level_for_all_areas(logging.DEBUG)
        logger.debug(self.id())
        model = MnistModel().to('cpu')
        # Replace conv2 with a strided variant and resize fc1 to match the
        # smaller feature map it produces.
        model.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=(2, 2), stride=(2, 2))
        model.fc1 = nn.Linear(((3 * 3) * 64), 1024)
        (c_model, stats) = svd_intf.Svd.compress_model(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.mac, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.manual, rank_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.RankSelectionScheme.auto, layers_to_compress=[model.conv2, model.fc2], num_rank_indices=20, error_margin=100)
        self.assertEqual(2, len(stats.per_rank_index[0].per_selected_layer))
        self.assertEqual('conv2', stats.per_rank_index[0].per_selected_layer[0].layer_name)
        self.assertEqual('fc2', stats.per_rank_index[0].per_selected_layer[1].layer_name)
    # NOTE(review): '.cuda' below looks like a truncated decorator (likely
    # '@pytest.mark.cuda' or similar) lost in extraction -- confirm upstream.
    .cuda
    def test_model_allocation_gpu(self):
        """_is_model_on_gpu() should track where the model's parameters live,
        including mixed placement after moving a single layer."""
        model = MnistModel().to('cuda')
        svd = s.SvdImpl(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
        self.assertTrue(svd._is_model_on_gpu())
        # Moving a single layer off the GPU makes the model "not on GPU".
        model.conv1.to('cpu')
        self.assertFalse(svd._is_model_on_gpu())
        model = MnistModel().to('cpu')
        svd = s.SvdImpl(model=model, run_model=mnist_model.evaluate, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=aimet_torch.svd.svd_intf_defs_deprecated.CompressionTechnique.svd, cost_metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, layer_selection_scheme=aimet_torch.svd.svd_intf_defs_deprecated.LayerSelectionScheme.top_n_layers, num_layers=2)
        self.assertFalse(svd._is_model_on_gpu())
        model.cuda()
        self.assertTrue(svd._is_model_on_gpu())
    def test_split_manual_rank(self):
        """split_manual_rank() with a manual [layer, rank] list should produce
        one svd rank pair entry for the single requested layer."""
        model = MnistModel().to('cpu')
        run_model = mnist_model.evaluate
        logger.debug(self.id())
        intf_defs = aimet_torch.svd.svd_intf_defs_deprecated
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        with unittest.mock.patch('aimet_torch.layer_database.LayerDatabase'):
            with unittest.mock.patch('aimet_torch.svd.layer_selector_deprecated.LayerSelectorDeprecated'):
                svd = s.SvdImpl(model=model, run_model=None, run_model_iterations=1, input_shape=(1, 1, 28, 28), compression_type=intf_defs.CompressionTechnique.svd, cost_metric=intf_defs.CostMetric.memory, layer_selection_scheme=intf_defs.LayerSelectionScheme.manual, layers_to_compress=[model.fc1])
                # Manually request rank 9 for fc1.
                layer_rank_list = [[model.fc1, 9]]
                with unittest.mock.patch('aimet_common.cost_calculator.CostCalculator.compute_network_cost') as compute_network_cost:
                    compute_network_cost.return_value = cc.Cost(100, 200)
                    # Stub the MO SVD library: return zero-filled weight/bias
                    # splits shaped for a rank-400 factorization of fc1.
                    svd._svd_lib_ref = create_autospec(pymo.Svd, instance=True)
                    split_weights = [np.zeros((400, model.fc1.in_features)).flatten().tolist(), np.zeros((model.fc1.out_features, 400)).flatten().tolist()]
                    svd._svd_lib_ref.SplitLayerWeights.return_value = split_weights
                    split_biases = [np.zeros(400).flatten().tolist(), np.zeros(model.fc1.out_features).flatten().tolist()]
                    svd._svd_lib_ref.SplitLayerBiases.return_value = split_biases
                    rank_selector = rank_select.RankSelector(svd_lib_ref=svd._svd_lib_ref)
                    (rank_data_list, svd_rank_pair_dict) = rank_selector.split_manual_rank(model=model, run_model=run_model, run_model_iterations=1, use_cuda=False, metric=aimet_torch.svd.svd_intf_defs_deprecated.CostMetric.memory, database=layer_db, layer_rank_list=layer_rank_list)
                    self.assertEqual(len(svd_rank_pair_dict), 1)
def make20(b):
    """Return the 20 vertex coordinates of a polyhedron with edge length *b*.

    The vertices come in four rings of five points each; every ring is
    generated by rotating one seed point around the z axis in 72-degree
    steps.  Relies on the module helpers ``r2edge`` and ``rotmatz``.
    Presumably this is a regular dodecahedron (20 vertices) -- confirm.
    """
    theta1 = numpy.arccos(numpy.sqrt(5) / 3)
    theta2 = numpy.arcsin((r2edge(theta1, 1) / 2) / numpy.sin(numpy.pi / 5))
    # Sphere radius that yields the requested edge length b.
    radius = (b / 2) / numpy.sin(theta1 / 2)
    step = rotmatz(numpy.pi * 2 / 5)  # 72-degree rotation about z
    sin2 = numpy.sin(theta2)
    cos2 = numpy.cos(theta2)
    sin3 = numpy.sin(theta1 + theta2)
    cos3 = numpy.cos(theta1 + theta2)
    # One seed vertex per ring; the lower two rings mirror the upper two.
    seeds = (
        numpy.array((sin2 * radius, 0, cos2 * radius)),
        numpy.array((sin3 * radius, 0, cos3 * radius)),
        numpy.array(((-sin3) * radius, 0, (-cos3) * radius)),
        numpy.array(((-sin2) * radius, 0, (-cos2) * radius)),
    )
    vertices = []
    for seed in seeds:
        point = seed
        for _ in range(5):
            vertices.append(point)
            point = numpy.dot(point, step)
    return numpy.array(vertices)
def create_pile(class_ids, num_instances, random_state=None):
    """Drop *num_instances* YCB objects (sampled from *class_ids*) into a bin
    and let them settle, then remove the bin.

    Returns the list of pybullet unique ids of the objects that stayed
    inside the bin footprint.  Objects that settle outside the bin are
    deleted and re-queued for another drop attempt.
    """
    if (random_state is None):
        random_state = np.random.RandomState()
    # Bin footprint in x/y (metres) and drop height z.
    x = ((- 0.2), 0.2)
    y = ((- 0.2), 0.2)
    z = 0.5
    bin_unique_id = safepicking.pybullet.create_bin(X=(x[1] - x[0]), Y=(y[1] - y[0]), Z=(z / 2))
    unique_ids = []
    class_ids = random_state.choice(class_ids, num_instances).tolist()
    while class_ids:
        class_id = class_ids.pop()
        # Random pose above the bin; quaternion normalized to unit length.
        position = random_state.uniform([x[0], y[0], z], [x[1], y[1], z])
        quaternion = random_state.random((4,))
        quaternion /= np.linalg.norm(quaternion)
        coord = safepicking.geometry.Coordinate(position, quaternion=quaternion)
        visual_file = safepicking.datasets.ycb.get_visual_file(class_id=class_id)
        collision_file = safepicking.pybullet.get_collision_file(visual_file)
        unique_id = safepicking.pybullet.create_mesh_body(visual_file=visual_file, collision_file=collision_file, mass=safepicking.datasets.ycb.masses[class_id], position=coord.position, quaternion=coord.quaternion)
        p.addUserData(unique_id, 'class_id', str(class_id))
        # Simulate until the dropped object comes (numerically) to rest.
        for _ in range(1000):
            p.stepSimulation()
            if (np.linalg.norm(p.getBaseVelocity(unique_id)[0]) < 1e-12):
                break
        # Reject objects that bounced outside the bin's AABB and retry them.
        (aabb_min, aabb_max) = p.getAABB(bin_unique_id)
        (position, _) = p.getBasePositionAndOrientation(unique_id)
        if (not ((aabb_min[0] < position[0] < aabb_max[0]) and (aabb_min[1] < position[1] < aabb_max[1]))):
            p.removeBody(unique_id)
            class_ids.append(class_id)
        else:
            unique_ids.append(unique_id)
    # Lower the bin out of the scene in 1 mm steps, letting the pile settle
    # after each step, so the objects end up resting on the ground.
    for _ in range(250):
        coord = safepicking.geometry.Coordinate(*pp.get_pose(bin_unique_id))
        coord.translate([0, 0, (- 0.001)], wrt='world')
        pp.set_pose(bin_unique_id, coord.pose)
        for _ in range(100):
            p.stepSimulation()
            if all(((np.linalg.norm(p.getBaseVelocity(unique_id)[0]) < 1e-12) for unique_id in safepicking.pybullet.get_body_unique_ids())):
                break
    p.removeBody(bin_unique_id)
    return unique_ids
def test_multiply_float_int():
    """multiply_fixed_point_float_by_int() should approximate float * int
    within the fixed-point truncation error bound."""
    float_width = 24
    int_width = 8
    val = np.random.random()
    # Quantize val into a float_width-bit fixed-point integer.
    fp_bits = iter_bits_fixed_point(val, float_width)
    fp_int = int(''.join((str(b) for b in fp_bits)), 2)
    int_val = np.random.randint(0, ((2 ** int_width) - 1))
    result = multiply_fixed_point_float_by_int(fp_int, int_val, float_width, int_width)
    # Rescale the fixed-point product and compare against the exact value.
    assert (abs(((result / (2 ** float_width)) - (int_val * val))) <= (int_width * (2 ** (int_width - float_width))))
class ObjectMessageType(MessageType):
    """MessageType that serializes arbitrary objects with Packer/Unpacker."""

    def message_type_name(self):
        """Wire identifier for this message type."""
        return 'obj'

    def message_to_bytes(self, message):
        """Serialize *message* into a byte buffer via a Packer."""
        packer = Packer()
        packer.pack_object(message)
        return packer.get_buffer()

    def message_from_bytes(self, bb):
        """Deserialize a byte buffer; an empty/None buffer yields None."""
        if not bb:
            return None
        return Unpacker(bb).unpack_object()
class PandaBucketConfig(PandaDefaultConfig):
    """Panda arm configuration whose end-effector is a rigid bucket."""

    def __init__(self) -> None:
        super().__init__()
        # URDF with the bucket mounted in place of the default gripper.
        self.urdf_path = '{PACKAGE_ASSET_DIR}/descriptions/panda_bucket.urdf'
        self.ee_link_name = 'bucket'

    # NOTE(review): this reads `super().controllers` without calling it, which
    # suggests the parent declares it as a property and that an `@property`
    # decorator may have been lost in extraction -- confirm against upstream.
    def controllers(self):
        # The bucket has no gripper, so strip gripper sub-configs from every
        # inherited controller configuration.
        controller_configs = super().controllers
        for (k, v) in controller_configs.items():
            if (isinstance(v, dict) and ('gripper' in v)):
                v.pop('gripper')
        return controller_configs

    # NOTE(review): likely also an @property upstream -- see note above.
    def cameras(self):
        # Hand camera rigidly attached to the bucket link.
        return CameraConfig(uid='hand_camera', p=[0.0, 0.08, 0.0], q=[0.5, (- 0.5), (- 0.5), (- 0.5)], width=128, height=128, near=0.01, far=10, fov=(np.pi / 2), actor_uid='bucket')
def test_caption_query_get_by_language_code_when_exists():
    """Indexing a CaptionQuery by language code returns the matching caption."""
    english = Caption({'url': 'url1', 'name': {'simpleText': 'name1'}, 'languageCode': 'en', 'vssId': '.en'})
    french = Caption({'url': 'url2', 'name': {'simpleText': 'name2'}, 'languageCode': 'fr', 'vssId': '.fr'})
    query = CaptionQuery(captions=[english, french])
    assert (query['en'] == english)
class BezierFamily(BasisFamily):
    """Bezier (Bernstein polynomial) basis family on the interval [0, T]."""

    def __init__(self, N, T=1):
        # N basis functions of degree N-1; T scales time onto [0, 1].
        super(BezierFamily, self).__init__(N)
        self.T = float(T)

    def eval_deriv(self, i, k, t, var=None):
        """Evaluate the k-th time derivative of the i-th Bernstein basis
        polynomial at time(s) *t*.

        Raises ValueError for i >= N; returns zero (shaped like t) when the
        derivative order meets or exceeds the polynomial degree bound.
        """
        if (i >= self.N):
            raise ValueError('Basis function index too high')
        elif (k >= self.N):
            # A degree N-1 polynomial's N-th and higher derivatives vanish;
            # 0 * t keeps the array shape of t.
            return (0 * t)
        n = (self.N - 1)
        u = (t / self.T)  # normalized time on [0, 1]
        if (k == 0):
            # Plain Bernstein basis polynomial b_{i,n}(u).
            return ((binom(n, i) * (u ** i)) * ((1 - u) ** (n - i)))
        # k-th derivative via the power-series expansion of b_{i,n}; the
        # 1/T**k factor accounts for the chain rule on u = t/T.
        return (binom(n, i) * sum([(((((((- 1) ** (j - i)) * binom((n - i), (j - i))) * factorial(j)) / factorial((j - k))) * np.power(u, (j - k))) / np.power(self.T, k)) for j in range(max(i, k), (n + 1))]))
class SettingsTree(QTreeWidget):
    """Tree view that displays and edits the contents of a QSettings object.

    Groups appear as expandable folder items; keys appear as editable leaf
    items with three columns (setting name, value type, value).  When
    auto-refresh is enabled, a 2-second timer re-reads the settings so that
    external changes become visible.
    """

    def __init__(self, parent=None):
        super(SettingsTree, self).__init__(parent)
        self.setItemDelegate(VariantDelegate(self))
        self.setHeaderLabels(('Setting', 'Type', 'Value'))
        self.header().setSectionResizeMode(0, QHeaderView.Stretch)
        self.header().setSectionResizeMode(2, QHeaderView.Stretch)
        self.settings = None
        # Polling timer for auto-refresh; started/stopped on demand.
        self.refreshTimer = QTimer()
        self.refreshTimer.setInterval(2000)
        self.autoRefresh = False
        self.groupIcon = QIcon()
        self.groupIcon.addPixmap(self.style().standardPixmap(QStyle.SP_DirClosedIcon), QIcon.Normal, QIcon.Off)
        self.groupIcon.addPixmap(self.style().standardPixmap(QStyle.SP_DirOpenIcon), QIcon.Normal, QIcon.On)
        self.keyIcon = QIcon()
        self.keyIcon.addPixmap(self.style().standardPixmap(QStyle.SP_FileIcon))
        self.refreshTimer.timeout.connect(self.maybeRefresh)

    def setSettingsObject(self, settings):
        """Attach *settings* (QSettings or None) and rebuild the tree."""
        self.settings = settings
        self.clear()
        if (self.settings is not None):
            self.settings.setParent(self)
            self.refresh()
            if self.autoRefresh:
                self.refreshTimer.start()
        else:
            self.refreshTimer.stop()

    def sizeHint(self):
        return QSize(800, 600)

    def setAutoRefresh(self, autoRefresh):
        """Enable or disable periodic re-reading of the settings."""
        self.autoRefresh = autoRefresh
        if (self.settings is not None):
            if self.autoRefresh:
                self.maybeRefresh()
                self.refreshTimer.start()
            else:
                self.refreshTimer.stop()

    def setFallbacksEnabled(self, enabled):
        """Toggle QSettings fallback lookups and refresh the display."""
        if (self.settings is not None):
            self.settings.setFallbacksEnabled(enabled)
            self.refresh()

    def maybeRefresh(self):
        """Refresh unless the user is currently editing an item."""
        if (self.state() != QAbstractItemView.EditingState):
            self.refresh()

    def refresh(self):
        """Re-read the settings object and rebuild the tree contents."""
        if (self.settings is None):
            return
        # Disconnect so the programmatic updates below don't write back to
        # the settings.  PyQt raises TypeError and PySide RuntimeError when
        # the signal is not connected yet (fixed: was a bare 'except:').
        try:
            self.itemChanged.disconnect(self.updateSetting)
        except (TypeError, RuntimeError):
            pass
        self.settings.sync()
        self.updateChildItems(None)
        self.itemChanged.connect(self.updateSetting)

    def event(self, event):
        # Refresh on window activation so external edits show up promptly.
        if (event.type() == QEvent.WindowActivate):
            if (self.isActiveWindow() and self.autoRefresh):
                self.maybeRefresh()
        return super(SettingsTree, self).event(event)

    def updateSetting(self, item):
        """Write an edited item's value back under its full 'group/key' path."""
        key = item.text(0)
        ancestor = item.parent()
        while ancestor:
            key = ((ancestor.text(0) + '/') + key)
            ancestor = ancestor.parent()
        self.settings.setValue(key, item.data(2, Qt.UserRole))
        if self.autoRefresh:
            self.refresh()

    def updateChildItems(self, parent):
        """Synchronize the children of *parent* (None = top level) with the
        current settings group: group items first, then key items; anything
        left over is deleted."""
        dividerIndex = 0
        # Pass 1: child groups become folder items and are recursed into.
        for group in self.settings.childGroups():
            childIndex = self.findChild(parent, group, dividerIndex)
            if (childIndex != (- 1)):
                child = self.childAt(parent, childIndex)
                child.setText(1, '')
                child.setText(2, '')
                child.setData(2, Qt.UserRole, None)
                self.moveItemForward(parent, childIndex, dividerIndex)
            else:
                child = self.createItem(group, parent, dividerIndex)
                child.setIcon(0, self.groupIcon)
            dividerIndex += 1
            self.settings.beginGroup(group)
            self.updateChildItems(child)
            self.settings.endGroup()
        # Pass 2: child keys become leaf items showing type and value.
        for key in self.settings.childKeys():
            childIndex = self.findChild(parent, key, 0)
            if ((childIndex == (- 1)) or (childIndex >= dividerIndex)):
                if (childIndex != (- 1)):
                    child = self.childAt(parent, childIndex)
                    for i in range(child.childCount()):
                        self.deleteItem(child, i)
                    self.moveItemForward(parent, childIndex, dividerIndex)
                else:
                    child = self.createItem(key, parent, dividerIndex)
                    child.setIcon(0, self.keyIcon)
                dividerIndex += 1
            else:
                child = self.childAt(parent, childIndex)
            value = self.settings.value(key)
            if (value is None):
                child.setText(1, 'Invalid')
            else:
                child.setText(1, value.__class__.__name__)
            child.setText(2, VariantDelegate.displayText(value))
            child.setData(2, Qt.UserRole, value)
        # Pass 3: delete stale items beyond the live entries.
        while (dividerIndex < self.childCount(parent)):
            self.deleteItem(parent, dividerIndex)

    def createItem(self, text, parent, index):
        """Create an editable item named *text* at *index* under *parent*
        (top level when *parent* is None)."""
        after = None
        if (index != 0):
            after = self.childAt(parent, (index - 1))
        if (parent is not None):
            item = QTreeWidgetItem(parent, after)
        else:
            item = QTreeWidgetItem(self, after)
        item.setText(0, text)
        item.setFlags((item.flags() | Qt.ItemIsEditable))
        return item

    def deleteItem(self, parent, index):
        """Detach and drop the child at *index* (top level if parent is None)."""
        if (parent is not None):
            item = parent.takeChild(index)
        else:
            item = self.takeTopLevelItem(index)
        del item

    def childAt(self, parent, index):
        """Child item at *index*; a top-level item when *parent* is None."""
        if (parent is not None):
            return parent.child(index)
        else:
            return self.topLevelItem(index)

    def childCount(self, parent):
        """Number of children of *parent* (top-level count when None)."""
        if (parent is not None):
            return parent.childCount()
        else:
            return self.topLevelItemCount()

    def findChild(self, parent, text, startIndex):
        """Index of the child named *text*, or -1.

        NOTE(review): *startIndex* is accepted for call-site compatibility
        but unused -- the scan always starts at 0 (same as the upstream Qt
        example); confirm before relying on it.
        """
        for i in range(self.childCount(parent)):
            if (self.childAt(parent, i).text(0) == text):
                return i
        return (- 1)

    def moveItemForward(self, parent, oldIndex, newIndex):
        """'Move' the child at *oldIndex* up to *newIndex* by deleting the
        items in between; updateChildItems recreates them afterwards.
        (Fixed: the loop variable previously shadowed the builtin 'int'.)"""
        for _ in range((oldIndex - newIndex)):
            self.deleteItem(parent, newIndex)
class ASSWriter():
    """Writes captions in the Advanced SubStation Alpha (.ass) subtitle format."""

    # File extension used for output files.
    ext = 'ass'

    @staticmethod
    def _format_time(seconds):
        """Convert a time in seconds to the ASS 'H:MM:SS.CC' timestamp format
        (CC = centiseconds, truncated).

        Fixed: this was defined without *self* yet called via
        ``self._format_time(...)``, which raised TypeError at runtime; it is
        now a proper staticmethod.
        """
        h = int((seconds / 3600))
        m = (int((seconds / 60)) % 60)
        s = int((seconds % 60))
        cs = int(((seconds % 1) * 100))
        return ('%i:%02i:%02i.%02i' % (h, m, s, cs))

    def header(self, file):
        """Write the fixed [Script Info]/[V4+ Styles]/[Events] preamble."""
        file.write('[Script Info]\nScriptType: v4.00+\nCollisions: Normal\nTimer: 100.0000\n\n[V4+ Styles]\nStyle: Default,Arial,16,&Hffffff,&Hffffff,&H0,&H0,0,0,0,0,100,100,0,0,1,1,0,2,10,10,10,0\n\n[Events]\n')

    def caption(self, file, caption):
        """Write one Dialogue event line for *caption*, which must expose
        ``start_seconds``, ``end_seconds`` and ``text`` attributes."""
        start = self._format_time(caption.start_seconds)
        end = self._format_time(caption.end_seconds)
        file.write(('Dialogue: 0,%s,%s,Default,,0,0,0,,%s\n' % (start, end, caption.text)))
def test_subquery_expression_without_source_table():
    """Column lineage through a scalar subquery expression: both col1 and col2
    of ``bar`` should flow into ``foo.sum_result``."""
    assert_column_lineage_equal('INSERT INTO foo\nSELECT (SELECT col1 + col2 AS result) AS sum_result\nFROM bar', [(ColumnQualifierTuple('col1', 'bar'), ColumnQualifierTuple('sum_result', 'foo')), (ColumnQualifierTuple('col2', 'bar'), ColumnQualifierTuple('sum_result', 'foo'))])
class ChocolateyPackage(Package):
    """Package backend backed by the Chocolatey package manager (Windows)."""

    def is_installed(self):
        """True when ``choco info`` finds the package installed locally."""
        result = self.run_test('choco info -lo %s', self.name)
        return result.rc == 0

    def version(self):
        """Installed version, taken from choco's pipe-separated '-r' output."""
        output = self.check_output('choco info -lo %s -r', self.name)
        return output.split('|', 1)[1]

    def release(self):
        """Release numbers are not a Chocolatey concept."""
        raise NotImplementedError
def open_url(url: str, cache_dir: str=None, num_attempts: int=10, verbose: bool=True, return_filename: bool=False, cache: bool=True) -> Any:
    """Open a URL or local path and return a binary file object for its data.

    Plain paths and ``file://`` URLs are opened (or returned) directly.
    Remote URLs are downloaded with ``requests``, retrying up to
    *num_attempts* times, with special handling for Google Drive
    interstitial pages, and optionally cached on disk under *cache_dir*.
    With ``return_filename=True`` the on-disk filename is returned instead
    of a file object (requires ``cache=True`` for remote URLs).
    """
    assert (num_attempts >= 1)
    # A filename can only be returned for remote URLs when caching is on.
    assert (not (return_filename and (not cache)))
    # No URL scheme => treat as a plain local filename.
    if (not re.match('^[a-z]+://', url)):
        return (url if return_filename else open(url, 'rb'))
    # file:// URL => strip the scheme and open the path directly.
    if url.startswith('file://'):
        filename = urllib.parse.urlparse(url).path
        # Drop the leading slash of Windows-style '/C:/...' paths.
        if re.match('^/[a-zA-Z]:', filename):
            filename = filename[1:]
        return (filename if return_filename else open(filename, 'rb'))
    assert is_url(url)
    # Look up the disk cache; filenames are keyed by the URL's MD5.
    if (cache_dir is None):
        cache_dir = make_cache_dir_path('downloads')
    url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
    if cache:
        cache_files = glob.glob(os.path.join(cache_dir, (url_md5 + '_*')))
        if (len(cache_files) == 1):
            filename = cache_files[0]
            return (filename if return_filename else open(filename, 'rb'))
    # Download with retries.
    url_name = None
    url_data = None
    with requests.Session() as session:
        if verbose:
            print(('Downloading %s ...' % url), end='', flush=True)
        for attempts_left in reversed(range(num_attempts)):
            try:
                with session.get(url) as res:
                    res.raise_for_status()
                    if (len(res.content) == 0):
                        raise IOError('No data received')
                    if (len(res.content) < 8192):
                        # Small responses may be Google Drive interstitial
                        # pages.  Decode with errors='replace' so a small
                        # *binary* payload cannot raise UnicodeDecodeError
                        # and burn through every retry (fixed).
                        content_str = res.content.decode('utf-8', errors='replace')
                        if ('download_warning' in res.headers.get('Set-Cookie', '')):
                            links = [html.unescape(link) for link in content_str.split('"') if ('export=download' in link)]
                            if (len(links) == 1):
                                # Retry with the direct-download link.
                                url = requests.compat.urljoin(url, links[0])
                            raise IOError('Google Drive virus checker nag')
                        if ('Google Drive - Quota exceeded' in content_str):
                            raise IOError('Google Drive download quota exceeded -- please try again later')
                    match = re.search('filename="([^"]*)"', res.headers.get('Content-Disposition', ''))
                    url_name = (match[1] if match else url)
                    url_data = res.content
                    if verbose:
                        print(' done')
                    break
            except KeyboardInterrupt:
                raise
            except Exception:
                # Was a bare 'except:'; narrowed so SystemExit and friends
                # are not swallowed by the retry loop.
                if (not attempts_left):
                    if verbose:
                        print(' failed')
                    raise
                if verbose:
                    print('.', end='', flush=True)
    # Save to cache: write to a temp file first so the final rename is atomic.
    if cache:
        safe_name = re.sub('[^0-9a-zA-Z-._]', '_', url_name)
        cache_file = os.path.join(cache_dir, ((url_md5 + '_') + safe_name))
        temp_file = os.path.join(cache_dir, ((((('tmp_' + uuid.uuid4().hex) + '_') + url_md5) + '_') + safe_name))
        os.makedirs(cache_dir, exist_ok=True)
        with open(temp_file, 'wb') as f:
            f.write(url_data)
        os.replace(temp_file, cache_file)
        if return_filename:
            return cache_file
    # Return the downloaded bytes as an in-memory file object.
    assert (not return_filename)
    return io.BytesIO(url_data)
# NOTE(review): '.supported(...)' below looks like a truncated decorator
# (likely '@pytest.mark.supported') lost in extraction -- confirm upstream.
.supported(only_if=(lambda backend: backend.cipher_supported(algorithms.TripleDES((b'\x00' * 8)), modes.CFB8((b'\x00' * 8)))), skip_message='Does not support TripleDES CFB8')
class TestTripleDESModeCFB8():
    """NIST vector tests for TripleDES in CFB8 mode: known-answer tests (KAT)
    and multi-block message tests (MMT)."""
    # KAT vectors use a single key field.
    test_kat = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', '3DES', 'CFB'), ['TCFB8invperm.rsp', 'TCFB8permop.rsp', 'TCFB8subtab.rsp', 'TCFB8varkey.rsp', 'TCFB8vartext.rsp'], (lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys))), (lambda iv, **kwargs: modes.CFB8(binascii.unhexlify(iv))))
    # MMT vectors supply three separate key parts that are concatenated.
    test_mmt = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', '3DES', 'CFB'), ['TCFB8MMT1.rsp', 'TCFB8MMT2.rsp', 'TCFB8MMT3.rsp'], (lambda key1, key2, key3, **kwargs: algorithms.TripleDES(binascii.unhexlify(((key1 + key2) + key3)))), (lambda iv, **kwargs: modes.CFB8(binascii.unhexlify(iv))))
class UNetPlusPlus(nn.Module):
    """3D UNet++ (nested U-Net): an encoder backbone (convX_0) with dense
    skip connections through intermediate nodes convI_J, where I is the
    resolution level and J the nesting depth along the skip pathway."""

    def __init__(self, in_ch, base_ch, scale, kernel_size, num_classes=1, block='SingleConv', norm='bn'):
        super().__init__()
        num_block = 2
        block = get_block(block)
        norm = get_norm(norm)
        # Channel widths per resolution level.
        # NOTE(review): the deepest width is base_ch*10 rather than the usual
        # base_ch*16 -- confirm this is intentional.
        n_ch = [base_ch, (base_ch * 2), (base_ch * 4), (base_ch * 8), (base_ch * 10)]
        # Per-level downsampling (MaxPool) and matching trilinear upsampling.
        self.pool0 = nn.MaxPool3d(scale[0])
        self.up0 = nn.Upsample(scale_factor=tuple(scale[0]), mode='trilinear', align_corners=True)
        self.pool1 = nn.MaxPool3d(scale[1])
        self.up1 = nn.Upsample(scale_factor=tuple(scale[1]), mode='trilinear', align_corners=True)
        self.pool2 = nn.MaxPool3d(scale[2])
        self.up2 = nn.Upsample(scale_factor=tuple(scale[2]), mode='trilinear', align_corners=True)
        self.pool3 = nn.MaxPool3d(scale[3])
        self.up3 = nn.Upsample(scale_factor=tuple(scale[3]), mode='trilinear', align_corners=True)
        # Encoder backbone convI_0.
        self.conv0_0 = self.make_layer(in_ch, n_ch[0], num_block, block, kernel_size=kernel_size[0], norm=norm)
        self.conv1_0 = self.make_layer(n_ch[0], n_ch[1], num_block, block, kernel_size=kernel_size[1], norm=norm)
        self.conv2_0 = self.make_layer(n_ch[1], n_ch[2], num_block, block, kernel_size=kernel_size[2], norm=norm)
        self.conv3_0 = self.make_layer(n_ch[2], n_ch[3], num_block, block, kernel_size=kernel_size[3], norm=norm)
        self.conv4_0 = self.make_layer(n_ch[3], n_ch[4], num_block, block, kernel_size=kernel_size[4], norm=norm)
        # Nested decoder nodes convI_J; input width is J copies of the level's
        # own channels (the dense skips) plus the upsampled deeper features.
        self.conv0_1 = self.make_layer((n_ch[0] + n_ch[1]), n_ch[0], num_block, block, kernel_size=kernel_size[0], norm=norm)
        self.conv1_1 = self.make_layer((n_ch[1] + n_ch[2]), n_ch[1], num_block, block, kernel_size=kernel_size[1], norm=norm)
        self.conv2_1 = self.make_layer((n_ch[2] + n_ch[3]), n_ch[2], num_block, block, kernel_size=kernel_size[2], norm=norm)
        self.conv3_1 = self.make_layer((n_ch[3] + n_ch[4]), n_ch[3], num_block, block, kernel_size=kernel_size[3], norm=norm)
        self.conv0_2 = self.make_layer(((n_ch[0] * 2) + n_ch[1]), n_ch[0], num_block, block, kernel_size=kernel_size[0], norm=norm)
        self.conv1_2 = self.make_layer(((n_ch[1] * 2) + n_ch[2]), n_ch[1], num_block, block, kernel_size=kernel_size[1], norm=norm)
        self.conv2_2 = self.make_layer(((n_ch[2] * 2) + n_ch[3]), n_ch[2], num_block, block, kernel_size=kernel_size[2], norm=norm)
        self.conv0_3 = self.make_layer(((n_ch[0] * 3) + n_ch[1]), n_ch[0], num_block, block, kernel_size=kernel_size[0], norm=norm)
        self.conv1_3 = self.make_layer(((n_ch[1] * 3) + n_ch[2]), n_ch[1], num_block, block, kernel_size=kernel_size[1], norm=norm)
        self.conv0_4 = self.make_layer(((n_ch[0] * 4) + n_ch[1]), n_ch[0], num_block, block, kernel_size=kernel_size[0], norm=norm)
        # 1x1x1 projection to class logits.
        self.output = nn.Conv3d(n_ch[0], num_classes, kernel_size=1)

    def forward(self, x):
        # Evaluate the nested grid column by column; each node concatenates
        # all same-level predecessors with the upsampled deeper node.
        x0_0 = self.conv0_0(x)
        x1_0 = self.conv1_0(self.pool0(x0_0))
        x0_1 = self.conv0_1(torch.cat([x0_0, self.up0(x1_0)], 1))
        x2_0 = self.conv2_0(self.pool1(x1_0))
        x1_1 = self.conv1_1(torch.cat([x1_0, self.up1(x2_0)], 1))
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up0(x1_1)], 1))
        x3_0 = self.conv3_0(self.pool2(x2_0))
        x2_1 = self.conv2_1(torch.cat([x2_0, self.up2(x3_0)], 1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up1(x2_1)], 1))
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up0(x1_2)], 1))
        x4_0 = self.conv4_0(self.pool3(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up3(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up2(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up1(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up0(x1_3)], 1))
        output = self.output(x0_4)
        return output

    def make_layer(self, in_ch, out_ch, num_block, block, kernel_size, norm):
        # First block changes the channel count; the rest keep it constant.
        blocks = []
        blocks.append(block(in_ch, out_ch, kernel_size=kernel_size, norm=norm))
        for i in range((num_block - 1)):
            blocks.append(block(out_ch, out_ch, kernel_size=kernel_size, norm=norm))
        return nn.Sequential(*blocks)
def k_means_cluster(root, k, nodes):
    """Partition ``nodes`` into at most ``k`` clusters with Lloyd's k-means.

    Relies on the sibling helpers ``center_of_gravity`` (centroid of a node
    list) and ``closest`` (index of the nearest center).  Iteration and
    timing counters are accumulated into ``root.stats``.

    Returns a list of non-empty clusters (lists of nodes).  With ``k`` or
    fewer nodes, each node simply becomes its own singleton cluster.
    """
    t = time.process_time()
    if len(nodes) <= k:
        # Trivial case: no iteration needed, one cluster per node.
        return [[n] for n in nodes]
    ns = list(nodes)
    root.stats['count_kmeans_iter_f'] += 1
    # Deterministic initialisation: seed one center per leading node.
    cluster_centers = [center_of_gravity([n]) for n in ns[:k]]
    while True:
        root.stats['sum_kmeans_iter_f'] += 1
        # Assignment step: each node joins the cluster of its closest center.
        clusters = [[] for _ in cluster_centers]
        for n in ns:
            clusters[closest(cluster_centers, n)].append(n)
        # Centers that attracted no nodes vanish.  (The original code also
        # re-checked for empty clusters after this filter; that branch was
        # unreachable and has been removed.)
        clusters = [c for c in clusters if c]
        # Update step: recompute each center from its cluster's members.
        new_cluster_centers = [center_of_gravity(c) for c in clusters]
        if new_cluster_centers == cluster_centers:
            # Converged: record bookkeeping stats and return.
            root.stats['avg_kmeans_iter_f'] = float(root.stats['sum_kmeans_iter_f'] / root.stats['count_kmeans_iter_f'])
            root.stats['longest_kmeans'] = max(root.stats['longest_kmeans'], time.process_time() - t)
            return clusters
        cluster_centers = new_cluster_centers
def conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
    """2-D convolution that reproduces TensorFlow 'SAME' output sizing.

    For stride 1 the built-in 'same' padding suffices.  For larger strides
    the input is zero-padded explicitly (accounting for dilation) and a
    'valid' convolution is applied, which matches 'SAME' geometry.
    """
    if stride != 1:
        # Effective kernel extent once dilation is taken into account.
        effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
        total_pad = effective_kernel - 1
        pad_begin = total_pad // 2
        pad_end = total_pad - pad_begin
        x = ZeroPadding2D((pad_begin, pad_end))(x)
        padding = 'valid'
    else:
        padding = 'same'
    conv = Conv2D(filters, (kernel_size, kernel_size),
                  strides=(stride, stride), padding=padding,
                  use_bias=False, dilation_rate=(rate, rate), name=prefix)
    return conv(x)
def check_vocab_and_split(orig, bpe_codes, vocab, separator):
    """Check BPE segments against a vocabulary, re-splitting OOV segments.

    Every segment except the last is word-internal and is therefore checked
    with ``separator`` appended; the final segment is checked bare.  Any
    segment missing from ``vocab`` is split further via ``recursive_split``
    (final=False for internal segments, final=True for the last one).

    Returns the resulting list of segments.  An empty ``orig`` yields an
    empty list (the previous version raised IndexError on ``orig[-1]``).
    """
    out = []
    if not orig:
        return out
    for segment in orig[:-1]:
        if (segment + separator) in vocab:
            out.append(segment)
        else:
            out.extend(recursive_split(segment, bpe_codes, vocab, separator, False))
    segment = orig[-1]
    if segment in vocab:
        out.append(segment)
    else:
        out.extend(recursive_split(segment, bpe_codes, vocab, separator, True))
    return out
class np_random():
    """Context manager that temporarily seeds numpy's global RNG.

    Entering saves the current global RNG state and applies ``seed``;
    leaving restores the saved state.  ``__enter__`` returns the saved
    state tuple so callers can inspect it if they wish.
    """

    def __init__(self, seed):
        # Seed applied on entry; ``state`` holds the snapshot taken then.
        self.seed = seed
        self.state = None

    def __enter__(self):
        # Snapshot the global state, then reseed deterministically.
        self.state = np.random.get_state()
        np.random.seed(self.seed)
        return self.state

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore the pre-entry state, even on exception.
        np.random.set_state(self.state)
def xf_epilogue(self):
    """Post-process the collected XF (extended format) records.

    Builds the ``xf_index -> XL_CELL_*`` type map from each XF's number
    format and, when ``formatting_info`` is enabled, sanity-checks every
    cell XF against its parent style XF, logging NOTE/WARNING lines for
    inconsistencies.  Called once all XF records have been read.
    """
    # Mark the epilogue as done so it is not re-run.
    self._xf_epilogue_done = 1
    num_xfs = len(self.xf_list)
    # blah: very chatty tracing; blah1: warnings/notes only.
    blah = (DEBUG or (self.verbosity >= 3))
    blah1 = (DEBUG or (self.verbosity >= 1))
    if blah:
        fprintf(self.logfile, 'xf_epilogue called ...\n')
    def check_same(book_arg, xf_arg, parent_arg, attr):
        # Log when child and parent XF disagree on `attr` even though
        # neither claims to override it (both *_flag bits are clear).
        if (getattr(xf_arg, attr) != getattr(parent_arg, attr)):
            fprintf(book_arg.logfile, 'NOTE !!! XF[%d] parent[%d] %s different\n', xf_arg.xf_index, parent_arg.xf_index, attr)
    for xfx in xrange(num_xfs):
        xf = self.xf_list[xfx]
        # Map the XF's number-format type to a cell type; unknown format
        # keys fall back to text.
        try:
            fmt = self.format_map[xf.format_key]
            cellty = _cellty_from_fmtty[fmt.type]
        except KeyError:
            cellty = XL_CELL_TEXT
        self._xf_index_to_xl_type_map[xf.xf_index] = cellty
        # The remaining checks are only for callers who asked for full
        # formatting info, and only apply to cell XFs (not style XFs).
        if (not self.formatting_info):
            continue
        if xf.is_style:
            continue
        # Clamp an out-of-range parent index to 0 before dereferencing.
        if (not (0 <= xf.parent_style_index < num_xfs)):
            if blah1:
                fprintf(self.logfile, 'WARNING *** XF[%d]: is_style=%d but parent_style_index=%d\n', xf.xf_index, xf.is_style, xf.parent_style_index)
            xf.parent_style_index = 0
        # BIFF3+: the parent must be a distinct, earlier, style-flagged XF.
        if (self.biff_version >= 30):
            if blah1:
                if (xf.parent_style_index == xf.xf_index):
                    fprintf(self.logfile, 'NOTE !!! XF[%d]: parent_style_index is also %d\n', xf.xf_index, xf.parent_style_index)
                elif (not self.xf_list[xf.parent_style_index].is_style):
                    fprintf(self.logfile, 'NOTE !!! XF[%d]: parent_style_index is %d; style flag not set\n', xf.xf_index, xf.parent_style_index)
            if (blah1 and (xf.parent_style_index > xf.xf_index)):
                fprintf(self.logfile, 'NOTE !!! XF[%d]: parent_style_index is %d; out of order?\n', xf.xf_index, xf.parent_style_index)
            parent = self.xf_list[xf.parent_style_index]
            # For each attribute group neither side overrides, the values
            # should be inherited and therefore identical -- note if not.
            if ((not xf._alignment_flag) and (not parent._alignment_flag)):
                if blah1:
                    check_same(self, xf, parent, 'alignment')
            if ((not xf._background_flag) and (not parent._background_flag)):
                if blah1:
                    check_same(self, xf, parent, 'background')
            if ((not xf._border_flag) and (not parent._border_flag)):
                if blah1:
                    check_same(self, xf, parent, 'border')
            if ((not xf._protection_flag) and (not parent._protection_flag)):
                if blah1:
                    check_same(self, xf, parent, 'protection')
            if ((not xf._format_flag) and (not parent._format_flag)):
                if (blah1 and (xf.format_key != parent.format_key)):
                    fprintf(self.logfile, 'NOTE !!! XF[%d] fmtk=%d, parent[%d] fmtk=%r\n%r / %r\n', xf.xf_index, xf.format_key, parent.xf_index, parent.format_key, self.format_map[xf.format_key].format_str, self.format_map[parent.format_key].format_str)
            if ((not xf._font_flag) and (not parent._font_flag)):
                if (blah1 and (xf.font_index != parent.font_index)):
                    fprintf(self.logfile, 'NOTE !!! XF[%d] fontx=%d, parent[%d] fontx=%r\n', xf.xf_index, xf.font_index, parent.xf_index, parent.font_index)
class ItemEffects(wx.Panel):
    """Panel showing an item's effects in a list control.

    Double-clicking a row toggles that effect's ``activeByDefault`` flag
    (when the effect supports it) and refreshes the display.
    """

    def __init__(self, parent, stuff, item):
        wx.Panel.__init__(self, parent)
        self.item = item
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.effectList = AutoListCtrl(
            self, wx.ID_ANY,
            style=(((wx.LC_REPORT | wx.LC_SINGLE_SEL) | wx.LC_VRULES) | wx.NO_BORDER))
        sizer.Add(self.effectList, 1, (wx.ALL | wx.EXPAND), 0)
        self.SetSizer(sizer)
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnClick, self.effectList)
        self.PopulateList()

    def PopulateList(self):
        """Create the columns and fill one row per effect, sorted by name."""
        lst = self.effectList
        lst.InsertColumn(0, _t('Name'))
        lst.InsertColumn(1, _t('Active'))
        lst.InsertColumn(2, _t('Type'))
        if config.debug:
            # Extra diagnostic columns in debug builds only.
            lst.InsertColumn(3, _t('Run Time'))
            lst.InsertColumn(4, _t('ID'))
        lst.setResizeColumn(0)
        lst.SetColumnWidth(1, 50)
        lst.SetColumnWidth(2, 80)
        if config.debug:
            lst.SetColumnWidth(3, 65)
            lst.SetColumnWidth(4, 40)
        self.effects = effects = self.item.effects
        for name in sorted(effects.keys()):
            effect = effects[name]
            row = lst.InsertItem(lst.GetItemCount(), name)
            # Unimplemented effects show a blank 'Active' cell.
            if effect.isImplemented:
                activeText = _t('Yes') if effect.activeByDefault else _t('No')
            else:
                activeText = ''
            typeText = ''
            if effect.type:
                for effectType in effect.type:
                    typeText += (effectType + ' ')
            if (effect.runTime and effect.isImplemented):
                runTimeText = str(effect.runTime)
            else:
                runTimeText = ''
            lst.SetItem(row, 1, activeText)
            lst.SetItem(row, 2, typeText)
            if config.debug:
                lst.SetItem(row, 3, runTimeText)
                lst.SetItem(row, 4, str(effect.ID))
        lst.RefreshRows()
        self.Layout()

    def OnClick(self, event):
        """Toggle activeByDefault on the activated effect, then refresh."""
        try:
            effect = self.item.effects[event.GetText()]
            effect.activeByDefault = not effect.activeByDefault
        except AttributeError:
            # Effect doesn't expose the flag (e.g. read-only) -- ignore.
            pass
        self.RefreshValues(event)

    def RefreshValues(self, event):
        """Rebuild the list from scratch with redraws suspended."""
        self.Freeze()
        self.effectList.ClearAll()
        self.PopulateList()
        self.effectList.RefreshRows()
        self.Layout()
        self.Thaw()
        event.Skip()
class EventsImporterTestCase(TestCase):
    """Tests for importing iCalendar feeds with ICSImporter."""

    # BUG FIX: unittest/Django invoke ``cls.setUpClass()`` with no explicit
    # argument; without @classmethod the plain function received zero args
    # and raised TypeError before any test could run.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.calendar = Calendar.objects.create(url=EVENTS_CALENDAR_URL, slug='python-events')

    def test_injest(self):
        """Smoke test: importing the fixture calendar file must not raise.

        (Method name kept for test-discovery compatibility; 'injest' is a
        historical typo for 'ingest'.)
        """
        importer = ICSImporter(self.calendar)
        with open(EVENTS_CALENDAR) as fh:
            ical = fh.read()
        importer.import_events_from_text(ical)

    def test_modified_event(self):
        """Re-importing an event with the same UID updates it in place."""
        importer = ICSImporter(self.calendar)
        ical = 'BEGIN:VCALENDAR\nPRODID:-//Google Inc//Google Calendar 70.9054//EN\nVERSION:2.0\nCALSCALE:GREGORIAN\nMETHOD:PUBLISH\nX-WR-CALNAME:Python Events Calendar\nX-WR-TIMEZONE:Etc/GMT\nX-WR-CALDESC:Calendar showing Python conference and user group meeting date\n s.\nBEGIN:VEVENT\nDTSTART;VALUE=DATE:\nDTEND;VALUE=DATE:\nDTSTAMP:T221918Z\nUID:\nCREATED:T123318Z\nDESCRIPTION:<a href=" C\n ologne 2016</a>\nLAST-MODIFIED:T210533Z\nLOCATION:GFU Cyrus AG, Am Grauen Stein 27, 51105 Koln, Germany\nSEQUENCE:0\nSTATUS:CONFIRMED\nSUMMARY:PythonCamp Cologne 2016\nTRANSP:TRANSPARENT\nEND:VEVENT\nEND:VCALENDAR\n'
        importer.import_events_from_text(ical)
        e = Event.objects.get(uid='')
        self.assertEqual(e.calendar.url, EVENTS_CALENDAR_URL)
        self.assertEqual(e.description.rendered, '<a href=" Cologne 2016</a>')
        self.assertTrue(e.next_or_previous_time.all_day)
        self.assertEqual(make_aware(datetime(year=2016, month=4, day=2)), e.next_or_previous_time.dt_start)
        self.assertEqual(make_aware(datetime(year=2016, month=4, day=3)), e.next_or_previous_time.dt_end)
        # Same UID, newer LAST-MODIFIED and changed description/location.
        ical = 'BEGIN:VCALENDAR\nPRODID:-//Google Inc//Google Calendar 70.9054//EN\nVERSION:2.0\nCALSCALE:GREGORIAN\nMETHOD:PUBLISH\nX-WR-CALNAME:Python Events Calendar\nX-WR-TIMEZONE:Etc/GMT\nX-WR-CALDESC:Calendar showing Python conference and user group meeting date\n s.\nBEGIN:VEVENT\nDTSTART;VALUE=DATE:\nDTEND;VALUE=DATE:\nDTSTAMP:T221918Z\nUID:\nCREATED:T123318Z\nDESCRIPTION:Python Istanbul\nLAST-MODIFIED:T222533Z\nLOCATION:GFU Cyrus AG, Am Grauen Stein 27, 51105 Istanbul, Turkey\nSEQUENCE:0\nSTATUS:CONFIRMED\nSUMMARY:PythonCamp Cologne 2016\nTRANSP:TRANSPARENT\nEND:VEVENT\nEND:VCALENDAR\n'
        importer.import_events_from_text(ical)
        e2 = Event.objects.get(uid='')
        # Same database row, updated description.
        self.assertEqual(e.pk, e2.pk)
        self.assertEqual(e2.calendar.url, EVENTS_CALENDAR_URL)
        self.assertEqual(e2.description.rendered, 'Python Istanbul')
        self.assertTrue(e.next_or_previous_time.all_day)
        self.assertEqual(make_aware(datetime(year=2016, month=4, day=2)), e.next_or_previous_time.dt_start)
        self.assertEqual(make_aware(datetime(year=2016, month=4, day=3)), e.next_or_previous_time.dt_end)

    def test_import_event_excludes_ending_day_when_all_day_is_true(self):
        """DATE-valued DTSTART/DTEND produce an all-day, multi-day event."""
        ical = 'BEGIN:VCALENDAR\nBEGIN:VEVENT\nDTSTART;VALUE=DATE:\nDTEND;VALUE=DATE:\nDTSTAMP:T092425Z\nUID:\nSUMMARY:PythonCamp 2015 - Python Bar Camp in Cologne\nDESCRIPTION:Python PythonCamp 2015 - Python Bar Camp in Cologne\nLOCATION:GFU Cyrus AG, Am Grauen Stein 27, 51105 Cologne, Germany\nEND:VEVENT\nEND:VCALENDAR\n'
        importer = ICSImporter(self.calendar)
        importer.import_events_from_text(ical)
        all_day_event = Event.objects.get(uid='')
        self.assertTrue(all_day_event.next_or_previous_time.all_day)
        self.assertFalse(all_day_event.next_or_previous_time.single_day)
        self.assertEqual(make_aware(datetime(year=2015, month=3, day=28)), all_day_event.next_or_previous_time.dt_start)
        self.assertEqual(make_aware(datetime(year=2015, month=3, day=29)), all_day_event.next_or_previous_time.dt_end)

    def test_import_event_does_not_exclude_ending_day_when_all_day_is_false(self):
        """Timed DTSTART/DTEND produce a single-day, non-all-day event."""
        ical = 'BEGIN:VCALENDAR\nBEGIN:VEVENT\nDTSTART:T200000Z\nDTEND:T203000Z\nDTSTAMP:T092425Z\nUID:\nSUMMARY:PythonCamp 2015 - Python Bar Camp in Cologne\nDESCRIPTION:Python PythonCamp 2015 - Python Bar Camp in Cologne\nLOCATION:GFU Cyrus AG, Am Grauen Stein 27, 51105 Cologne, Germany\nEND:VEVENT\nEND:VCALENDAR\n'
        importer = ICSImporter(self.calendar)
        importer.import_events_from_text(ical)
        single_day_event = Event.objects.get(uid='')
        self.assertFalse(single_day_event.next_or_previous_time.all_day)
        self.assertTrue(single_day_event.next_or_previous_time.single_day)
        self.assertEqual(make_aware(datetime(year=2013, month=8, day=2, hour=20)), single_day_event.next_or_previous_time.dt_start)
        self.assertEqual(make_aware(datetime(year=2013, month=8, day=2, hour=20, minute=30)), single_day_event.next_or_previous_time.dt_end)
def _load_named_resources() -> Dict[str, Callable[[], Resource]]:
    """Collect all named resource factories available to torchx.

    Built-in generic and AWS resources are merged with any factories
    registered under the 'torchx.named_resources' entry-point group;
    entry-point registrations win on name collisions.  The special names
    'NULL' and 'MISSING' always resolve to the null resource.
    """
    plugin_factories = load_group('torchx.named_resources', default={})
    merged: Dict[str, Callable[[], Resource]] = dict(GENERIC_NAMED_RESOURCES)
    merged.update(AWS_NAMED_RESOURCES)
    merged.update(plugin_factories)
    merged['NULL'] = (lambda: NULL_RESOURCE)
    merged['MISSING'] = (lambda: NULL_RESOURCE)
    return merged
class LoadData(object):
    """Loads the ML100K train/test interaction files plus movie metadata.

    Raw user/item ids are mapped to dense integer indices (shared across
    train and test), and per-user positive-item lists are built from the
    training data.  Movie metadata comes from ``movie_loader``.
    """

    def __init__(self):
        self.trainfile = './ML100K/train.txt'
        self.testfile = './ML100K/test.txt'
        (self.num_users, self.num_items) = self.map_features()
        self.user_positive_list = self.get_positive_list(self.trainfile)
        (self.Train_data, self.Test_data) = self.construct_data()
        loader = movie_loader()
        self.movie_dict = loader.movie_dict
        self.all_genres = loader.genre_list
        self.all_directors = loader.director_list
        self.all_actors = loader.actor_list
        self.num_genres = len(self.all_genres)
        self.num_directors = len(self.all_directors)
        self.num_actors = len(self.all_actors)

    def _iter_pairs(self, file):
        """Yield (user, item) raw-id pairs from a tab-separated file.

        Lines without at least two tab-separated fields (e.g. blank lines,
        which previously crashed with IndexError) are skipped.
        """
        # `with` guarantees the file handle is closed even on error
        # (the original used open/readline/close with no try/finally).
        with open(file) as f:
            for line in f:
                parts = line.strip().split('\t')
                if len(parts) < 2:
                    continue
                yield (parts[0], parts[1])

    def map_features(self):
        """Build raw-id <-> dense-index maps from both data files.

        Returns (num_users, num_items).
        """
        self.users = {}
        self.items = {}
        self.users_traverse = {}
        self.items_traverse = {}
        self.read_features(self.trainfile)
        self.read_features(self.testfile)
        return (len(self.users), len(self.items))

    def read_features(self, file):
        """Register every user/item id found in ``file``.

        New ids get the next dense index, continuing from the current dict
        sizes so train and test files can be processed cumulatively.
        """
        for (user, item) in self._iter_pairs(file):
            if user not in self.users:
                idx = len(self.users)
                self.users[user] = idx
                self.users_traverse[idx] = user
            if item not in self.items:
                idx = len(self.items)
                self.items[item] = idx
                self.items_traverse[idx] = item

    def construct_data(self):
        """Load train and test interactions into index-list dicts."""
        (User, Item) = self.read_data(self.trainfile)
        Train_data = self.construct_dataset(User, Item)
        print('# of training:', len(User))
        (User, Item) = self.read_data(self.testfile)
        Test_data = self.construct_dataset(User, Item)
        print('# of test:', len(User))
        return (Train_data, Test_data)

    def get_positive_list(self, file):
        """Return {user_index: [item_index, ...]} of observed interactions."""
        user_positive_list = {}
        for (user, item) in self._iter_pairs(file):
            user_id = self.users[user]
            item_id = self.items[item]
            user_positive_list.setdefault(user_id, []).append(item_id)
        return user_positive_list

    def read_data(self, file):
        """Return parallel lists (User, Item) of dense indices for ``file``."""
        User = []
        Item = []
        for (user, item) in self._iter_pairs(file):
            User.append(self.users[user])
            Item.append(self.items[item])
        return (User, Item)

    def construct_dataset(self, User, Item):
        """Package the index lists into a {'User': ..., 'Item': ...} dict.

        BUG FIX: the original iterated with ``xrange``, which is a
        NameError on Python 3; plain list copies are equivalent.
        """
        return {'User': list(User), 'Item': list(Item)}
class TestPartial(unittest.TestCase):
    """Structure generation from partially and fully specified Wyckoff sites."""

    def test_Al2SiO5(self):
        lattice = Lattice.from_para(7.8758, 7.9794, 5.6139, 90, 90, 90)
        space_group = 58
        species = ['Al', 'Si', 'O']
        numbers = [8, 4, 20]
        # Oxygen sites left unspecified: the generator must place them.
        partial_sites = [
            {'4e': [0.0, 0.0, 0.2418], '4g': [0.1294, 0.6392, 0.0]},
            {'4g': [0.2458, 0.2522, 0.0]},
            [],
        ]
        crystal = pyxtal()
        crystal.from_random(3, space_group, species, numbers, lattice=lattice, sites=partial_sites)
        self.assertTrue(crystal.valid)
        # All sites fully specified, including oxygen.
        full_sites = [
            {'4e': [0.0, 0.0, 0.2418], '4g': [0.1294, 0.6392, 0.0]},
            {'4g': [0.2458, 0.2522, 0.0]},
            {'4g': [0.4241, 0.3636, 0.0]},
        ]
        crystal = pyxtal()
        crystal.from_random(3, space_group, species, numbers, lattice=lattice, sites=full_sites)
        self.assertTrue(crystal.valid)
def _warn_output_shape(*dim_names: str) -> None:
name = f'extract_patches{len(dim_names)}d'
partial_shape = 'x'.join(dim_names)
warnings.warn(f"The output shape of {name} will change in the future. The current shape B*PxCx{partial_shape} will be replaced by BxPxCx{partial_shape} thus adding a dimension. Here, 'P' denotes the number of extracted patches.", FutureWarning) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.