code stringlengths 281 23.7M |
|---|
def test_filter_submissions_by_tags(graphql_client, user, submission_factory, mock_has_ticket):
    """The submissions query returns only submissions matching the given tag ids."""
    graphql_client.force_login(user)
    # Three submissions in the same conference; the dog/bear one must be filtered out.
    cat_submission = submission_factory(tags=['cat'])
    conference = cat_submission.conference
    submission_factory(conference=conference, tags=['dog', 'bear'])
    lion_submission = submission_factory(conference=conference, tags=['cat', 'lion'])
    mock_has_ticket(conference)
    query = 'query Submissions($code: String!, $tags: [String!]) {\n submissions(code: $code, tags: $tags) {\n items {\n id\n }\n }\n }'
    wanted_tags = [str(cat_submission.tags.first().id), str(lion_submission.tags.last().id)]
    resp = graphql_client.query(query, variables={'code': conference.code, 'tags': wanted_tags})
    assert not resp.get('errors')
    items = resp['data']['submissions']['items']
    assert {'id': cat_submission.hashid} in items
    assert {'id': lion_submission.hashid} in items
def pad_to_batch(dataset, batch_size):
    """Return ``dataset`` with every tensor of each element zero-padded along
    axis 0 up to ``batch_size``.

    Elements may be arbitrary nests of tensors; all tensors within one
    element must agree on the batch dimension.
    """

    def _pad_to_batch(*args):
        flat_args = tf.nest.flatten(args)
        # Reject tensors that cannot be padded: unknown rank or scalars.
        for tensor in flat_args:
            if tensor.shape.ndims is None:
                raise ValueError('Unknown number of dimensions for tensor %s.' % tensor.name)
            if tensor.shape.ndims == 0:
                raise ValueError('Tensor %s is a scalar.' % tensor.name)
        reference = flat_args[0]
        reference_shape = tf.shape(reference)
        reference_batch = reference_shape[0]
        pad_amount = batch_size - reference_batch
        for index, tensor in enumerate(flat_args):
            control_deps = []
            if index != 0:
                # Every tensor must match the first tensor's batch size:
                # verify statically when shapes are known, otherwise insert
                # a runtime assertion.
                if reference.shape[:1].is_fully_defined() and tensor.shape[:1].is_fully_defined():
                    if reference.shape[0] != tensor.shape[0]:
                        raise ValueError('Batch size of dataset tensors does not match. %s has shape %s, but %s has shape %s' % (reference.name, reference.shape, tensor.name, tensor.shape))
                else:
                    curr_shape = tf.shape(tensor)
                    control_deps = [tf.Assert(tf.equal(curr_shape[0], reference_batch), ['Batch size of dataset tensors %s and %s do not match. Shapes are' % (tensor.name, reference.name), curr_shape, reference_shape])]
            with tf.control_dependencies(control_deps):
                # Pad only the batch axis; trailing axes are untouched.
                paddings = [[0, pad_amount]] + [[0, 0]] * (tensor.shape.ndims - 1)
                flat_args[index] = tf.pad(tensor, paddings)
                flat_args[index].set_shape([batch_size] + tensor.shape.as_list()[1:])
        return tf.nest.pack_sequence_as(args, flat_args)

    return dataset.map(_pad_to_batch)
def _get_cuts(transition, direction):
    """Yield the candidate actual cuts of ``transition`` for ``direction``.

    For BIDIRECTIONAL, the cause and effect cuts are merged and
    deduplicated by their cut matrix.
    """
    n = transition.network.size
    if direction is Direction.BIDIRECTIONAL:
        seen = set()
        cause_cuts = _get_cuts(transition, Direction.CAUSE)
        effect_cuts = _get_cuts(transition, Direction.EFFECT)
        for cut in chain(cause_cuts, effect_cuts):
            key = utils.np_hashable(cut.cut_matrix(n))
            if key in seen:
                continue
            seen.add(key)
            yield cut
    else:
        mechanism = transition.mechanism_indices(direction)
        purview = transition.purview_indices(direction)
        for partition in mip_partitions(mechanism, purview, transition.node_labels):
            yield ActualCut(direction, partition, transition.node_labels)
# NOTE(review): the decorator below appears truncated by extraction —
# presumably "@pytest.mark.fast" or similar; confirm against VCS.
.fast
def test_cut_slices(verbose=True, plot=True, close_plots=True, *args, **kwargs):
    """Check that ``_cut_slices`` splits a spectral range into slices over
    which the slit dispersion stays within ``threshold``.

    Returns False if ``offset_dilate_slit_function`` raises a
    SlitDispersionWarning for any slice, True otherwise.
    """
    from radis.misc.warning import SlitDispersionWarning
    _clean(plot, close_plots)
    # Maximum allowed relative variation of the slit dispersion per slice.
    threshold = 0.01
    w = np.arange(4000, 4400, 0.01)
    w_slit = np.arange(4198, 4202, 0.1)
    slices = _cut_slices(w, w_slit, linear_dispersion, threshold)
    for sl in slices:
        try:
            # Final positional argument presumably enables strict warnings —
            # TODO confirm its meaning against offset_dilate_slit_function.
            offset_dilate_slit_function(w_slit, np.ones_like(w_slit), w[sl], linear_dispersion, threshold, True)
        except SlitDispersionWarning:
            return False
        if plot:
            plt.plot(w, sl, label='Slice {0:.2f}-{1:.2f} nm , slit dispersion ratio: {2:.3f}'.format(w[sl][0], w[sl][(- 1)], (linear_dispersion(w[sl][(- 1)]) / linear_dispersion(w[sl][0]))))
    if plot:
        plt.title('Cut slices, threshold (boundaries removed) = {0}'.format(threshold))
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Slices (Boolean)')
        plt.legend(loc='best', prop={'size': 15})
    # Expected slice count for this grid and dispersion — pinned by the test.
    assert (len(slices) == 8)
    # A reversed (descending) wavelength grid must produce the same slicing.
    slices = _cut_slices(w[::(- 1)], w_slit, linear_dispersion, threshold)
    assert (len(slices) == 8)
    return True
class Accuracy(metrics.Accuracy):
    """Token-level accuracy that ignores padded positions beyond each
    sequence's true length."""

    def update(self, output: Dict) -> None:
        """Accumulate correct-token and token counts from one batch.

        ``output`` is expected to provide 'logits', 'targets' and 'lens';
        assumes logits are (batch, time, classes) and targets (batch, time) —
        TODO confirm against callers.
        """
        logits = output['logits']
        targets = output['targets']
        lens = output['lens']
        # Predicted class index per position.
        indices = torch.argmax(logits, dim=(- 1))
        correct = torch.eq(indices, targets)
        # Zero-out positions past each sequence's length before counting.
        mask = generate_length_mask(lens).to(logits.device)
        correct = (correct * mask)
        self._num_correct += torch.sum(correct).to(self._device)
        # NOTE(review): unlike _num_correct, this sum is not moved to
        # self._device — possibly a device inconsistency on GPU; confirm.
        self._num_examples += torch.sum(lens)

    # NOTE(review): the line below looks like a truncated decorator —
    # presumably "@torch.no_grad()"; confirm against VCS.
    _grad()
    def iteration_completed(self, engine: Engine) -> None:
        """Ignite hook: transform the engine output and update the metric."""
        output = self._output_transform(engine.state.output)
        self.update(output)
def split_sections(s):
    """Split INI-style text into ``(section_name, content_lines)`` pairs.

    ``section_name`` is None for lines preceding the first "[section]"
    heading; that leading pair is only emitted if it has content.
    Raises ValueError for a line starting with '[' but not ending in ']'.
    """
    current = None
    lines = []
    for line in yield_lines(s):
        if not line.startswith('['):
            lines.append(line)
            continue
        if not line.endswith(']'):
            raise ValueError('Invalid section heading', line)
        # A new heading: flush the previous section first.
        if current or lines:
            yield (current, lines)
        current = line[1:(- 1)].strip()
        lines = []
    # Flush the final (possibly empty) section.
    yield (current, lines)
class OpenWrapper():
    """Context manager that opens ``name`` (creating parent directories
    first) and always closes the file on exit.

    NOTE(review): the bare class-level annotations suggest this was
    originally a ``@dataclass`` whose decorator got lost; an explicit
    ``__init__`` is provided so the class is constructible either way.
    """

    name: str        # path of the file to open
    mode: str = 'w'  # mode passed straight to open()

    def __init__(self, name: str, mode: str = 'w') -> None:
        self.name = name
        self.mode = mode

    def __enter__(self) -> Any:
        """Create parent directories as needed, open the file, return it."""
        Path(self.name).parent.mkdir(parents=True, exist_ok=True)
        self.file = open(self.name, self.mode)
        return self.file

    def __exit__(self, exception_type: Type[BaseException], exception_value: BaseException, traceback: Any) -> bool:
        """Close the file; never suppress exceptions from the with-body.

        Bug fix: the original returned early when an exception was active,
        leaking the open file handle. The file is now closed on every path.
        """
        self.file.close()
        return exception_type is None
def get_dkl_model(dataset='MNIST', binary=False):
    """Build a deep-kernel-learning model and its softmax likelihood.

    Uses LeNetMadry as the feature extractor for MNIST and ResNet18
    otherwise; the extractor is moved to CUDA and left in training mode.
    Returns ``(model, likelihood)``.
    """
    # 2 classes in binary mode; otherwise 100 for CIFAR100, 10 for the rest.
    num_classes = (2 if binary else (100 if (dataset == 'CIFAR100') else 10))
    # NOTE(review): binary=False is hard-coded here even when the ``binary``
    # argument is True — possibly intentional (binary handled via
    # num_classes), but confirm.
    feature_extractor = (LeNetMadry(binary=False, feature_extractor=True) if (dataset == 'MNIST') else resnet.ResNet18(num_classes=num_classes, feature_extractor=True))
    feature_extractor.cuda()
    feature_extractor.train()
    # Width of the extractor's penultimate layer drives the GP input dim.
    num_features = (feature_extractor.fc2.in_features if (dataset == 'MNIST') else feature_extractor.linear.in_features)
    model = DKLModel(feature_extractor, num_dim=num_features)
    likelihood = gpytorch.likelihoods.SoftmaxLikelihood(num_features=model.num_dim, num_classes=num_classes)
    return (model, likelihood)
# NOTE(review): the decorator below appears truncated by extraction —
# presumably "@winsdkapi(cc=STDCALL, params={...})"; confirm against VCS.
(cc=STDCALL, params={'ProcessHandle': HANDLE, 'ProcessInformationClass': PROCESSINFOCLASS, 'ProcessInformation': PVOID, 'ProcessInformationLength': ULONG, 'ReturnLength': PULONG})
def hook_ZwQueryInformationProcess(ql: Qiling, address: int, params):
    # Zw* variant delegates to the shared QueryInformationProcess handler.
    return _QueryInformationProcess(ql, address, params)
class MetaSingleton(type):
    """Metaclass that turns its classes into lazily-created singletons.

    The first call to the class builds the instance; every subsequent
    call returns that same cached object (one cache per class).
    """

    def __init__(cls, *args, **kwargs):
        # Cached instance, created on first call.
        cls.__instance = None
        super(MetaSingleton, cls).__init__(*args, **kwargs)

    def __call__(cls, *args, **kwargs):
        if cls.__instance is None:
            cls.__instance = super(MetaSingleton, cls).__call__(*args, **kwargs)
        return cls.__instance
class SrtmTiff(object):
    """Elevation lookups on a single SRTM GeoTIFF tile opened via GDAL."""

    tile = {}

    def __init__(self, filename):
        self.tile = self.load_tile(filename)

    def load_tile(self, filename):
        """Open ``filename`` with GDAL and summarize its georeferencing.

        Returns a dict with the raster dimensions, per-pixel degree sizes,
        the N/S/E/W bounds and the open dataset itself.
        """
        dataset = gdal.Open(filename)
        geotransform = dataset.GetGeoTransform()
        xsize = dataset.RasterXSize  # columns (longitude direction)
        ysize = dataset.RasterYSize  # rows (latitude direction)
        lon_origin = geotransform[0]
        lat_origin = geotransform[3]
        lon_pixel = geotransform[1]  # pixel width in degrees
        lat_pixel = geotransform[5]  # pixel height in degrees (negative for north-up)
        retdict = {'xsize': xsize, 'ysize': ysize, 'lat_origin': lat_origin, 'lon_origin': lon_origin, 'lon_pixel': lon_pixel, 'lat_pixel': lat_pixel, 'N': lat_origin, 'S': (lat_origin + (lat_pixel * ysize)), 'E': (lon_origin + (lon_pixel * xsize)), 'W': lon_origin, 'dataset': dataset}
        return retdict

    def pos_from_lat_lon(self, lat, lon):
        """Convert (lat, lon) into ``(row, col, row_f, col_f)``.

        ``row_f``/``col_f`` are the fractional positions; ``row``/``col``
        are floored and clamped into the raster.

        Bug fix: rows were previously clamped against ``xsize`` and the
        column clamp mixed ``ysize``/``xsize``; rows now clamp to
        [0, ysize-1] and columns to [0, xsize-1].
        """
        td = self.tile
        N = td['N']
        W = td['W']
        lat_pixel = td['lat_pixel']
        lon_pixel = td['lon_pixel']
        xsize = td['xsize']
        ysize = td['ysize']
        rowno_f = (lat - N) / lat_pixel
        colno_f = (lon - W) / lon_pixel
        rowno = int(floor(rowno_f))
        colno = int(floor(colno_f))
        # Rows index the ysize dimension, columns the xsize dimension.
        if rowno < 0:
            rowno = 0
        if rowno > (ysize - 1):
            rowno = ysize - 1
        if colno < 0:
            colno = 0
        if colno > (xsize - 1):
            colno = xsize - 1
        return (rowno, colno, rowno_f, colno_f)

    def get_elevation(self, lat, lon):
        """Return the bilinearly interpolated elevation at (lat, lon).

        A 2x2 window is read, so positions on the last row/column are
        pulled back one cell (generalized from the hard-coded 5999/5998
        check that only worked for 6000x6000 SRTM tiles).
        """
        (row, col, row_f, col_f) = self.pos_from_lat_lon(lat, lon)
        if row >= (self.tile['ysize'] - 1):
            row = self.tile['ysize'] - 2
        if col >= (self.tile['xsize'] - 1):
            col = self.tile['xsize'] - 2
        htarr = gdalnumeric.DatasetReadAsArray(self.tile['dataset'], col, row, 2, 2)
        height = bilinear_interpolation(htarr[0][0], htarr[0][1], htarr[1][0], htarr[1][1], (row_f - row), (col_f - col))
        return height
def _squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length):
    """Convert one SQuAD example into a list of model-ready QAFeatures.

    The context is split into overlapping windows of at most
    ``max_seq_length`` tokens (stepping by ``doc_stride``), and the
    character-level answer span is mapped into token positions within each
    window (the CLS index is used when the answer falls outside a window).

    NOTE(review): relies on a module-level ``tokenizer`` (plus the helpers
    ``_is_whitespace``, ``_whitespace_tokenize``, ``_improve_answer_span``)
    rather than receiving them as parameters.

    Returns an empty list when the answer text cannot be located in the
    reconstructed context.
    """
    features = []
    (doc_tokens, char_to_word_offset) = ([], [])
    prev_is_whitespace = True
    # Whitespace-split the raw context, recording for every character the
    # index of the word it belongs to.
    for c in example.context_text:
        if _is_whitespace(c):
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)
            else:
                doc_tokens[(- 1)] += c
            prev_is_whitespace = False
        char_to_word_offset.append((len(doc_tokens) - 1))
    # Map the character-level answer span onto word indices.
    start_position = char_to_word_offset[example.start_position_character]
    end_position = char_to_word_offset[min(((example.start_position_character + len(example.answer_text)) - 1), (len(char_to_word_offset) - 1))]
    # Sanity check: the reconstructed span must contain the cleaned answer.
    actual_text = ' '.join(doc_tokens[start_position:(end_position + 1)])
    cleaned_answer_text = ' '.join(_whitespace_tokenize(example.answer_text))
    if (actual_text.find(cleaned_answer_text) == (- 1)):
        logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
        return []
    # Re-tokenize with the model tokenizer, keeping word<->subtoken maps.
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    for (i, token) in enumerate(doc_tokens):
        orig_to_tok_index.append(len(all_doc_tokens))
        sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)
    # Translate the word-level answer span into subtoken positions.
    tok_start_position = orig_to_tok_index[start_position]
    if (end_position < (len(doc_tokens) - 1)):
        tok_end_position = (orig_to_tok_index[(end_position + 1)] - 1)
    else:
        # Bug fix: the original used len(doc_tokens) - 1 (a word index) here;
        # this must be a subtoken index, matching the upstream implementation.
        tok_end_position = (len(all_doc_tokens) - 1)
    (tok_start_position, tok_end_position) = _improve_answer_span(all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text)
    spans = []
    truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length)
    # RoBERTa/CamemBERT insert one extra special token for single sentences.
    sequence_added_tokens = (((tokenizer.model_max_length - tokenizer.max_len_single_sentence) + 1) if (('roberta' in str(type(tokenizer))) or ('camembert' in str(type(tokenizer)))) else (tokenizer.model_max_length - tokenizer.max_len_single_sentence))
    sequence_pair_added_tokens = (tokenizer.model_max_length - tokenizer.max_len_sentences_pair)
    span_doc_tokens = all_doc_tokens
    # Build overlapping windows until the whole document is covered.
    while ((len(spans) * doc_stride) < len(all_doc_tokens)):
        encoded_dict = tokenizer.encode_plus((truncated_query if (tokenizer.padding_side == 'right') else span_doc_tokens), (span_doc_tokens if (tokenizer.padding_side == 'right') else truncated_query), truncation=('only_second' if (tokenizer.padding_side == 'right') else 'only_first'), padding='max_length', max_length=max_seq_length, return_overflowing_tokens=True, stride=(((max_seq_length - doc_stride) - len(truncated_query)) - sequence_pair_added_tokens), return_token_type_ids=True)
        paragraph_len = min((len(all_doc_tokens) - (len(spans) * doc_stride)), ((max_seq_length - len(truncated_query)) - sequence_pair_added_tokens))
        encoded_dict['start'] = (len(spans) * doc_stride)
        encoded_dict['length'] = paragraph_len
        spans.append(encoded_dict)
        if (('overflowing_tokens' not in encoded_dict) or (('overflowing_tokens' in encoded_dict) and (len(encoded_dict['overflowing_tokens']) == 0))):
            break
        span_doc_tokens = encoded_dict['overflowing_tokens']
    # Emit one feature per window, relocating the answer span into it.
    for span in spans:
        cls_index = span['input_ids'].index(tokenizer.cls_token_id)
        doc_start = span['start']
        doc_end = ((span['start'] + span['length']) - 1)
        out_of_span = False
        if (not ((tok_start_position >= doc_start) and (tok_end_position <= doc_end))):
            out_of_span = True
        if out_of_span:
            # Answer not inside this window: point both positions at CLS.
            start_position = cls_index
            end_position = cls_index
        else:
            if (tokenizer.padding_side == 'left'):
                doc_offset = 0
            else:
                doc_offset = (len(truncated_query) + sequence_added_tokens)
            start_position = ((tok_start_position - doc_start) + doc_offset)
            end_position = ((tok_end_position - doc_start) + doc_offset)
        feature = QAFeatures(input_ids=span['input_ids'], attention_mask=span['attention_mask'], token_type_ids=span['token_type_ids'], start_positions=start_position, end_positions=end_position)
        features.append(feature)
    return features
class ReadCoilsRequest(ReadBitsRequestBase):
    """Modbus "Read Coils" request (function code 0x01)."""

    function_code = 1
    function_code_name = 'read_coils'

    def __init__(self, address=None, count=None, slave=0, **kwargs):
        """Store the starting coil ``address`` and the coil ``count``."""
        ReadBitsRequestBase.__init__(self, address, count, slave, **kwargs)

    def execute(self, context):
        """Run this request against the datastore ``context``.

        Returns a ReadCoilsResponse, or an exception response when the
        count is outside 1..2000 or the address range is invalid.
        """
        if not 1 <= self.count <= 2000:
            return self.doException(merror.IllegalValue)
        address_ok = context.validate(self.function_code, self.address, self.count)
        if not address_ok:
            return self.doException(merror.IllegalAddress)
        values = context.getValues(self.function_code, self.address, self.count)
        # The datastore may hand back an ExceptionResponse directly.
        if isinstance(values, ExceptionResponse):
            return values
        return ReadCoilsResponse(values)
class LDAPUrl():
    """Parse, edit and re-serialize LDAP URLs (RFC 4516 style).

    The ``who``/``cred`` attributes are virtual: they are stored as the
    'bindname'/'X-BINDPW' URL extensions through the
    __getattr__/__setattr__/__delattr__ hooks below.
    """

    # Virtual attribute name -> LDAP URL extension type.
    attr2extype = {'who': 'bindname', 'cred': 'X-BINDPW'}

    def __init__(self, ldapUrl=None, urlscheme='ldap', hostport='', dn='', attrs=None, scope=None, filterstr=None, extensions=None, who=None, cred=None):
        self.urlscheme = urlscheme.lower()
        self.hostport = hostport
        self.dn = dn
        self.attrs = attrs
        self.scope = scope
        self.filterstr = filterstr
        self.extensions = (extensions or LDAPUrlExtensions({}))
        if (ldapUrl != None):
            # Parsing a full URL overrides the keyword defaults above.
            self._parse(ldapUrl)
        if (who != None):
            self.who = who
        if (cred != None):
            self.cred = cred

    def __eq__(self, other):
        """Field-by-field equality with another LDAPUrl."""
        return ((self.urlscheme == other.urlscheme) and (self.hostport == other.hostport) and (self.dn == other.dn) and (self.attrs == other.attrs) and (self.scope == other.scope) and (self.filterstr == other.filterstr) and (self.extensions == other.extensions))

    def __ne__(self, other):
        return (not self.__eq__(other))

    def _parse(self, ldap_url):
        """Populate this object from the string ``ldap_url``.

        Raises ValueError when the string is not an LDAP URL or contains an
        invalid search scope.
        """
        if (not isLDAPUrl(ldap_url)):
            raise ValueError(('Value %s for ldap_url does not seem to be a LDAP URL.' % repr(ldap_url)))
        (scheme, rest) = ldap_url.split('://', 1)
        self.urlscheme = scheme.lower()
        slash_pos = rest.find('/')
        qemark_pos = rest.find('?')
        if ((slash_pos == (- 1)) and (qemark_pos == (- 1))):
            # Host/port only — no DN and no parameters.
            self.hostport = unquote(rest)
            self.dn = ''
            return
        elif ((slash_pos != (- 1)) and ((qemark_pos == (- 1)) or (slash_pos < qemark_pos))):
            # Host/port terminated by '/', DN and parameters follow.
            self.hostport = unquote(rest[:slash_pos])
            rest = rest[(slash_pos + 1):]
        elif ((qemark_pos != (- 1)) and ((slash_pos == (- 1)) or (slash_pos > qemark_pos))):
            # Bug fix: this previously tested ``qemark_pos != 1``, which sent
            # URLs with '?' at index 1 into the error branch below.
            self.hostport = unquote(rest[:qemark_pos])
            rest = rest[qemark_pos:]
        else:
            raise ValueError('Something completely weird happened!')
        # dn ? attrs ? scope ? filter ? extensions
        paramlist = rest.split('?', 4)
        paramlist_len = len(paramlist)
        if (paramlist_len >= 1):
            self.dn = unquote(paramlist[0]).strip()
        if ((paramlist_len >= 2) and paramlist[1]):
            self.attrs = unquote(paramlist[1].strip()).split(',')
        if (paramlist_len >= 3):
            scope = paramlist[2].strip()
            try:
                self.scope = SEARCH_SCOPE[scope]
            except KeyError:
                raise ValueError(('Invalid search scope %s' % repr(scope)))
        if (paramlist_len >= 4):
            filterstr = paramlist[3].strip()
            if (not filterstr):
                self.filterstr = None
            else:
                self.filterstr = unquote(filterstr)
        if (paramlist_len >= 5):
            if paramlist[4]:
                self.extensions = LDAPUrlExtensions()
                self.extensions.parse(paramlist[4])
            else:
                self.extensions = None
        return

    def applyDefaults(self, defaults):
        """Fill any attribute that is still None from the ``defaults`` dict."""
        for (k, value) in defaults.items():
            if (getattr(self, k) is None):
                setattr(self, k, value)

    def initializeUrl(self):
        """Return the scheme://hostport prefix suitable for initialize()."""
        if (self.urlscheme == 'ldapi'):
            # hostport is a socket path for ldapi and must be escaped.
            hostport = ldapUrlEscape(self.hostport)
        else:
            hostport = self.hostport
        return f'{self.urlscheme}://{hostport}'

    def unparse(self):
        """Serialize this object back into an LDAP URL string."""
        if (self.attrs is None):
            attrs_str = ''
        else:
            attrs_str = ','.join(self.attrs)
        scope_str = SEARCH_SCOPE_STR[self.scope]
        if (self.filterstr is None):
            filterstr = ''
        else:
            filterstr = ldapUrlEscape(self.filterstr)
        dn = ldapUrlEscape(self.dn)
        if (self.urlscheme == 'ldapi'):
            hostport = ldapUrlEscape(self.hostport)
        else:
            hostport = self.hostport
        ldap_url = '{}://{}/{}?{}?{}?{}'.format(self.urlscheme, hostport, dn, attrs_str, scope_str, filterstr)
        if self.extensions:
            ldap_url = ((ldap_url + '?') + self.extensions.unparse())
        return ldap_url

    def htmlHREF(self, urlPrefix='', hrefText=None, hrefTarget=None):
        """Render this URL as an HTML anchor element."""
        if (not isinstance(urlPrefix, str)):
            raise TypeError(('urlPrefix must be str, not ' + type(urlPrefix).__name__))
        if (hrefText is None):
            hrefText = self.unparse()
        if (not isinstance(hrefText, str)):
            raise TypeError(('hrefText must be str, not ' + type(hrefText).__name__))
        if (hrefTarget is None):
            target = ''
        else:
            if (not isinstance(hrefTarget, str)):
                raise TypeError(('hrefTarget must be str, not ' + type(hrefTarget).__name__))
            target = (' target="%s"' % hrefTarget)
        return '<a{} href="{}{}">{}</a>'.format(target, urlPrefix, self.unparse(), hrefText)

    def __str__(self):
        return self.unparse()

    def __repr__(self):
        return '<{}.{} instance at {}: {}>'.format(self.__class__.__module__, self.__class__.__name__, hex(id(self)), self.__dict__)

    def __getattr__(self, name):
        """Resolve virtual attributes ('who'/'cred') from the extensions."""
        if (name in self.attr2extype):
            extype = self.attr2extype[name]
            if (self.extensions and (extype in self.extensions) and (not (self.extensions[extype].exvalue is None))):
                result = unquote(self.extensions[extype].exvalue)
            else:
                return None
        else:
            raise AttributeError('{} has no attribute {}'.format(self.__class__.__name__, name))
        return result

    def __setattr__(self, name, value):
        """Store virtual attributes as extensions; everything else normally."""
        if (name in self.attr2extype):
            extype = self.attr2extype[name]
            if (value is None):
                # Assigning None removes the corresponding extension.
                delattr(self, name)
            else:
                # (The original dead ``elif value != None`` collapsed to else.)
                self.extensions[extype] = LDAPUrlExtension(extype=extype, exvalue=unquote(value))
        else:
            self.__dict__[name] = value

    def __delattr__(self, name):
        """Deleting a virtual attribute removes its extension, if present."""
        if (name in self.attr2extype):
            extype = self.attr2extype[name]
            if self.extensions:
                try:
                    del self.extensions[extype]
                except KeyError:
                    pass
        else:
            del self.__dict__[name]
class RPNTest(unittest.TestCase):
    """Unit tests for the RPN / RRPN proposal generators and their helpers."""

    def get_gt_and_features(self):
        """Build a small shared fixture: GT instances, a fake 'res4' feature
        map, an ImageList of two images and their sizes."""
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        image_shape = (15, 15)
        num_channels = 1024
        features = {'res4': torch.rand(num_images, num_channels, 1, 2)}
        gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
        gt_instances = Instances(image_shape)
        gt_instances.gt_boxes = Boxes(gt_boxes)
        return (gt_instances, features, images, image_sizes)

    def test_rpn(self):
        """RPN forward pass reproduces pinned losses, proposal boxes and
        objectness logits under a fixed random seed."""
        torch.manual_seed(121)
        cfg = get_cfg()
        backbone = build_backbone(cfg)
        proposal_generator = RPN(cfg, backbone.output_shape())
        (gt_instances, features, images, image_sizes) = self.get_gt_and_features()
        # EventStorage is required for loss logging during training mode.
        with EventStorage():
            (proposals, proposal_losses) = proposal_generator(images, features, [gt_instances[0], gt_instances[1]])
        expected_losses = {'loss_rpn_cls': torch.tensor(0.), 'loss_rpn_loc': torch.tensor(0.)}
        for name in expected_losses.keys():
            err_msg = 'proposal_losses[{}] = {}, expected losses = {}'.format(name, proposal_losses[name], expected_losses[name])
            self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
        self.assertEqual(len(proposals), len(image_sizes))
        for (proposal, im_size) in zip(proposals, image_sizes):
            self.assertEqual(proposal.image_size, im_size)
        # Values pinned from a known-good run at this seed.
        expected_proposal_box = torch.tensor([[0, 0, 10, 10], [7.2702, 0, 10, 10]])
        expected_objectness_logit = torch.tensor([0.1596, (- 0.0007)])
        self.assertTrue(torch.allclose(proposals[0].proposal_boxes.tensor, expected_proposal_box, atol=0.0001))
        self.assertTrue(torch.allclose(proposals[0].objectness_logits, expected_objectness_logit, atol=0.0001))

    def verify_rpn(self, conv_dims, expected_conv_dims):
        """Build an RPN with the given head conv dims and assert the
        resulting conv layers have the expected output channels."""
        torch.manual_seed(121)
        cfg = get_cfg()
        cfg.MODEL.RPN.CONV_DIMS = conv_dims
        backbone = build_backbone(cfg)
        proposal_generator = RPN(cfg, backbone.output_shape())
        for (k, conv) in enumerate(proposal_generator.rpn_head.conv):
            self.assertEqual(expected_conv_dims[k], conv.out_channels)
        return proposal_generator

    def test_rpn_larger_num_convs(self):
        """An RPN head with five conv layers still trains without error."""
        conv_dims = [64, 64, 64, 64, 64]
        proposal_generator = self.verify_rpn(conv_dims, conv_dims)
        (gt_instances, features, images, image_sizes) = self.get_gt_and_features()
        with EventStorage():
            (proposals, proposal_losses) = proposal_generator(images, features, [gt_instances[0], gt_instances[1]])
        expected_losses = {'loss_rpn_cls': torch.tensor(0.), 'loss_rpn_loc': torch.tensor(0.)}
        for name in expected_losses.keys():
            err_msg = 'proposal_losses[{}] = {}, expected losses = {}'.format(name, proposal_losses[name], expected_losses[name])
            self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)

    def test_rpn_conv_dims_not_set(self):
        """CONV_DIMS of -1 means 'inherit the input channel count' (1024)."""
        conv_dims = [(- 1), (- 1), (- 1)]
        expected_conv_dims = [1024, 1024, 1024]
        self.verify_rpn(conv_dims, expected_conv_dims)

    def test_rpn_scriptability(self):
        """TorchScript-compiled RPN matches eager outputs exactly."""
        cfg = get_cfg()
        proposal_generator = RPN(cfg, {'res4': ShapeSpec(channels=1024, stride=16)}).eval()
        num_images = 2
        images_tensor = torch.rand(num_images, 30, 40)
        image_sizes = [(32, 32), (30, 40)]
        images = ImageList(images_tensor, image_sizes)
        features = {'res4': torch.rand(num_images, 1024, 1, 2)}
        fields = {'proposal_boxes': Boxes, 'objectness_logits': torch.Tensor}
        proposal_generator_ts = scripting_with_instances(proposal_generator, fields)
        (proposals, _) = proposal_generator(images, features)
        (proposals_ts, _) = proposal_generator_ts(images, features)
        for (proposal, proposal_ts) in zip(proposals, proposals_ts):
            self.assertEqual(proposal.image_size, proposal_ts.image_size)
            self.assertTrue(torch.equal(proposal.proposal_boxes.tensor, proposal_ts.proposal_boxes.tensor))
            self.assertTrue(torch.equal(proposal.objectness_logits, proposal_ts.objectness_logits))

    def test_rrpn(self):
        """Rotated RPN (RRPN) reproduces pinned losses and rotated boxes."""
        torch.manual_seed(121)
        cfg = get_cfg()
        cfg.MODEL.PROPOSAL_GENERATOR.NAME = 'RRPN'
        cfg.MODEL.ANCHOR_GENERATOR.NAME = 'RotatedAnchorGenerator'
        cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
        cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
        cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
        # Five regression weights: rotated boxes add an angle term.
        cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
        cfg.MODEL.RPN.HEAD_NAME = 'StandardRPNHead'
        backbone = build_backbone(cfg)
        proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
        num_images = 2
        images_tensor = torch.rand(num_images, 20, 30)
        image_sizes = [(10, 10), (20, 30)]
        images = ImageList(images_tensor, image_sizes)
        image_shape = (15, 15)
        num_channels = 1024
        features = {'res4': torch.rand(num_images, num_channels, 1, 2)}
        # Rotated GT boxes: (cx, cy, w, h, angle).
        gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
        gt_instances = Instances(image_shape)
        gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
        with EventStorage():
            (proposals, proposal_losses) = proposal_generator(images, features, [gt_instances[0], gt_instances[1]])
        expected_losses = {'loss_rpn_cls': torch.tensor(0.), 'loss_rpn_loc': torch.tensor(0.)}
        for name in expected_losses.keys():
            err_msg = 'proposal_losses[{}] = {}, expected losses = {}'.format(name, proposal_losses[name], expected_losses[name])
            self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
        # Only the first four proposals are pinned below.
        expected_proposal_box = torch.tensor([[(- 1.), 0., 68., 14., 60.], [13., (- 1.), 34., 29.1967659, (- 3.)], [8.1039257, (- 0.), 145., 32., 3.], [5.0, 4., 10.0, 9., 0.]])
        expected_objectness_logit = torch.tensor([0., 0.0988187, 0., 0.])
        torch.set_printoptions(precision=8, sci_mode=False)
        self.assertEqual(len(proposals), len(image_sizes))
        proposal = proposals[0]
        err_msg = 'computed proposal boxes = {}, expected {}'.format(proposal.proposal_boxes.tensor, expected_proposal_box)
        self.assertTrue(torch.allclose(proposal.proposal_boxes.tensor[:4], expected_proposal_box, atol=1e-05), err_msg)
        err_msg = 'computed objectness logits = {}, expected {}'.format(proposal.objectness_logits, expected_objectness_logit)
        self.assertTrue(torch.allclose(proposal.objectness_logits[:4], expected_objectness_logit, atol=1e-05), err_msg)

    def test_find_rpn_proposals_inf(self):
        """find_top_rpn_proposals must tolerate infinite logits without crashing."""
        (N, Hi, Wi, A) = (3, 3, 3, 3)
        proposals = [torch.rand(N, ((Hi * Wi) * A), 4)]
        pred_logits = [torch.rand(N, ((Hi * Wi) * A))]
        pred_logits[0][1][3:5].fill_(float('inf'))
        find_top_rpn_proposals(proposals, pred_logits, [(10, 10)], 0.5, 1000, 1000, 0, False)

    def test_find_rpn_proposals_tracing(self):
        """find_top_rpn_proposals is traceable and generalizes across input sizes."""
        (N, Hi, Wi, A) = (3, 50, 50, 9)
        proposal = torch.rand(N, ((Hi * Wi) * A), 4)
        pred_logit = torch.rand(N, ((Hi * Wi) * A))

        def func(proposal, logit, image_size):
            # Wrap the call so the traced graph returns plain tensors.
            r = find_top_rpn_proposals([proposal], [logit], [image_size], 0.7, 1000, 1000, 0, False)[0]
            size = r.image_size
            if (not isinstance(size, torch.Tensor)):
                size = torch.tensor(size)
            return (size, r.proposal_boxes.tensor, r.objectness_logits)

        other_inputs = []
        # Additional shapes used by check_inputs to validate the trace.
        for (Hi, Wi, shp) in [(30, 30, 60), (10, 10, 800)]:
            other_inputs.append((torch.rand(N, ((Hi * Wi) * A), 4), torch.rand(N, ((Hi * Wi) * A)), torch.tensor([shp, shp])))
        torch.jit.trace(func, (proposal, pred_logit, torch.tensor([100, 100])), check_inputs=other_inputs)

    def test_append_gt_to_proposal(self):
        """add_ground_truth_to_proposals requires Instances with matching
        custom fields and propagates those fields to the merged proposals."""
        proposals = Instances((10, 10), **{'proposal_boxes': Boxes(torch.empty((0, 4))), 'objectness_logits': torch.tensor([]), 'custom_attribute': torch.tensor([])})
        gt_boxes = Boxes(torch.tensor([[0, 0, 1, 1]]))
        # Raw Boxes (not Instances) must be rejected.
        self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_boxes], [proposals])
        gt_instances = Instances((10, 10))
        gt_instances.gt_boxes = gt_boxes
        # Missing the proposals' custom_attribute field: still rejected.
        self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_instances], [proposals])
        gt_instances.custom_attribute = torch.tensor([1])
        # Extra GT-only fields are allowed but not carried over.
        gt_instances.custom_attribute2 = torch.tensor([1])
        new_proposals = add_ground_truth_to_proposals([gt_instances], [proposals])[0]
        self.assertEqual(new_proposals.custom_attribute[0], 1)
        self.assertRaises(AttributeError, (lambda : new_proposals.custom_attribute2))
class BasicModule(nn.Module):
    """Two-layer MLP: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, inDim, outDim, hidden_dim=1000, dp_rate=0.3):
        super(BasicModule, self).__init__()
        stages = [
            nn.Linear(inDim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(p=dp_rate),
            nn.Linear(hidden_dim, outDim),
        ]
        self.layers = nn.Sequential(*stages)

    def forward(self, x):
        """Map ``x`` of shape (..., inDim) to (..., outDim)."""
        return self.layers(x)
def channel_pruning_example(config: argparse.Namespace):
    """Run AIMET channel pruning on a pretrained ResNet-18, then fine-tune.

    Evaluates the original model, compresses it, writes the compression
    statistics to ``<logdir>/log.txt``, fine-tunes the compressed model,
    re-evaluates it and saves it to ``<logdir>/compressed_model.pth``.
    """
    data_pipeline = ImageNetDataPipeline(config)
    model = models.resnet18(pretrained=True)
    if config.use_cuda:
        model.to(torch.device('cuda'))
    model.eval()
    accuracy = data_pipeline.evaluate(model, use_cuda=config.use_cuda)
    logger.info('Original Model top-1 accuracy = %.2f', accuracy)
    logger.info('Starting Channel Pruning')
    # Bug fix: the original referenced the undefined name ``_config`` here,
    # which raised NameError at runtime; the parameter is ``config``.
    data_loader = ImageNetDataLoader(is_training=True, images_dir=config.dataset_dir, image_size=224).data_loader
    (compressed_model, stats) = aimet_channel_pruning(model=model, evaluator=data_pipeline.evaluate, data_loader=data_loader)
    logger.info(stats)
    with open(os.path.join(config.logdir, 'log.txt'), 'w') as outfile:
        outfile.write('%s\n\n' % stats)
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('After Channel Pruning, top-1 accuracy = %.2f', accuracy)
    logger.info('Model Channel Pruning Complete')
    logger.info('Starting Model Finetuning')
    data_pipeline.finetune(compressed_model)
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('Finetuned Compressed Model top-1 accuracy = %.2f', accuracy)
    logger.info('Model Finetuning Complete')
    torch.save(compressed_model, os.path.join(config.logdir, 'compressed_model.pth'))
class SemanticAnalyzerPreAnalysis(TraverserVisitor):
    """Pre-semantic-analysis pass: marks top-level imports, tracks global
    scope, and records lines skipped as unreachable (e.g. after a failing
    platform assert or inside unreachable blocks)."""

    def visit_file(self, file: MypyFile, fnam: str, mod_id: str, options: Options) -> None:
        """Traverse a module's top-level definitions.

        If a top-level assert is known to always fail for the current
        options (e.g. a platform check), everything after it is dropped
        from ``file.defs`` and the corresponding lines are recorded as
        skipped.
        """
        self.platform = options.platform
        self.cur_mod_id = mod_id
        self.cur_mod_node = file
        self.options = options
        self.is_global_scope = True
        self.skipped_lines: set[int] = set()
        for (i, defn) in enumerate(file.defs):
            defn.accept(self)
            if (isinstance(defn, AssertStmt) and assert_will_always_fail(defn, options)):
                if (i < (len(file.defs) - 1)):
                    (next_def, last) = (file.defs[(i + 1)], file.defs[(- 1)])
                    if (last.end_line is not None):
                        # Everything from the next definition to the end of
                        # the file is unreachable.
                        self.skipped_lines |= set(range(next_def.line, (last.end_line + 1)))
                del file.defs[(i + 1):]
                break
        file.skipped_lines = self.skipped_lines

    def visit_func_def(self, node: FuncDef) -> None:
        """Visit a function body in non-global scope, then detect
        module-level ``__getattr__`` in a stub package __init__."""
        old_global_scope = self.is_global_scope
        self.is_global_scope = False
        super().visit_func_def(node)
        self.is_global_scope = old_global_scope
        file_node = self.cur_mod_node
        # Only true when this def is itself at module level (scope restored).
        if (self.is_global_scope and file_node.is_stub and (node.name == '__getattr__') and file_node.is_package_init_file()):
            file_node.is_partial_stub_package = True

    def visit_class_def(self, node: ClassDef) -> None:
        """Class bodies are not global scope."""
        old_global_scope = self.is_global_scope
        self.is_global_scope = False
        super().visit_class_def(node)
        self.is_global_scope = old_global_scope

    def visit_import_from(self, node: ImportFrom) -> None:
        node.is_top_level = self.is_global_scope
        super().visit_import_from(node)

    def visit_import_all(self, node: ImportAll) -> None:
        node.is_top_level = self.is_global_scope
        super().visit_import_all(node)

    def visit_import(self, node: Import) -> None:
        node.is_top_level = self.is_global_scope
        super().visit_import(node)

    def visit_if_stmt(self, s: IfStmt) -> None:
        """Mark unreachable branches before descending into them."""
        infer_reachability_of_if_statement(s, self.options)
        for expr in s.expr:
            expr.accept(self)
        for node in s.body:
            node.accept(self)
        if s.else_body:
            s.else_body.accept(self)

    def visit_block(self, b: Block) -> None:
        # Unreachable blocks are recorded as skipped and not traversed.
        if b.is_unreachable:
            if (b.end_line is not None):
                self.skipped_lines |= set(range(b.line, (b.end_line + 1)))
            return
        super().visit_block(b)

    def visit_match_stmt(self, s: MatchStmt) -> None:
        """Mark unreachable match arms, then visit guards and bodies.

        Patterns themselves are deliberately not visited in this pass.
        """
        infer_reachability_of_match_statement(s, self.options)
        for guard in s.guards:
            if (guard is not None):
                guard.accept(self)
        for body in s.bodies:
            body.accept(self)

    # The statements below contain nothing this pass cares about, so their
    # subtrees are intentionally not traversed (performance).
    def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
        pass

    def visit_expression_stmt(self, s: ExpressionStmt) -> None:
        pass

    def visit_return_stmt(self, s: ReturnStmt) -> None:
        pass

    def visit_for_stmt(self, s: ForStmt) -> None:
        # Only the bodies may contain imports/defs relevant to this pass.
        s.body.accept(self)
        if (s.else_body is not None):
            s.else_body.accept(self)
class AbbreviatedFirstNameAnalyzer(_InitialsAnalyzer):
    """Analyzer unit for abbreviated first names (single-letter initials)."""

    TAG_PATTERN = 'NOUN,anim,%(gender)s,Sgtm,Name,Fixd,Abbr,Init sing,%(case)s'

    def init(self, morph):
        super(AbbreviatedFirstNameAnalyzer, self).init(morph)
        # Partition the prebuilt tag list by grammatical gender.
        self._tags_masc = [t for t in self._tags if 'masc' in t]
        self._tags_femn = [t for t in self._tags if 'femn' in t]
        # Every tag must be masculine or feminine, preserving original order.
        assert (self._tags_masc + self._tags_femn) == self._tags

    def _init_grammemes(self, tag_cls):
        super(AbbreviatedFirstNameAnalyzer, self)._init_grammemes(tag_cls)
        self.morph.TagClass.add_grammemes_to_known('Name', '', overwrite=False)

    def _tags_for(self, form_tag):
        """Return the gender-matched tag list for ``form_tag``."""
        if 'masc' in form_tag:
            return self._tags_masc
        return self._tags_femn

    def get_lexeme(self, form):
        """Expand ``form`` into one parse per gender-matched tag."""
        (fixed_word, form_tag, normal_form, score, methods_stack) = form
        return [
            (fixed_word, tag, normal_form, score, methods_stack)
            for tag in self._tags_for(form_tag)
        ]

    def normalized(self, form):
        """Return ``form`` re-tagged with the first gender-matched tag."""
        (fixed_word, form_tag, normal_form, score, methods_stack) = form
        first_tag = self._tags_for(form_tag)[0]
        return (fixed_word, first_tag, normal_form, score, methods_stack)
class TrainLoop():
    """Main training loop for a diffusion model.

    Supports microbatched gradient accumulation, optional fp16 training
    with a dynamic (log2) loss scale, EMA copies of the master parameters
    at one or more rates, resuming model/EMA/optimizer state from
    checkpoints, DDP when CUDA is available, linear LR annealing, and
    periodic logging/checkpointing.
    """

    def __init__(self, *, model, diffusion, data, batch_size, microbatch, lr, ema_rate, log_interval, save_interval, resume_checkpoint, use_fp16=False, fp16_scale_growth=0.001, schedule_sampler=None, weight_decay=0.0, lr_anneal_steps=0, class_cond=False):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        # A non-positive microbatch disables microbatching entirely.
        self.microbatch = (microbatch if (microbatch > 0) else batch_size)
        self.lr = lr
        # ema_rate may be a single float or a comma-separated string of rates.
        self.ema_rate = ([ema_rate] if isinstance(ema_rate, float) else [float(x) for x in ema_rate.split(',')])
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = (schedule_sampler or UniformSampler(diffusion))
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        self.class_cond = class_cond
        self.step = 0
        self.resume_step = 0
        # Effective batch size across all distributed ranks.
        self.global_batch = (self.batch_size * dist.get_world_size())
        self.model_params = list(self.model.parameters())
        # In fp16 mode master_params is replaced with an fp32 copy in
        # _setup_fp16(); otherwise it aliases the live model parameters.
        self.master_params = self.model_params
        self.lg_loss_scale = INITIAL_LOG_LOSS_SCALE
        self.sync_cuda = th.cuda.is_available()
        self._load_and_sync_parameters()
        if self.use_fp16:
            self._setup_fp16()
        self.opt = AdamW(self.master_params, lr=self.lr, weight_decay=self.weight_decay)
        if self.resume_step:
            self._load_optimizer_state()
            self.ema_params = [self._load_ema_parameters(rate) for rate in self.ema_rate]
        else:
            self.ema_params = [copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))]
        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(self.model, device_ids=[dist_util.dev()], output_device=dist_util.dev(), broadcast_buffers=False, bucket_cap_mb=128, find_unused_parameters=False)
        else:
            if (dist.get_world_size() > 1):
                logger.warn('Distributed training requires CUDA. Gradients will not be synchronized properly!')
            self.use_ddp = False
            self.ddp_model = self.model

    def _load_and_sync_parameters(self):
        """Load model weights from a resume checkpoint (rank 0 only) and
        broadcast them to every rank."""
        resume_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if (dist.get_rank() == 0):
                logger.log(f'loading model from checkpoint: {resume_checkpoint}...')
                self.model.load_state_dict(dist_util.load_state_dict(resume_checkpoint, map_location=dist_util.dev()))
        dist_util.sync_params(self.model.parameters())

    def _load_ema_parameters(self, rate):
        """Return EMA parameters for *rate*: loaded from an EMA checkpoint
        when one exists, else a fresh deep copy of the master params."""
        ema_params = copy.deepcopy(self.master_params)
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if (dist.get_rank() == 0):
                logger.log(f'loading EMA from checkpoint: {ema_checkpoint}...')
                state_dict = dist_util.load_state_dict(ema_checkpoint, map_location=dist_util.dev())
                ema_params = self._state_dict_to_master_params(state_dict)
        dist_util.sync_params(ema_params)
        return ema_params

    def _load_optimizer_state(self):
        """Restore optimizer state saved alongside the main checkpoint
        (optNNNNNN.pt in the same directory)."""
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        opt_checkpoint = bf.join(bf.dirname(main_checkpoint), f'opt{self.resume_step:06}.pt')
        if bf.exists(opt_checkpoint):
            logger.log(f'loading optimizer state from checkpoint: {opt_checkpoint}')
            state_dict = dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
            self.opt.load_state_dict(state_dict)

    def _setup_fp16(self):
        """Create fp32 master parameters and switch the model to fp16."""
        self.master_params = make_master_params(self.model_params)
        self.model.convert_to_fp16()

    def convert_labels(self, labels, index=1):
        """Collapse a batched label map into a per-sample class label.

        Entries not matching *index* are zeroed (for index == 2 the values
        kept are those in [2, 3]); every sample whose map still contains a
        nonzero entry gets class label *index*, otherwise 0.

        NOTE: *labels* is modified in place.

        Returns a dict with key 'y' holding the per-sample long tensor.
        """
        cond = {}
        if (index == 2):
            labels[(labels < 2)] = 0
            labels[(labels > 3)] = 0
        else:
            labels[(labels != index)] = 0
        cls_label = th.zeros(labels.size(0))
        for i in range(labels.size(0)):
            if (th.sum(labels[i]) > 0):
                cls_label[i] = index
        cond['y'] = cls_label.long()
        return cond

    def run_loop(self):
        """Iterate over the data until lr_anneal_steps is reached (or run
        forever when it is 0), logging and saving at configured intervals."""
        while ((not self.lr_anneal_steps) or ((self.step + self.resume_step) < self.lr_anneal_steps)):
            for batch_data in self.data:
                (batch, cond) = (batch_data[0].float(), batch_data[1].long())
                self.run_step(batch, cond)
                if ((self.step % self.log_interval) == 0):
                    logger.dumpkvs()
                if ((self.step % self.save_interval) == 0):
                    self.save()
                    # Allow smoke-test runs to stop after one checkpoint.
                    if (os.environ.get('DIFFUSION_TRAINING_TEST', '') and (self.step > 0)):
                        return
                self.step += 1
        # Save a final checkpoint unless the last step just saved one.
        if (((self.step - 1) % self.save_interval) != 0):
            self.save()

    def run_step(self, batch, cond):
        """One optimization step: forward/backward, then fp16 or fp32 update."""
        self.forward_backward(batch, cond)
        if self.use_fp16:
            self.optimize_fp16()
        else:
            self.optimize_normal()
        self.log_step()

    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of (batch, cond)."""
        zero_grad(self.model_params)
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i:(i + self.microbatch)].to(dist_util.dev())
            if self.class_cond:
                micro_cond = {k: v[i:(i + self.microbatch)].to(dist_util.dev()) for (k, v) in cond.items()}
            else:
                micro_cond = {}
            last_batch = ((i + self.microbatch) >= batch.shape[0])
            (t, weights) = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(self.diffusion.training_losses, self.ddp_model, micro, t, model_kwargs=micro_cond)
            if (last_batch or (not self.use_ddp)):
                losses = compute_losses()
            else:
                # Skip DDP gradient sync on all but the last microbatch.
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(t, losses['loss'].detach())
            loss = (losses['loss'] * weights).mean()
            log_loss_dict(self.diffusion, t, {k: (v * weights) for (k, v) in losses.items()})
            if self.use_fp16:
                # Scale the loss so small fp16 gradients do not underflow.
                loss_scale = (2 ** self.lg_loss_scale)
                (loss * loss_scale).backward()
            else:
                loss.backward()

    def optimize_fp16(self):
        """fp16 update: on non-finite grads skip the step and shrink the
        loss scale; otherwise unscale, step, update EMAs, grow the scale."""
        if any(((not th.isfinite(p.grad).all()) for p in self.model_params)):
            self.lg_loss_scale -= 1
            logger.log(f'Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}')
            return
        model_grads_to_master_grads(self.model_params, self.master_params)
        # Undo the loss scaling applied in forward_backward().
        # NOTE(review): only element [0] is scaled — assumes
        # make_master_params returns a single flattened fp32 tensor; confirm.
        self.master_params[0].grad.mul_((1.0 / (2 ** self.lg_loss_scale)))
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)
        master_params_to_model_params(self.model_params, self.master_params)
        self.lg_loss_scale += self.fp16_scale_growth

    def optimize_normal(self):
        """Standard fp32 update followed by EMA updates."""
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)

    def _log_grad_norm(self):
        """Log the global L2 norm of the master-parameter gradients."""
        sqsum = 0.0
        for p in self.master_params:
            sqsum += (p.grad ** 2).sum().item()
        logger.logkv_mean('grad_norm', np.sqrt(sqsum))

    def _anneal_lr(self):
        """Linearly decay LR to 0 over lr_anneal_steps (no-op when 0)."""
        if (not self.lr_anneal_steps):
            return
        frac_done = ((self.step + self.resume_step) / self.lr_anneal_steps)
        lr = (self.lr * (1 - frac_done))
        for param_group in self.opt.param_groups:
            param_group['lr'] = lr

    def log_step(self):
        """Record step/sample counters (and the fp16 loss scale)."""
        logger.logkv('step', (self.step + self.resume_step))
        logger.logkv('samples', (((self.step + self.resume_step) + 1) * self.global_batch))
        if self.use_fp16:
            logger.logkv('lg_loss_scale', self.lg_loss_scale)

    def save(self):
        """Save the model and every EMA copy, then barrier all ranks."""
        def save_checkpoint(rate, params):
            # rate == 0 marks the live (non-EMA) model weights.
            state_dict = self._master_params_to_state_dict(params)
            if (dist.get_rank() == 0):
                logger.log(f'saving model {rate}...')
                if (not rate):
                    filename = f'model.pt'
                else:
                    filename = f'ema_{rate}.pt'
                print(filename)
                print(bf.join(get_blob_logdir(), filename))
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), 'wb') as f:
                    th.save(state_dict, f)
        save_checkpoint(0, self.master_params)
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        dist.barrier()

    def _master_params_to_state_dict(self, master_params):
        """Map (possibly flattened fp16-mode) master params back onto a
        model state dict keyed by parameter name."""
        if self.use_fp16:
            master_params = unflatten_master_params(self.model.parameters(), master_params)
        state_dict = self.model.state_dict()
        for (i, (name, _value)) in enumerate(self.model.named_parameters()):
            assert (name in state_dict)
            state_dict[name] = master_params[i]
        return state_dict

    def _state_dict_to_master_params(self, state_dict):
        """Inverse of _master_params_to_state_dict."""
        params = [state_dict[name] for (name, _) in self.model.named_parameters()]
        if self.use_fp16:
            return make_master_params(params)
        else:
            return params
def project_version():
    """Detect the project version.

    First asks ``git describe --tags --always``; when the output has the
    ``<tag>-<distance>-<hash>`` shape it is reformatted as
    ``<tag>.<distance>+<hash>``, otherwise used verbatim.  Falls back to
    reading the VERSION file.

    Raises:
        RuntimeError: when neither source yields a version.
    """
    version = None
    try:
        # subprocess.DEVNULL instead of open(os.devnull, 'wb'):
        # the old form leaked an open file handle on every call.
        output = subprocess.check_output(
            ['git', 'describe', '--tags', '--always'],
            stderr=subprocess.DEVNULL,
        ).strip().decode()
    except (FileNotFoundError, subprocess.CalledProcessError):
        # git missing or not a git checkout; fall through to VERSION file.
        pass
    else:
        try:
            base, distance, commit_hash = output.split('-')
        except ValueError:
            # Plain tag or bare commit hash (no '-' separators).
            version = output
        else:
            version = '{}.{}+{}'.format(base, distance, commit_hash)
    if not version and os.path.exists(VERSION):
        with open(VERSION) as verfile:
            version = verfile.read().strip()
    if not version:
        raise RuntimeError('cannot detect project version')
    return version
class JSCoverage(object):
    """Collects JavaScript coverage over the Chrome DevTools Protocol.

    While enabled, records the URL and source of every parsed script;
    :meth:`stop` converts the profiler's precise coverage into a list of
    ``{'url', 'ranges', 'text'}`` dicts.
    """

    def __init__(self, client: CDPSession) -> None:
        self._client = client
        self._enabled = False
        self._scriptURLs: Dict = dict()
        self._scriptSources: Dict = dict()
        self._eventListeners: List = list()
        self._resetOnNavigation = False
        # Fix: initialize here so _onScriptParsed never reads an undefined
        # attribute (previously this was first assigned only in start()).
        self._reportAnonymousScript = False

    async def start(self, options: Dict = None, **kwargs: Any) -> None:
        """Enable coverage collection.

        Recognized options: ``resetOnNavigation`` (default True) and
        ``reportAnonymousScript``.  Raises PageError if already enabled.
        """
        options = merge_dict(options, kwargs)
        if self._enabled:
            raise PageError('JSCoverage is always enabled.')
        self._resetOnNavigation = (True if ('resetOnNavigation' not in options) else bool(options['resetOnNavigation']))
        self._reportAnonymousScript = bool(options.get('reportAnonymousScript'))
        self._enabled = True
        self._scriptURLs.clear()
        self._scriptSources.clear()
        self._eventListeners = [helper.addEventListener(self._client, 'Debugger.scriptParsed', (lambda e: self._client._loop.create_task(self._onScriptParsed(e)))), helper.addEventListener(self._client, 'Runtime.executionContextsCleared', self._onExecutionContextsCleared)]
        (await self._client.send('Profiler.enable'))
        (await self._client.send('Profiler.startPreciseCoverage', {'callCount': False, 'detailed': True}))
        (await self._client.send('Debugger.enable'))
        (await self._client.send('Debugger.setSkipAllPauses', {'skip': True}))

    def _onExecutionContextsCleared(self, event: Dict) -> None:
        """Drop recorded scripts on navigation when resetOnNavigation is on."""
        if (not self._resetOnNavigation):
            return
        self._scriptURLs.clear()
        self._scriptSources.clear()

    async def _onScriptParsed(self, event: Dict) -> None:
        """Record the URL and source of a newly parsed script."""
        # Scripts injected for evaluation are never reported.
        if (event.get('url') == EVALUATION_SCRIPT_URL):
            return
        if ((not event.get('url')) and (not self._reportAnonymousScript)):
            return
        scriptId = event.get('scriptId')
        url = event.get('url')
        if ((not url) and self._reportAnonymousScript):
            url = f'debugger://VM{scriptId}'
        try:
            response = (await self._client.send('Debugger.getScriptSource', {'scriptId': scriptId}))
            self._scriptURLs[scriptId] = url
            self._scriptSources[scriptId] = response.get('scriptSource')
        except Exception as e:
            # The script may already be gone when we ask for its source;
            # best-effort: log and continue.
            debugError(logger, e)

    async def stop(self) -> List:
        """Disable collection and return the coverage report.

        Returns a list of ``{'url', 'ranges', 'text'}`` dicts; scripts
        whose URL or source was never captured are skipped.
        """
        if (not self._enabled):
            raise PageError('JSCoverage is not enabled.')
        self._enabled = False
        result = (await self._client.send('Profiler.takePreciseCoverage'))
        (await self._client.send('Profiler.stopPreciseCoverage'))
        (await self._client.send('Profiler.disable'))
        (await self._client.send('Debugger.disable'))
        helper.removeEventListeners(self._eventListeners)
        coverage: List = []
        for entry in result.get('result', []):
            url = self._scriptURLs.get(entry.get('scriptId'))
            text = self._scriptSources.get(entry.get('scriptId'))
            if ((text is None) or (url is None)):
                continue
            flattenRanges: List = []
            for func in entry.get('functions', []):
                flattenRanges.extend(func.get('ranges', []))
            ranges = convertToDisjointRanges(flattenRanges)
            coverage.append({'url': url, 'ranges': ranges, 'text': text})
        return coverage
.pydicom
def test_identifier_is_sequence_vr():
replacement_strategy = pseudonymisation_api.pseudonymisation_dispatch
logging.info('Using pseudonymisation strategy')
identifying_keywords_no_SQ = ['PatientID', 'RequestedProcedureID']
identifying_keywords_with_SQ_vr = ['PatientID', 'RequestedProcedureID', 'RequestAttributesSequence']
identifying_requested_procedure_id = 'Tumour Identification'
non_identifying_scheduled_procedure_step_id = 'Tumour ID with Dual Energy'
ds_input = dicom_dataset_from_dict({'PatientID': 'ABC123', 'RequestAttributesSequence': [{'RequestedProcedureID': identifying_requested_procedure_id, 'ScheduledProcedureStepID': non_identifying_scheduled_procedure_step_id}]})
assert (ds_input.RequestAttributesSequence[0].RequestedProcedureID is not None)
ds_anon = anonymise_dataset(ds_input, replacement_strategy=replacement_strategy, identifying_keywords=identifying_keywords_with_SQ_vr)
assert ('RequestedProcedureID' not in ds_anon.RequestAttributesSequence[0])
ds_anon = anonymise_dataset(ds_input, replacement_strategy=replacement_strategy, identifying_keywords=identifying_keywords_no_SQ)
assert (ds_anon.RequestAttributesSequence is not None)
assert (ds_anon.RequestAttributesSequence[0] is not None)
assert (ds_anon.RequestAttributesSequence[0].RequestedProcedureID is not None)
assert (ds_anon.RequestAttributesSequence[0].ScheduledProcedureStepID is not None)
assert (ds_anon.RequestAttributesSequence[0].RequestedProcedureID != identifying_requested_procedure_id)
assert (ds_anon.RequestAttributesSequence[0].ScheduledProcedureStepID == non_identifying_scheduled_procedure_step_id) |
def parse_arguments(parser):
    """Attach every training/model CLI flag to *parser*, parse, and echo.

    Returns the parsed ``argparse.Namespace``; each option is printed as
    ``name: value`` so the full configuration appears in run logs.
    """
    # General run setup.
    parser.add_argument('--mode', type=str, default='test')
    parser.add_argument('--device', type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--digit2zero', action='store_true', default=True)
    # Data and embeddings.
    parser.add_argument('--dataset', type=str, default='spanish')
    parser.add_argument('--affix', type=str, default='sd')
    parser.add_argument('--embedding_file', type=str, default='data/cc.es.300.vec')
    parser.add_argument('--embedding_dim', type=int, default=300)
    # Optimization.
    parser.add_argument('--optimizer', type=str, default='sgd')
    parser.add_argument('--learning_rate', type=float, default=0.2)
    parser.add_argument('--momentum', type=float, default=0.0)
    parser.add_argument('--l2', type=float, default=1e-08)
    parser.add_argument('--lr_decay', type=float, default=0.1)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--num_epochs', type=int, default=200)
    # Dataset truncation (-1 means use everything).
    parser.add_argument('--train_num', type=int, default=-1)
    parser.add_argument('--dev_num', type=int, default=-1)
    parser.add_argument('--test_num', type=int, default=-1)
    parser.add_argument('--eval_freq', type=int, default=4000, help='evaluate frequency (iteration)')
    parser.add_argument('--eval_epoch', type=int, default=0, help='evaluate the dev set after this number of epoch')
    # Model architecture.
    parser.add_argument('--hidden_dim', type=int, default=200, help='hidden size of the Syn-LSTM')
    parser.add_argument('--num_lstm_layer', type=int, default=0, help='Do not use this flag when tesing our model, this is designed for baselines.')
    parser.add_argument('--dep_emb_size', type=int, default=50, help='embedding size of dependency')
    parser.add_argument('--dep_hidden_dim', type=int, default=200, help='hidden size of gcn')
    parser.add_argument('--num_gcn_layers', type=int, default=2, help='number of gcn layers')
    parser.add_argument('--gcn_mlp_layers', type=int, default=0, help='number of mlp layers after gcn')
    parser.add_argument('--gcn_dropout', type=float, default=0.5, help='GCN dropout')
    parser.add_argument('--gcn_adj_directed', type=int, default=0, choices=[0, 1], help='GCN ajacent matrix directed')
    parser.add_argument('--gcn_adj_selfloop', type=int, default=0, choices=[0, 1], help='GCN selfloop in adjacent matrix, now always false as add it in the model')
    parser.add_argument('--gcn_gate', type=int, default=0, choices=[0, 1], help='add edge_wise gating')
    parser.add_argument('--dropout', type=float, default=0.5, help='dropout for embedding')
    parser.add_argument('--use_char_rnn', type=int, default=1, choices=[0, 1], help='use character-level lstm, 0 or 1')
    parser.add_argument('--dep_model', type=str, default='dggcn', choices=['none', 'dggcn', 'dglstm'], help='dg_gcn mode consists of both GCN and Syn-LSTM')
    parser.add_argument('--inter_func', type=str, default='mlp', choices=['concatenation', 'addition', 'mlp'], help='combination method, 0 concat, 1 additon, 2 gcn, 3 more parameter gcn')
    parser.add_argument('--context_emb', type=str, default='none', choices=['none', 'bert', 'elmo', 'flair'], help='contextual word embedding')
    args = parser.parse_args()
    # Echo the full configuration, one "name: value" line per option.
    for key, value in vars(args).items():
        print(f'{key}: {value}')
    return args
def configure(config: Config) -> None:
    """Register the cucumber-json reporting plugin when a path is set.

    Skipped entirely on xdist workers (those have ``workerinput``) so
    only the controller process writes the report.
    """
    path = config.option.cucumber_json_path
    if not path:
        return
    if hasattr(config, 'workerinput'):
        return
    plugin = LogBDDCucumberJSON(path)
    config._bddcucumberjson = plugin
    config.pluginmanager.register(plugin)
class nnUNetTrainer_probabilisticOversampling_010(nnUNetTrainer_probabilisticOversampling):
    """Probabilistic-oversampling trainer variant with a 10% foreground rate."""

    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool=True, device: torch.device=torch.device('cuda')):
        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
        # Assigned after super().__init__() so it overrides whatever value
        # the parent class set for the foreground oversampling fraction.
        self.oversample_foreground_percent = 0.1
class struct_tagLAYERPLANEDESCRIPTOR(Structure):
    """ctypes stand-in for the Win32 LAYERPLANEDESCRIPTOR structure.

    Only ``__slots__`` is declared here; the ctypes field layout
    (``_fields_``) is presumably assigned elsewhere in the generated
    wrapper — confirm before instantiating.
    """
    __slots__ = [
        'nSize',
        'nVersion',
        'dwFlags',
        'iPixelType',
        'cColorBits',
        'cRedBits',
        'cRedShift',
        'cGreenBits',
        'cGreenShift',
        'cBlueBits',
        'cBlueShift',
        'cAlphaBits',
        'cAlphaShift',
        'cAccumBits',
        'cAccumRedBits',
        'cAccumGreenBits',
        'cAccumBlueBits',
        'cAccumAlphaBits',
        'cDepthBits',
        'cStencilBits',
        'cAuxBuffers',
        'iLayerPlane',
        'bReserved',
        'crTransparent',
    ]
def assert_attrs_equal(attrs, attrs_exp, tolerance=0):
    """Recursively assert that *attrs* matches *attrs_exp*.

    Every key of *attrs_exp* must be present with an equal value; keys in
    *attrs* that are absent from *attrs_exp* fail the comparison.  Nested
    dicts are compared recursively.  Numeric values are compared with
    relative *tolerance*; non-numeric values fall back to ``==``.
    """
    unexpected = set(attrs) - set(attrs_exp)
    assert not unexpected, 'Different set of keys: {}'.format(unexpected)
    for key in attrs_exp:
        err_msg = 'Attribute {} does not match expectation'.format(key)
        value = attrs[key]
        if isinstance(value, dict):
            assert_attrs_equal(value, attrs_exp[key], tolerance)
            continue
        try:
            np.testing.assert_allclose(value, attrs_exp[key], rtol=tolerance, err_msg=err_msg)
        except TypeError:
            # Non-numeric (e.g. strings): exact equality.
            assert value == attrs_exp[key], err_msg
class GpioHooks():
    """Per-pin callback registry for GPIO set/reset events.

    Each pin may hold at most one "set" hook and one "reset" hook, stored
    as ``(func, args, kwargs)`` tuples; ``None`` means no hook installed.
    """

    def __init__(self, ql, pin_num):
        self.ql = ql
        self.hook_set_func = [None for _ in range(pin_num)]
        self.hook_reset_func = [None for _ in range(pin_num)]

    def hook_set(self, pin, func, *args, **kwargs):
        """Install *func* as the set-event hook for *pin*."""
        self.hook_set_func[pin] = (func, args, kwargs)

    def hook_reset(self, pin, func, *args, **kwargs):
        """Install *func* as the reset-event hook for *pin*."""
        self.hook_reset_func[pin] = (func, args, kwargs)

    def hook_del_set(self, pin):
        """Remove the set-event hook for *pin*."""
        self.hook_set_func[pin] = None

    def hook_del_reset(self, pin):
        """Remove the reset-event hook for *pin*."""
        self.hook_reset_func[pin] = None

    def call_hook_set(self, pin):
        """Invoke the set-event hook for *pin*, if one is installed."""
        entry = self.hook_set_func[pin]
        if entry is None:
            return
        func, args, kwargs = entry
        func(*args, **kwargs)

    def call_hook_reset(self, pin):
        """Invoke the reset-event hook for *pin*, if one is installed."""
        entry = self.hook_reset_func[pin]
        if entry is None:
            return
        func, args, kwargs = entry
        func(*args, **kwargs)
def parse_args():
    """Parse command-line options for snapshot playback.

    The positional ``file`` argument is required.  ``deterministic``
    defaults to True and is toggled by the paired
    ``--deterministic`` / ``--no-deterministic`` flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str, help='Path to the snapshot file.')
    parser.add_argument('--max-path-length', '-l', type=int, default=1000)
    parser.add_argument('--speedup', '-s', type=float, default=1)
    # On/off flag pair writing to the same destination.
    parser.add_argument('--deterministic', '-d', dest='deterministic', action='store_true')
    parser.add_argument('--no-deterministic', '-nd', dest='deterministic', action='store_false')
    parser.add_argument('--policy_h', type=int)
    parser.set_defaults(deterministic=True)
    return parser.parse_args()
def main(argv):
    """Run one codec benchmark and emit the results as JSON.

    Registers a sub-parser per codec class, evaluates the selected codec
    over the requested dataset/qualities, prints the report to stdout,
    and writes it to ``<output_dir>/<name>.json``.
    """
    import time
    t_begin = time.time()
    parser, subparsers = setup_args()
    # One sub-command per available codec class.
    for cls in codecs:
        sub = subparsers.add_parser(cls.__name__.lower(), help=f'{cls.__name__}')
        setup_common_args(sub)
        cls.setup_args(sub)
    args = parser.parse_args(argv)
    selected_cls = next(c for c in codecs if c.__name__.lower() == args.codec)
    codec = selected_cls(args)
    results = collect(codec, args.dataset, args.qualities, args.num_jobs)
    output = {'name': codec.name, 'description': codec.description, 'results': results}
    print(json.dumps(output, indent=2))
    t_end = time.time()
    print('total time:', (t_end - t_begin))
    # NOTE(review): hard-coded, machine-specific output directory —
    # consider making this configurable.
    output_dir = '/home/felix/disk2/compressai_v2/codes/results/log'
    output_json_path = os.path.join(output_dir, (args.name + '.json'))
    with open(output_json_path, 'w') as f:
        json.dump(output, f, indent=2)
def test_convert_outer_out_to_in_mit_sot():
    """Convert a random-variable outer output of a Scan with multiple input
    taps (MIT-SOT) into an outer input, and check the rewritten scan
    computes the same log-probabilities as a hand-built reference scan."""
    rng_state = np.random.default_rng(1234)
    rng_tt = pytensor.shared(rng_state, name='rng', borrow=True)
    rng_tt.tag.is_rng = True
    rng_tt.default_update = rng_tt
    def input_step_fn(y_tm1, y_tm2, rng):
        # AR(2)-style generative step: Y_t ~ Normal(y_{t-1} + y_{t-2}, 1).
        y_tm1.name = 'y_tm1'
        y_tm2.name = 'y_tm2'
        return pt.random.normal((y_tm1 + y_tm2), 1.0, rng=rng, name='Y_t')
    (Y_rv, _) = pytensor.scan(fn=input_step_fn, outputs_info=[{'initial': pt.as_tensor_variable(np.r_[((- 1.0), 0.0)]), 'taps': [(- 1), (- 2)]}], non_sequences=[rng_tt], n_steps=10)
    Y_rv.name = 'Y_rv'
    # NOTE(review): Y_rv appears to be a view into the scan's full output
    # (Y_all, including the initial taps) — confirm the tap layout.
    Y_all = Y_rv.owner.inputs[0]
    Y_all.name = 'Y_all'
    # Fixed observations, obtained by sampling the generative scan once.
    Y_obs = pt.as_tensor_variable(Y_rv.eval())
    Y_obs.name = 'Y_obs'
    input_scan_args = ScanArgs.from_node(Y_rv.owner.inputs[0].owner)
    def output_step_fn(y_t, y_tm1, y_tm2):
        # Reference log-prob step for the same AR(2) normal model.
        y_t.name = 'y_t'
        y_tm1.name = 'y_tm1'
        y_tm2.name = 'y_tm2'
        logp = _logprob_helper(pt.random.normal((y_tm1 + y_tm2), 1.0), y_t)
        logp.name = 'logp(y_t)'
        return logp
    (Y_logp, _) = pytensor.scan(fn=output_step_fn, sequences=[{'input': Y_obs, 'taps': [0, (- 1), (- 2)]}], outputs_info=[{}])
    (oo_idx, oo_var, io_var) = get_random_outer_outputs(input_scan_args)[0]
    value_map = {Y_all: Y_obs}
    test_scan_args = convert_outer_out_to_in(input_scan_args, [oo_var], value_map, inner_out_fn=create_inner_out_logp)
    (scan_out, updates) = construct_scan(test_scan_args)
    # The converted scan must reproduce the reference log-probs exactly.
    res = scan_out[oo_idx].eval()
    exp_res = Y_logp.eval()
    assert np.array_equal(res, exp_res)
class Command(BaseCommand):
    """Re-host success-story images.

    Downloads each legacy ``/files/success...`` image referenced by a
    success page, saves it through the Image model, and rewrites the
    page content to point at the new local URL.
    """

    # Host the legacy image paths are fetched from.
    # NOTE(review): the original source line was truncated ("url = f'"),
    # so this base URL is reconstructed — confirm it against the site the
    # images were originally served from.
    SOURCE_BASE_URL = 'https://www.djangoproject.com'

    def get_success_pages(self):
        """Return all pages whose path starts with about/success/."""
        return Page.objects.filter(path__startswith='about/success/')

    def image_url(self, path):
        """Translate a filesystem path under MEDIA_ROOT to its public URL."""
        new_url = path.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)
        return new_url.replace('//', '/')

    def fix_image(self, path, page):
        """Download *path* from the legacy host and attach it to *page*.

        Returns the new local URL, or None when the download fails.
        """
        url = f'{self.SOURCE_BASE_URL}{path}'
        r = requests.get(url)
        if r.status_code != 200:
            print(f"ERROR Couldn't load {url}")
            return None
        img = Image()
        img.page = page
        filename = os.path.basename(urlparse(url).path)
        output_path = page_image_path(img, filename)
        directory = os.path.dirname(output_path)
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(directory, exist_ok=True)
        with open(output_path, 'wb') as f:
            f.write(r.content)
        # Re-open and hand the file to Django so the Image record stores it
        # (context manager closes the handle, which the original leaked).
        with open(output_path, 'rb') as reopen:
            img.image.save(filename, File(reopen), save=True)
        return self.image_url(output_path)

    def find_image_paths(self, page):
        """Return the set of legacy /files/success image paths in *page*."""
        content = page.content.raw
        paths = set(re.findall('(/files/success.*)\\b', content))
        if paths:
            print(f'Found {len(paths)} matches in {page.path}')
        return paths

    def process_success_story(self, page):
        """Rewrite every legacy image reference on *page* to its new URL."""
        image_paths = self.find_image_paths(page)
        for path in image_paths:
            new_url = self.fix_image(path, page)
            if new_url is None:
                # Download failed: keep the original reference untouched.
                # (Previously this crashed on str.replace(path, None).)
                continue
            print(f' Fixing {path} -> {new_url}')
            content = page.content.raw
            new_content = content.replace(path, new_url)
            page.content = new_content
            page.save()

    def handle(self, *args, **kwargs):
        """Entry point: fix images on every success page."""
        self.pages = self.get_success_pages()
        print(f'Found {len(self.pages)} success pages')
        for p in self.pages:
            self.process_success_story(p)
def test_multiple_inheritance_python():
    """Multiple inheritance mixing the bound C++ bases m.Base1 (foo) and
    m.Base2 (bar) with pure-Python classes, in several base orders and
    depths; checks foo()/bar()/v() resolve correctly in each layout."""
    # Both C++ bases, each initialized explicitly.
    class MI1(m.Base1, m.Base2):
        def __init__(self, i, j):
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)
    # Pure-Python base providing v().
    class B1(object):
        def v(self):
            return 1
    class MI2(B1, m.Base1, m.Base2):
        def __init__(self, i, j):
            B1.__init__(self)
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)
    class MI3(MI2):
        def __init__(self, i, j):
            MI2.__init__(self, i, j)
    # Repeats m.Base2 as a direct base and re-initializes it with i + 100.
    class MI4(MI3, m.Base2):
        def __init__(self, i, j):
            MI3.__init__(self, i, j)
            m.Base2.__init__(self, (i + 100))
    class MI5(m.Base2, B1, m.Base1):
        def __init__(self, i, j):
            B1.__init__(self)
            m.Base1.__init__(self, i)
            m.Base2.__init__(self, j)
    class MI6(m.Base2, B1):
        def __init__(self, i):
            m.Base2.__init__(self, i)
            B1.__init__(self)
    # Python-side override hierarchy for v().
    class B2(B1):
        def v(self):
            return 2
    class B3(object):
        def v(self):
            return 3
    class B4(B3, B2):
        def v(self):
            return 4
    class MI7(B4, MI6):
        def __init__(self, i):
            B4.__init__(self)
            MI6.__init__(self, i)
    class MI8(MI6, B3):
        def __init__(self, i):
            MI6.__init__(self, i)
            B3.__init__(self)
    class MI8b(B3, MI6):
        def __init__(self, i):
            B3.__init__(self)
            MI6.__init__(self, i)
    mi1 = MI1(1, 2)
    assert (mi1.foo() == 1)
    assert (mi1.bar() == 2)
    mi2 = MI2(3, 4)
    assert (mi2.v() == 1)
    assert (mi2.foo() == 3)
    assert (mi2.bar() == 4)
    mi3 = MI3(5, 6)
    assert (mi3.v() == 1)
    assert (mi3.foo() == 5)
    assert (mi3.bar() == 6)
    # NOTE(review): bar() reports 8 despite the second
    # m.Base2.__init__(self, i + 100) call — binding-layer behavior to
    # confirm against the pybind11 test suite.
    mi4 = MI4(7, 8)
    assert (mi4.v() == 1)
    assert (mi4.foo() == 7)
    assert (mi4.bar() == 8)
    mi5 = MI5(10, 11)
    assert (mi5.v() == 1)
    assert (mi5.foo() == 10)
    assert (mi5.bar() == 11)
    mi6 = MI6(12)
    assert (mi6.v() == 1)
    assert (mi6.bar() == 12)
    # MRO resolves v() to B4's override ahead of B1's.
    mi7 = MI7(13)
    assert (mi7.v() == 4)
    assert (mi7.bar() == 13)
    mi8 = MI8(14)
    assert (mi8.v() == 1)
    assert (mi8.bar() == 14)
    # With B3 listed first, its v() wins over B1's (inherited via MI6).
    mi8b = MI8b(15)
    assert (mi8b.v() == 3)
    assert (mi8b.bar() == 15)
class Issue(Model):
    """Schematics model for a GitHub-style issue payload.

    Field names and shapes mirror the REST API response; optional
    sections use ``serialize_when_none=False`` so they are omitted from
    serialized output when absent.
    """
    # Identity and API/HTML URLs.
    id = IntType(required=True)
    node_id = StringType(required=True)
    url = StringType(required=True)
    repository_url = StringType(required=True)
    labels_url = StringType(required=True)
    comments_url = StringType(required=True)
    events_url = StringType(required=True)
    html_url = StringType(required=True)
    # Core issue fields.
    number = IntType(required=True)
    state = EnumValueType(IssueState, required=True)
    state_reason = EnumValueType(StateReason, required=True)
    title = StringType(required=True)
    user = ModelType(SimpleUser)
    labels = ListType(ModelType(Label, required=True), required=True)
    assignee = ModelType(SimpleUser)
    assignees = ListType(ModelType(SimpleUser, required=True))
    locked = BooleanType(required=True)
    active_lock_reason = StringType()
    comments = IntType(required=True)
    # Timestamps serialized at ISO-8601 second resolution.
    closed_at = DateTimeType(serialized_format='%Y-%m-%dT%H:%M:%S')
    created_at = DateTimeType(serialized_format='%Y-%m-%dT%H:%M:%S')
    updated_at = DateTimeType(serialized_format='%Y-%m-%dT%H:%M:%S')
    author_association = EnumValueType(AuthorAssociation, required=True)
    # Optional sections, dropped from output when None.
    reactions = ModelType(Reactions, serialize_when_none=False)
    pull_request = ModelType(PullRequest, serialize_when_none=False)
    body_html = StringType(serialize_when_none=False)
    body_text = StringType(serialize_when_none=False)
    timeline_url = StringType(serialize_when_none=False)
    body = StringType(serialize_when_none=False)
.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
.slow
def test_cc_helper_rhf():
    """KRCCSD with SingleFactorization-approximated ERIs.

    With the full Cholesky factor, the approximate integrals must
    reproduce the reference MP2 and CCSD energies; with a truncated
    factor (naux=10) every ERI block and both energies must differ.
    """
    # Two-carbon cell, small GTH basis/pseudopotential, 1x1x3 k-mesh.
    cell = gto.Cell()
    cell.atom = '\n C 0. 0. 0.\n C 1. 1. 1.\n '
    cell.basis = 'gth-szv'
    cell.pseudo = 'gth-hf-rev'
    cell.a = '\n 0., 3., 3.\n 3., 0., 3.\n 3., 3., 0.'
    cell.unit = 'B'
    cell.verbose = 0
    cell.build()
    kmesh = [1, 1, 3]
    kpts = cell.make_kpts(kmesh)
    mf = scf.KRHF(cell, kpts).rs_density_fit()
    # Warm-start SCF from the precomputed test checkpoint file.
    scf_dict = chkfile.load(_TEST_CHK, 'scf')
    mf.__dict__.update(scf_dict)
    mf.with_df._cderi = _TEST_CHK
    dm0 = mf.make_rdm1()
    mf.with_df.mesh = cell.mesh
    mf.kernel(dm0=dm0)
    mymp = mp.KMP2(mf)
    Luv = cholesky_from_df_ints(mymp)
    cc_inst = KRCCSD(mf)
    ref_eris = cc_inst.ao2mo()
    (emp2_ref, _, _) = cc_inst.init_amps(ref_eris)
    # Full factorization: every ERI block must match the exact integrals.
    helper = SingleFactorization(cholesky_factor=Luv, kmf=mf)
    test_eris = build_approximate_eris(cc_inst, helper)
    eri_blocks = ['oooo', 'ooov', 'oovv', 'ovov', 'voov', 'vovv']
    for block in eri_blocks:
        assert np.allclose(test_eris.__dict__[block][:], ref_eris.__dict__[block][:])
    (emp2_approx, _, _) = cc_inst.init_amps(test_eris)
    assert (abs((emp2_approx - emp2_ref)) < 1e-12)
    # Truncated factorization (naux=10): blocks and MP2 energy must differ.
    helper = SingleFactorization(cholesky_factor=Luv, kmf=mf, naux=10)
    test_eris_approx = build_approximate_eris(cc_inst, helper)
    for block in eri_blocks:
        assert (not np.allclose(test_eris_approx.__dict__[block][:], ref_eris.__dict__[block][:]))
    (emp2_approx, _, _) = cc_inst.init_amps(test_eris_approx)
    assert (abs((emp2_approx - emp2_ref)) > 1e-12)
    # Same comparison at the CCSD level: exact vs full-factor approximate.
    cc_exact = KRCCSD(mf)
    (ecc_exact, _, _) = cc_exact.kernel()
    cc_approx = KRCCSD(mf)
    helper = SingleFactorization(cholesky_factor=Luv, kmf=mf)
    test_eris = build_approximate_eris(cc_approx, helper)
    # Monkey-patch ao2mo so the CC kernel consumes the approximate ERIs.
    cc_approx.ao2mo = (lambda mo_coeff=None: test_eris)
    (emp2_approx, _, _) = cc_approx.init_amps(test_eris)
    (ecc_approx, _, _) = cc_approx.kernel()
    assert (abs((ecc_exact - ecc_approx)) < 1e-12)
    # Truncated factor again: the CCSD energy must deviate.
    helper = SingleFactorization(cholesky_factor=Luv, kmf=mf, naux=10)
    test_eris = build_approximate_eris(cc_approx, helper)
    for block in eri_blocks:
        assert (not np.allclose(test_eris.__dict__[block][:], ref_eris.__dict__[block][:]))
    cc_approx = KRCCSD(mf)
    cc_approx.ao2mo = (lambda mo_coeff=None: test_eris)
    (ecc_approx, _, _) = cc_approx.kernel()
    assert (abs((ecc_exact - ecc_approx)) > 1e-12)
class QuantPruneTest(unittest.TestCase):
    ((torch.cuda.device_count() <= 1), 'Not enough GPUs available')
    def test_qebc_pruned_tw(self) -> None:
        """Table-wise sharding of a quantized EBC with one pruned table.

        The sharded model must match the unsharded quantized model, survive
        fx symbolic trace + TorchScript save/load, and expose the pruned
        shapes in its weight spec."""
        batch_size: int = 4
        world_size = 2
        local_device = torch.device('cuda:0')
        num_embedding = 100
        emb_dim = 64
        pruned_entry = 40
        # Per table: (num_embeddings, emb_dim, rows remaining after pruning).
        table_specs: List[Tuple[(int, int, int)]] = [(num_embedding, emb_dim, num_embedding), (num_embedding, emb_dim, (num_embedding - pruned_entry))]
        mi = create_test_model(num_embedding, emb_dim, world_size, batch_size, dense_device=local_device, sparse_device=local_device, quant_state_dict_split_scale_bias=True, num_features=len(table_specs))
        pruning_ebc_dict: Dict[(str, torch.Tensor)] = {}
        table_1_spec = table_specs[1]
        table_1_num_emb: int = table_1_spec[0]
        table_1_pruned_num_emb: int = table_1_spec[2]
        # Remapping for table_1: tail rows map to 0..N-1; the rest stay -1
        # (-1 presumably marks a pruned row — matches the helper's naming).
        table_1_remapping_indices = torch.full(fill_value=(- 1), size=[table_1_num_emb], dtype=torch.int32)
        table_1_remapping_indices[(- table_1_pruned_num_emb):] = torch.arange(table_1_pruned_num_emb, dtype=torch.int32)
        pruning_ebc_dict['table_1'] = table_1_remapping_indices
        quant_model = prune_and_quantize_model(mi.model, pruning_ebc_dict)
        mi.quant_model = quant_model
        # One shard per table, placed on alternating ranks; table_1's shard
        # uses the post-pruning row count.
        expected_shards = [[((0, 0, table_specs[0][2], table_specs[0][1]), 'rank:0/cuda:0')], [((0, 0, table_specs[1][2], table_specs[1][1]), 'rank:1/cuda:1')]]
        sharded_model = shard_qebc(mi, sharding_type=ShardingType.TABLE_WISE, device=local_device, expected_shards=expected_shards)
        inputs = [model_input_to_forward_args(inp.to(local_device)) for inp in prep_inputs(mi, world_size, batch_size, long_indices=False)]
        sharded_model.load_state_dict(quant_model.state_dict())
        # Unsharded vs sharded outputs must agree.
        quant_output = quant_model(*inputs[0])
        sharded_output = sharded_model(*inputs[0])
        assert_close(quant_output, sharded_output)
        # Round-trip through fx trace and TorchScript serialization.
        gm: torch.fx.GraphModule = symbolic_trace(sharded_model)
        gm_script = torch.jit.script(gm)
        buffer = io.BytesIO()
        torch.jit.save(gm_script, buffer)
        buffer.seek(0)
        loaded_gm_script = torch.jit.load(buffer)
        gm_script_output = loaded_gm_script(*inputs[0])
        assert_close(quant_output, gm_script_output)
        weights_spec: Dict[(str, WeightSpec)] = sharded_tbes_weights_spec(sharded_model)
        assert_weight_spec(weights_spec, expected_shards, '_module.sparse.ebc', 'embedding_bags', ['table_0', 'table_1'], ShardingType.TABLE_WISE.value)
    ((torch.cuda.device_count() <= 1), 'Not enough GPUs available')
    def test_qebc_pruned_tw_one_ebc(self) -> None:
        """Single-table quantized EBC where every even row is pruned.

        The sharded model must match the unsharded one, and lookups that
        hit pruned rows must come back as all-zero vectors."""
        batch_size: int = 1
        # (num_embeddings, emb_dim, rows remaining after pruning).
        table_specs: List[Tuple[(int, int, int)]] = [(200, 10, 100)]
        world_size = 2
        local_device = torch.device('cuda:0')
        topology: Topology = Topology(world_size=world_size, compute_device=local_device.type)
        tables = [EmbeddingBagConfig(num_embeddings=num_emb, embedding_dim=emb_dim, name=('table_' + str(i)), feature_names=[('feature_' + str(i))]) for (i, (num_emb, emb_dim, _)) in enumerate(table_specs)]
        model = torch.nn.Sequential(EmbeddingBagCollection(tables=tables, device=local_device))
        model.to(local_device)
        model.training = False
        pruning_ebc_dict: Dict[(str, torch.Tensor)] = {}
        table_0_spec = table_specs[0]
        table_0_num_emb: int = table_0_spec[0]
        table_0_emb_dim: int = table_0_spec[1]
        # Keep odd rows only: row i maps to i // 2; even rows stay at -1
        # (-1 presumably marks a pruned row — matches the attribute name).
        table_0_remapping_indices = torch.full(fill_value=(- 1), size=[table_0_num_emb], dtype=torch.int32)
        for i in range(200):
            if ((i % 2) == 0):
                continue
            table_0_remapping_indices[i] = (i // 2)
        pruning_ebc_dict['table_0'] = table_0_remapping_indices
        setattr(model[0], MODULE_ATTR_EMB_CONFIG_NAME_TO_PRUNING_INDICES_REMAPPING_DICT, pruning_ebc_dict)
        quant_state_dict_split_scale_bias = True
        quant_model = quantize(module=model, inplace=False, quant_state_dict_split_scale_bias=quant_state_dict_split_scale_bias)
        # Unwrap from the Sequential container.
        quant_model = quant_model[0]
        sharder = TestQuantEBCSharder(sharding_type=ShardingType.TABLE_WISE.value, kernel_type=EmbeddingComputeKernel.QUANT.value, shardable_params=[table.name for table in tables])
        quant_model_copy = copy.deepcopy(quant_model)
        # NOTE(review): rebinds `topology`, shadowing the one created above.
        topology: Topology = Topology(world_size=world_size, compute_device='cuda')
        planner = EmbeddingShardingPlanner(topology=topology, batch_size=batch_size, enumerator=EmbeddingEnumerator(topology=topology, batch_size=batch_size, estimator=[EmbeddingPerfEstimator(topology=topology, is_inference=True), EmbeddingStorageEstimator(topology=topology)]))
        plan = planner.plan(quant_model_copy, [sharder])
        sharded_model = _shard_modules(module=quant_model_copy, sharders=[sharder], device=local_device, plan=plan, env=ShardingEnv.from_local(world_size=2, rank=0))
        sharded_model.load_state_dict(quant_model.state_dict())
        # One-hot-length lookup of rows 0, 1, 2 for feature_0.
        kjt = KeyedJaggedTensor.from_lengths_sync(keys=['feature_0'], values=torch.tensor([0, 1, 2], dtype=torch.int32).cuda(), lengths=torch.tensor([1, 1, 1], dtype=torch.int32).cuda(), weights=None)
        q_output = quant_model(kjt)
        s_output = sharded_model(kjt)
        assert_close(q_output['feature_0'], s_output['feature_0'])
        # Rows 0 and 2 are even, hence pruned: lookups return zero vectors.
        assert_close(q_output['feature_0'][0], torch.tensor(([0.0] * table_0_emb_dim)))
        assert_close(q_output['feature_0'][2], torch.tensor(([0.0] * table_0_emb_dim)))
# NOTE(review): the bare tuple below is almost certainly the residue of a stripped
# `@unittest.skipIf(torch.cuda.device_count() <= 1, 'Not enough GPUs available')`
# decorator -- confirm against upstream history and restore the decorator.
((torch.cuda.device_count() <= 1), 'Not enough GPUs available')
def test_qebc_pruned_cw(self) -> None:
    """Column-wise sharding of a pruned + quantized EmbeddingBagCollection:
    the sharded model must match the unsharded quantized model, survive an
    fx-trace + TorchScript save/load round-trip, and expose the expected
    per-shard weight spec."""
    batch_size: int = 4
    world_size = 2
    local_device = torch.device('cuda:0')
    num_embedding = 200
    emb_dim = 512
    pruned_entry = 100
    # (num_embeddings, embedding_dim, rows remaining after pruning)
    table_specs: List[Tuple[(int, int, int)]] = [(num_embedding, emb_dim, (num_embedding - pruned_entry))]
    mi = create_test_model(num_embedding, emb_dim, world_size, batch_size, dense_device=local_device, sparse_device=local_device, quant_state_dict_split_scale_bias=True, num_features=len(table_specs))
    pruning_ebc_dict: Dict[(str, torch.Tensor)] = {}
    table_0_spec = table_specs[0]
    table_0_num_emb: int = table_0_spec[0]
    table_0_pruned_num_emb: int = table_0_spec[2]
    # -1 marks a pruned row; keep only the *last* `table_0_pruned_num_emb`
    # rows, remapped onto a dense 0..N-1 index range.
    table_0_remapping_indices = torch.full(fill_value=(- 1), size=[table_0_num_emb], dtype=torch.int32)
    table_0_remapping_indices[(- table_0_pruned_num_emb):] = torch.arange(table_0_pruned_num_emb, dtype=torch.int32)
    pruning_ebc_dict['table_0'] = table_0_remapping_indices
    quant_model = prune_and_quantize_model(mi.model, pruning_ebc_dict)
    mi.quant_model = quant_model
    # CW sharding of a 512-dim table into 4 shards of 128 columns each,
    # placed alternately on rank 0 and rank 1.
    expected_shards = [[((0, 0, table_specs[0][2], (table_specs[0][1] // 4)), 'rank:0/cuda:0'), ((0, 128, table_specs[0][2], (table_specs[0][1] // 4)), 'rank:1/cuda:1'), ((0, 256, table_specs[0][2], (table_specs[0][1] // 4)), 'rank:0/cuda:0'), ((0, 384, table_specs[0][2], (table_specs[0][1] // 4)), 'rank:1/cuda:1')]]
    sharded_model = shard_qebc(mi, sharding_type=ShardingType.COLUMN_WISE, device=local_device, expected_shards=expected_shards)
    inputs = [model_input_to_forward_args(inp.to(local_device)) for inp in prep_inputs(mi, world_size, batch_size, long_indices=False)]
    sharded_model.load_state_dict(quant_model.state_dict())
    # Sharded and unsharded quantized models must agree on the same input.
    quant_output = quant_model(*inputs[0])
    sharded_output = sharded_model(*inputs[0])
    assert_close(quant_output, sharded_output)
    # fx-trace, script, and round-trip through an in-memory buffer to verify
    # the sharded model is serializable and numerically stable.
    gm: torch.fx.GraphModule = symbolic_trace(sharded_model)
    gm_script = torch.jit.script(gm)
    buffer = io.BytesIO()
    torch.jit.save(gm_script, buffer)
    buffer.seek(0)
    loaded_gm_script = torch.jit.load(buffer)
    gm_script_output = loaded_gm_script(*inputs[0])
    assert_close(quant_output, gm_script_output)
    weights_spec: Dict[(str, WeightSpec)] = sharded_tbes_weights_spec(sharded_model)
    assert_weight_spec(weights_spec, expected_shards, '_module.sparse.ebc', 'embedding_bags', ['table_0'], ShardingType.COLUMN_WISE.value)
# NOTE(review): the bare tuple below is almost certainly the residue of a stripped
# `@unittest.skipIf(torch.cuda.device_count() <= 1, 'Not enough GPUs available')`
# decorator -- confirm against upstream history and restore the decorator.
((torch.cuda.device_count() <= 1), 'Not enough GPUs available')
def test_qebc_pruned_cw_one_ebc(self) -> None:
    """Build one quantized EmbeddingBagCollection with every even row pruned,
    shard it column-wise, and check that (a) sharded output matches the
    unsharded quantized model and (b) pruned ids embed to all-zero vectors."""
    batch_size: int = 1
    # (num_embeddings, embedding_dim, rows kept after pruning)
    table_specs: List[Tuple[(int, int, int)]] = [(200, 512, 100)]
    world_size = 2
    local_device = torch.device('cuda:0')
    topology: Topology = Topology(world_size=world_size, compute_device=local_device.type)
    tables = [EmbeddingBagConfig(num_embeddings=num_emb, embedding_dim=emb_dim, name=('table_' + str(i)), feature_names=[('feature_' + str(i))]) for (i, (num_emb, emb_dim, _)) in enumerate(table_specs)]
    model = torch.nn.Sequential(EmbeddingBagCollection(tables=tables, device=local_device))
    model.to(local_device)
    model.training = False
    pruning_ebc_dict: Dict[(str, torch.Tensor)] = {}
    table_0_spec = table_specs[0]
    table_0_num_emb: int = table_0_spec[0]
    table_0_emb_dim: int = table_0_spec[1]
    # -1 marks a pruned row: even ids stay pruned, odd id i maps to row i // 2.
    table_0_remapping_indices = torch.full(fill_value=(- 1), size=[table_0_num_emb], dtype=torch.int32)
    for i in range(200):
        if ((i % 2) == 0):
            continue
        table_0_remapping_indices[i] = (i // 2)
    pruning_ebc_dict['table_0'] = table_0_remapping_indices
    # Attach the remapping so quantize() applies pruning to this table.
    setattr(model[0], MODULE_ATTR_EMB_CONFIG_NAME_TO_PRUNING_INDICES_REMAPPING_DICT, pruning_ebc_dict)
    quant_state_dict_split_scale_bias = True
    quant_model = quantize(module=model, inplace=False, quant_state_dict_split_scale_bias=quant_state_dict_split_scale_bias)
    quant_model = quant_model[0]
    sharder = TestQuantEBCSharder(sharding_type=ShardingType.COLUMN_WISE.value, kernel_type=EmbeddingComputeKernel.QUANT.value, shardable_params=[table.name for table in tables])
    quant_model_copy = copy.deepcopy(quant_model)
    topology: Topology = Topology(world_size=world_size, compute_device='cuda')
    planner = EmbeddingShardingPlanner(topology=topology, batch_size=batch_size, enumerator=EmbeddingEnumerator(topology=topology, batch_size=batch_size, estimator=[EmbeddingPerfEstimator(topology=topology, is_inference=True), EmbeddingStorageEstimator(topology=topology)]))
    plan = planner.plan(quant_model_copy, [sharder])
    sharded_model = _shard_modules(module=quant_model_copy, sharders=[sharder], device=local_device, plan=plan, env=ShardingEnv.from_local(world_size=2, rank=0))
    sharded_model.load_state_dict(quant_model.state_dict())
    # Lookup ids: 0, 2, 198 are even (pruned); 1, 197, 199 survive pruning.
    kjt = KeyedJaggedTensor.from_lengths_sync(keys=['feature_0'], values=torch.tensor([0, 1, 2, 197, 198, 199], dtype=torch.int32).cuda(), lengths=torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.int32).cuda(), weights=None)
    q_output = quant_model(kjt)
    s_output = sharded_model(kjt)
    assert_close(q_output['feature_0'], s_output['feature_0'])
    # Output rows 0, 2, 4 correspond to pruned ids 0, 2, 198 -> all zeros.
    assert_close(q_output['feature_0'][0], torch.tensor(([0.0] * table_0_emb_dim)))
    assert_close(q_output['feature_0'][2], torch.tensor(([0.0] * table_0_emb_dim)))
    assert_close(q_output['feature_0'][4], torch.tensor(([0.0] * table_0_emb_dim)))
def is_user_an_admin():
    """Best-effort check for elevated privileges.

    On Windows, probe readability of %SystemRoot%\\temp (admin-only by
    default). Elsewhere, report True only when running as root *via sudo*
    (``SUDO_USER`` set and effective uid 0).
    """
    import os
    if os.name != 'nt':
        # POSIX branch: a plain root shell without sudo intentionally
        # yields False -- both conditions must hold.
        return 'SUDO_USER' in os.environ and os.geteuid() == 0
    probe_dir = os.sep.join([os.environ.get('SystemRoot', 'C:\\windows'), 'temp'])
    try:
        os.listdir(probe_dir)
    except Exception:
        return False
    return True
class AppBuildTelemetry(BaseModel, extra='forbid'):
    """Telemetry payload describing one build/startup of the application.

    ``extra='forbid'`` makes pydantic reject unknown fields, so incoming
    payloads must match this schema exactly.
    """
    # Application name and version as reported by the build (required).
    name: str = Field(..., description='')
    version: str = Field(..., description='')
    # Optional nested payloads; None when the client did not report them.
    features: Optional['AppFeaturesTelemetry'] = Field(default=None, description='')
    system: Optional['RunningEnvironmentTelemetry'] = Field(default=None, description='')
    # Application startup timestamp (required).
    # NOTE(review): timezone expectations (naive vs aware) are not visible here -- confirm at the sender.
    startup: datetime = Field(..., description='')
def create_manifest_for_testing(repository, differentiation_field='1', include_shared_blob=False):
    """Build and persist a minimal Docker schema2 manifest for tests.

    Args:
        repository: repository row the manifest is attached to.
        differentiation_field: string mixed into the remote layer URL so that
            repeated calls produce manifests with distinct digests.
        include_shared_blob: when True, also add a locally-stored layer blob.

    Returns:
        (created_manifest_row, schema2_manifest) tuple.
    """
    # Minimal image config blob (empty config, no layers in rootfs history).
    layer_json = json.dumps({'config': {}, 'rootfs': {'type': 'layers', 'diff_ids': []}, 'history': []})
    (_, config_digest) = _populate_blob(layer_json)
    remote_digest = sha256_digest(b'something')
    builder = DockerSchema2ManifestBuilder()
    builder.set_config_digest(config_digest, len(layer_json.encode('utf-8')))
    # FIX: the urls literal was garbled in the source (`(' + differentiation_field)`
    # has an unbalanced quote and cannot parse). Any remote URL works here;
    # appending differentiation_field keeps each manifest's digest unique.
    builder.add_layer(remote_digest, 1234, urls=[('http://remote/layer/' + differentiation_field)])
    if include_shared_blob:
        (_, blob_digest) = _populate_blob('some data here')
        builder.add_layer(blob_digest, 4567)
    manifest = builder.build()
    created = get_or_create_manifest(repository, manifest, storage, raise_on_error=True)
    assert created
    return (created.manifest, manifest)
def get_metadata(path: str) -> 'Metadata':
    """Read artist/title/duration metadata for *path* via mutagen.

    Falls back to an empty artist and the file name as title when tags are
    missing, and to -1 when the duration is unknown.

    Raises:
        ValueError: when mutagen cannot parse the file at all.
    """
    parsed = mutagen.File(path, easy=True)
    if parsed is None:
        raise ValueError
    meta: 'Metadata' = {}
    tags = parsed.tags
    if tags is not None:
        # Easy-mode tags store lists; keep only the first entry of each.
        for key in ('artist', 'title'):
            if key in tags:
                meta[key] = tags[key][0]
    meta.setdefault('artist', '')
    meta.setdefault('title', os.path.split(path)[1])
    info = parsed.info
    if info is not None and info.length is not None:
        meta['duration'] = info.length
    else:
        meta['duration'] = -1
    meta['cached'] = True
    return meta
def download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir):
    """Download the single clip described by *row*, skipping existing files.

    Returns a (clip_id, success, message) status tuple.
    """
    output_filename = construct_video_filename(row, label_to_dir, trim_format)
    clip_id = os.path.basename(output_filename).split('.mp4')[0]
    if os.path.exists(output_filename):
        # Already fetched on a previous run -- nothing to do.
        return (clip_id, True, 'Exists')
    downloaded, log = download_clip(row['video-id'], output_filename, row['start-time'], row['end-time'], tmp_dir=tmp_dir)
    return (clip_id, downloaded, log)
def get_peft_model_state_dict(model, state_dict=None, adapter_name='default'):
    """Extract the sub-state-dict belonging to one PEFT adapter.

    Args:
        model: a PEFT-wrapped model exposing ``peft_config`` and
            ``modules_to_save``.
        state_dict: optional pre-fetched state dict; defaults to
            ``model.state_dict()``.
        adapter_name: which adapter's weights to extract.

    Returns:
        dict of tensors with the ``.{adapter_name}`` suffix stripped from keys.

    Raises:
        NotImplementedError: for unsupported bias modes or peft types.
    """
    config = model.peft_config[adapter_name]
    if (state_dict is None):
        state_dict = model.state_dict()
    if (config.peft_type in (PeftType.LORA, PeftType.ADALORA)):
        # LoRA family: which bias terms are exported depends on config.bias.
        bias = config.bias
        if (bias == 'none'):
            to_return = {k: state_dict[k] for k in state_dict if ('lora_' in k)}
        elif (bias == 'all'):
            to_return = {k: state_dict[k] for k in state_dict if (('lora_' in k) or ('bias' in k))}
        elif (bias == 'lora_only'):
            # Export only biases of modules that themselves carry LoRA weights.
            to_return = {}
            for k in state_dict:
                if ('lora_' in k):
                    to_return[k] = state_dict[k]
                    bias_name = (k.split('lora_')[0] + 'bias')
                    if (bias_name in state_dict):
                        to_return[bias_name] = state_dict[bias_name]
        else:
            raise NotImplementedError
        # Narrow to this adapter's lora weights (bias entries are kept as-is).
        to_return = {k: v for (k, v) in to_return.items() if ((('lora_' in k) and (adapter_name in k)) or ('bias' in k))}
        if (config.peft_type == PeftType.ADALORA):
            # AdaLoRA keeps a per-layer rank pattern: strip the adapter suffix
            # from its keys and resize the exported tensors accordingly.
            rank_pattern = config.rank_pattern
            if (rank_pattern is not None):
                rank_pattern = {k.replace(f'.{adapter_name}', ''): v for (k, v) in rank_pattern.items()}
                config.rank_pattern = rank_pattern
                to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)
    elif (config.peft_type == PeftType.ADAPTION_PROMPT):
        # Adaption-prompt: export every parameter whose leaf name starts with 'adaption_'.
        to_return = {k: state_dict[k] for k in state_dict if k.split('.')[(- 1)].startswith('adaption_')}
    elif isinstance(config, PromptLearningConfig):
        # Prompt learning exports only the learned prompt embeddings.
        to_return = {}
        if config.inference_mode:
            prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
        else:
            prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)
        to_return['prompt_embeddings'] = prompt_embeddings
    else:
        raise NotImplementedError
    if (model.modules_to_save is not None):
        # Also export full copies of any extra modules the user asked to save.
        for (key, value) in state_dict.items():
            if any(((f'{module_name}.modules_to_save.{adapter_name}' in key) for module_name in model.modules_to_save)):
                to_return[key.replace('modules_to_save.', '')] = value
    to_return = {k.replace(f'.{adapter_name}', ''): v for (k, v) in to_return.items()}
    return to_return
def path_typed_attrs(draw: DrawFn, defaults: Optional[bool]=None, kw_only: Optional[bool]=None) -> Tuple[(_CountingAttr, SearchStrategy[Path])]:
    """Hypothesis helper: draw an attrs ``Path`` field plus a value strategy.

    ``defaults``/``kw_only`` of None mean "let Hypothesis decide"; True/False
    force the corresponding field configuration.
    """
    from string import ascii_lowercase

    # Draw order matters for Hypothesis shrinking: default-presence boolean,
    # then (optionally) the default's text, then the kw_only boolean.
    wants_default = defaults is True or (defaults is None and draw(booleans()))
    default = Path(draw(text(ascii_lowercase, min_size=1))) if wants_default else NOTHING
    is_kw_only = draw(booleans()) if kw_only is None else kw_only
    value_strategy = text(ascii_lowercase, min_size=1).map(Path)
    return (field(type=Path, default=default, kw_only=is_kw_only), value_strategy)
def test_update_questionsets(db, settings):
    """Re-importing the questionsets fixture updates (never recreates) elements."""
    xml_path = Path(settings.BASE_DIR) / 'xml' / 'elements' / 'questionsets.xml'
    root = read_xml_file(xml_path)
    version = root.attrib.get('version')
    # Parse, convert and order the flat XML into importable element dicts.
    ordered = order_elements(convert_elements(flat_xml_to_elements(root), version))
    imported = ordered.values()
    import_elements(imported)
    assert len(root) == 10
    # Every element already existed, so all imports are updates, not creations.
    assert all(element['created'] is False for element in imported)
    assert all(element['updated'] is True for element in imported)
def test_installer_required_extras_should_not_be_removed_when_updating_single_dependency_pypi_repository(locker: Locker, repo: Repository, package: ProjectPackage, installed: CustomInstalledRepository, env: NullEnv, mocker: MockerFixture, config: Config) -> None:
    """A whitelisted single-dependency update must not uninstall extras that
    already-installed packages require (here: poetry's dependencies)."""
    # Pin the platform so marker-dependent requirements resolve deterministically.
    mocker.patch('sys.platform', 'darwin')
    pool = RepositoryPool()
    pool.add_repository(MockRepository())
    installer = Installer(NullIO(), env, package, locker, pool, config, installed=installed, executor=Executor(env, pool, config, NullIO()))
    # Phase 1: install poetry and its transitive dependencies from scratch.
    package.add_dependency(Factory.create_dependency('poetry', {'version': '^0.12.0'}))
    installer.update(True)
    result = installer.run()
    assert (result == 0)
    assert (installer.executor.installations_count == 3)
    assert (installer.executor.updates_count == 0)
    assert (installer.executor.removals_count == 0)
    # Phase 2: lock the current state, record everything as installed, then
    # update only 'pytest' via the whitelist.
    package.add_dependency(Factory.create_dependency('pytest', '^3.5'))
    locker.locked(True)
    locker.mock_lock_data(locker.written_data)
    assert isinstance(installer.executor, Executor)
    for pkg in installer.executor.installations:
        installed.add_package(pkg)
    installer = Installer(NullIO(), env, package, locker, pool, config, installed=installed, executor=Executor(env, pool, config, NullIO()))
    installer.update(True)
    installer.whitelist(['pytest'])
    result = installer.run()
    assert (result == 0)
    # pytest plus its dependencies get installed; crucially nothing is removed.
    assert (installer.executor.installations_count == 7)
    assert (installer.executor.updates_count == 0)
    assert (installer.executor.removals_count == 0)
class TestSwitchInlineQueryChosenChat(TestSwitchInlineQueryChosenChatBase):
    """Unit tests for SwitchInlineQueryChosenChat (values, dict form, equality)."""

    def test_slot_behaviour(self, switch_inline_query_chosen_chat):
        obj = switch_inline_query_chosen_chat
        # Every declared slot must actually exist on the instance.
        for attr in obj.__slots__:
            assert getattr(obj, attr, 'err') != 'err', f"got extra slot '{attr}'"
        slots = mro_slots(obj)
        assert len(slots) == len(set(slots)), 'duplicate slot'

    def test_expected_values(self, switch_inline_query_chosen_chat):
        obj = switch_inline_query_chosen_chat
        # Each attribute must round-trip the fixture value from the base class.
        for name in ('query', 'allow_user_chats', 'allow_bot_chats', 'allow_channel_chats', 'allow_group_chats'):
            assert getattr(obj, name) == getattr(self, name)

    def test_to_dict(self, switch_inline_query_chosen_chat):
        obj = switch_inline_query_chosen_chat
        data = obj.to_dict()
        assert isinstance(data, dict)
        for name in ('query', 'allow_user_chats', 'allow_bot_chats', 'allow_channel_chats', 'allow_group_chats'):
            assert data[name] == getattr(obj, name)

    def test_equality(self):
        make = SwitchInlineQueryChosenChat
        a = make(self.query, self.allow_user_chats, self.allow_bot_chats)
        b = make(self.query, self.allow_user_chats, self.allow_bot_chats)
        c = make(self.query, self.allow_user_chats)
        d = make('', self.allow_user_chats, self.allow_bot_chats)
        e = make(self.query, self.allow_user_chats, self.allow_bot_chats, self.allow_group_chats)
        # Identically-constructed instances compare and hash equal.
        assert a == b
        assert hash(a) == hash(b)
        # c/d/e each differ from a in at least one field.
        for other in (c, d, e):
            assert a != other
            assert hash(a) != hash(other)
# FIX: the decorator name was stripped in the source -- the bare
# `.parametrize(...)` expression is the residue of a pytest parametrization,
# restored below (pytest is already in scope; see the pytest.raises call).
@pytest.mark.parametrize('\n repository,\n day,\n count_response, expected_request, expected_count, throws\n ', [pytest.param(FAKE_REPOSITORIES['user1/repo1'], parse('2018-03-08').date(), COUNT_RESPONSE, COUNT_REQUEST, 1, False, id='Valid Count with 1 as result')])
def test_count_repository_actions(repository, day, count_response, expected_request, expected_count, throws, logs_model, mock_elasticsearch, mock_db_model, app_config):
    """count_repository_actions must issue the expected Elasticsearch count
    request and return its result (or raise, per the `throws` parameter)."""
    # Stub out every Elasticsearch entry point the model touches.
    mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
    mock_elasticsearch.count = Mock(return_value=count_response)
    mock_elasticsearch.list_indices = Mock(return_value=INDEX_LIST_RESPONSE)
    configure(app_config)
    if throws:
        with pytest.raises(Exception):
            logs_model.count_repository_actions(repository, day)
    else:
        count = logs_model.count_repository_actions(repository, day)
        assert (count == expected_count)
        if expected_request:
            mock_elasticsearch.count.assert_called_with(expected_request)
class TalkieRoot(Talkie):
    """Root of a Talkie object tree: owns the listener registry and dispatches events."""

    def __init__(self, **kwargs):
        # path -> list of weak refs to listener callables
        # (listdict yields a fresh [] for unseen keys).
        self._listeners = listdict()
        Talkie.__init__(self, **kwargs)

    def talkie_connect(self, path, listener):
        """Register *listener* for events at or under *path*; returns the connection handle."""
        connection = TalkieConnection(self, path, listener)
        self._listeners[path].append(connection._ref_listener)
        return connection

    def talkie_disconnect(self, connection):
        """Remove a connection; disconnecting twice is a silent no-op."""
        try:
            self._listeners[connection._path].remove(connection._ref_listener)
        except ValueError:
            pass

    def fire_event(self, path, value):
        """Notify listeners registered on every prefix of the event path.

        NOTE(review): *path* arrives as a sequence of parts that is reversed
        before joining -- presumably emitted in leaf-to-root order; confirm
        against the Talkie event plumbing.
        """
        path = '.'.join(path[::(- 1)])
        parts = path.split('.')
        # Visit '' (root), 'a', 'a.b', ... up to the full dotted path.
        for i in range((len(parts) + 1)):
            subpath = '.'.join(parts[:i])
            target_refs = self._listeners[subpath]
            delete = []
            for target_ref in target_refs:
                target = target_ref()
                if target:
                    target(path, value)
                else:
                    # Weak ref died: collect for removal after iteration so we
                    # don't mutate the list while looping over it.
                    delete.append(target_ref)
            for target_ref in delete:
                target_refs.remove(target_ref)

    def get(self, path):
        """Resolve a dotted attribute path starting from the root."""
        x = self
        for s in path.split('.'):
            x = getattr(x, s)
        return x

    def set(self, path, value):
        """Assign *value* at a dotted attribute path starting from the root."""
        x = self
        p = path.split('.')
        for s in p[:(- 1)]:
            x = getattr(x, s)
        setattr(x, p[(- 1)], value)
# FIX: decorator names were stripped in the source. `(frozen=True)` is the
# residue of the dataclass decorator; `as_json` overrides a parent *property*
# (note the bare `super().as_json` attribute access, not a call); and
# `from_json` receives `cls`, so it is a classmethod.
@dataclasses.dataclass(frozen=True)
class PrimePerGameOptions(PerGameOptions):
    """Per-game export options for Metroid Prime."""

    # Source ISO, export directory and format chosen by the user; None until set.
    input_path: (Path | None) = None
    output_directory: (Path | None) = None
    output_format: str = 'iso'
    # Games whose external model assets may be used during export.
    use_external_models: set[RandovaniaGame] = dataclasses.field(default_factory=set)

    @property
    def as_json(self):
        """JSON-compatible dict: parent payload plus the Prime-specific fields."""
        return {**super().as_json, 'input_path': (str(self.input_path) if (self.input_path is not None) else None), 'output_directory': (str(self.output_directory) if (self.output_directory is not None) else None), 'output_format': self.output_format, 'use_external_models': [game.value for game in self.use_external_models]}

    @classmethod
    def from_json(cls, value: dict) -> PrimePerGameOptions:
        """Inverse of ``as_json``; always targets Metroid Prime's cosmetic patch type."""
        game = RandovaniaGame.METROID_PRIME
        cosmetic_patches = game.data.layout.cosmetic_patches.from_json(value['cosmetic_patches'])
        return cls(cosmetic_patches=cosmetic_patches, input_path=decode_if_not_none(value['input_path'], Path), output_directory=decode_if_not_none(value['output_directory'], Path), output_format=value['output_format'], use_external_models={RandovaniaGame(g) for g in value['use_external_models']})
class TrainNetwork(object):
    """Trains a searched (DARTS-style) architecture on CIFAR-10/100.

    Construction wires up logging, device selection, data queues and the
    model; ``run()`` then drives the epoch loop, checkpointing the best
    top-1 model seen so far.
    """

    def __init__(self, args):
        super(TrainNetwork, self).__init__()
        self.args = args
        self.dur_time = 0  # training seconds accumulated across resumed runs
        self._init_log()
        self._init_device()
        self._init_data_queue()
        self._init_model()

    def _init_log(self):
        """Create the experiment directory and a file+stdout logger."""
        self.args.save = (((('./logs/eval/' + self.args.arch) + '/') + 'cifar10') + '/eval-{}-{}'.format(self.args.save, time.strftime('%Y%m%d-%H%M')))
        dutils.create_exp_dir(self.args.save, scripts_to_save=None)
        log_format = '%(asctime)s %(message)s'
        logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
        fh = logging.FileHandler(os.path.join(self.args.save, 'log.txt'))
        fh.setFormatter(logging.Formatter(log_format))
        self.logger = logging.getLogger('Architecture Training')
        self.logger.addHandler(fh)

    def _init_device(self):
        """Seed all RNGs and select the CUDA device (GPU is mandatory)."""
        if (not torch.cuda.is_available()):
            self.logger.info('no gpu device available')
            sys.exit(1)
        np.random.seed(self.args.seed)
        self.device_id = self.args.gpu
        # With multi-GPU DataParallel the master device is cuda:0.
        self.device = torch.device('cuda:{}'.format((0 if self.args.multi_gpus else self.device_id)))
        cudnn.benchmark = True
        torch.manual_seed(self.args.seed)
        cudnn.enabled = True
        torch.cuda.manual_seed(self.args.seed)
        logging.info(('gpu device = %d' % self.args.gpu))
        logging.info('args = %s', self.args)

    def _init_data_queue(self):
        """Build train/valid DataLoaders for the configured CIFAR dataset."""
        # FIX: was `dutils._data_transforms_cifar(args)`, reaching for a
        # module-level `args` global instead of the instance's own copy --
        # inconsistent with every other method and broken if the class is
        # used without that global.
        (train_transform, valid_transform) = dutils._data_transforms_cifar(self.args)
        if (self.args.dataset == 'cifar10'):
            train_data = dset.CIFAR10(root=self.args.data, train=True, download=True, transform=train_transform)
            valid_data = dset.CIFAR10(root=self.args.data, train=False, download=True, transform=valid_transform)
            self.num_classes = 10
        elif (self.args.dataset == 'cifar100'):
            train_data = dset.CIFAR100(root=self.args.data, train=True, download=True, transform=train_transform)
            valid_data = dset.CIFAR100(root=self.args.data, train=False, download=True, transform=valid_transform)
            self.num_classes = 100
        self.train_queue = torch.utils.data.DataLoader(train_data, batch_size=self.args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
        self.valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=self.args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

    def _init_model(self):
        """Instantiate the genotype network, optimizer, LR scheduler and
        optionally restore a checkpoint."""
        genotype = eval(('genotypes.%s' % self.args.arch))
        model = Network(self.args.init_channels, self.num_classes, self.args.layers, self.args.auxiliary, genotype, self.args.parse_method)
        (flops, params) = profile(model, inputs=(torch.randn(1, 3, 32, 32),), verbose=False)
        self.logger.info('flops = %fM', (flops / 1000000.0))
        self.logger.info('param size = %fM', (params / 1000000.0))
        if ((torch.cuda.device_count() > 1) and self.args.multi_gpus):
            self.logger.info('use: %d gpus', torch.cuda.device_count())
            model = nn.DataParallel(model)
        else:
            self.logger.info(('gpu device = %d' % self.device_id))
            torch.cuda.set_device(self.device_id)
        self.model = model.to(self.device)
        criterion = nn.CrossEntropyLoss()
        self.criterion = criterion.to(self.device)
        self.optimizer = torch.optim.SGD(model.parameters(), self.args.learning_rate, momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        self.best_acc_top1 = 0
        if self.args.resume:
            if os.path.isfile(self.args.resume):
                print('=> loading checkpoint {}'.format(self.args.resume))
                checkpoint = torch.load(self.args.resume, map_location=self.device)
                self.dur_time = checkpoint['dur_time']
                self.args.start_epoch = checkpoint['epoch']
                self.best_acc_top1 = checkpoint['best_acc_top1']
                self.args.drop_path_prob = checkpoint['drop_path_prob']
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                print("=> loaded checkpoint '{}' (epoch {})".format(self.args.resume, checkpoint['epoch']))
            else:
                print("=> no checkpoint found at '{}'".format(self.args.resume))
        # Scheduler is created after the optimizer state is (possibly) restored.
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, float(self.args.epochs), eta_min=0, last_epoch=((- 1) if (self.args.start_epoch == 0) else self.args.start_epoch))
        if (self.args.resume and os.path.isfile(self.args.resume)):
            checkpoint = torch.load(self.args.resume)
            self.scheduler.load_state_dict(checkpoint['scheduler'])

    def run(self):
        """Main epoch loop with per-epoch validation and checkpointing."""
        self.logger.info('args = %s', self.args)
        run_start = time.time()
        for epoch in range(self.args.start_epoch, self.args.epochs):
            # NOTE: scheduler.step() before the epoch is the pre-PyTorch-1.1
            # ordering; kept as-is so the LR schedule is unchanged.
            self.scheduler.step()
            self.logger.info('epoch % d / %d lr %e', epoch, self.args.epochs, self.scheduler.get_lr()[0])
            # Linearly ramp the drop-path probability over training.
            self.model.drop_path_prob = ((self.args.drop_path_prob * epoch) / self.args.epochs)
            (train_acc, train_obj) = self.train()
            self.logger.info('train loss %e, train acc %f', train_obj, train_acc)
            (valid_acc_top1, valid_acc_top5, valid_obj) = self.infer()
            self.logger.info('valid loss %e, top1 valid acc %f top5 valid acc %f', valid_obj, valid_acc_top1, valid_acc_top5)
            self.logger.info('best valid acc %f', self.best_acc_top1)
            is_best = False
            if (valid_acc_top1 > self.best_acc_top1):
                self.best_acc_top1 = valid_acc_top1
                is_best = True
            dutils.save_checkpoint({'epoch': (epoch + 1), 'dur_time': ((self.dur_time + time.time()) - run_start), 'state_dict': self.model.state_dict(), 'drop_path_prob': self.args.drop_path_prob, 'best_acc_top1': self.best_acc_top1, 'optimizer': self.optimizer.state_dict(), 'scheduler': self.scheduler.state_dict()}, is_best, self.args.save)
        self.logger.info('train epoches %d, best_acc_top1 %f, dur_time %s', self.args.epochs, self.best_acc_top1, dutils.calc_time(((self.dur_time + time.time()) - run_start)))

    def train(self):
        """One training epoch; returns (top1_acc, avg_loss)."""
        objs = dutils.AvgrageMeter()
        top1 = dutils.AvgrageMeter()
        top5 = dutils.AvgrageMeter()
        self.model.train()
        for (step, (input, target)) in enumerate(self.train_queue):
            input = input.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            self.optimizer.zero_grad()
            (logits, logits_aux) = self.model(input)
            loss = self.criterion(logits, target)
            if self.args.auxiliary:
                # Auxiliary head loss, weighted as in the DARTS recipe.
                loss_aux = self.criterion(logits_aux, target)
                loss += (self.args.auxiliary_weight * loss_aux)
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), self.args.grad_clip)
            self.optimizer.step()
            (prec1, prec5) = dutils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            if ((step % self.args.report_freq) == 0):
                self.logger.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
        return (top1.avg, objs.avg)

    def infer(self):
        """One validation pass; returns (top1_acc, top5_acc, avg_loss)."""
        objs = dutils.AvgrageMeter()
        top1 = dutils.AvgrageMeter()
        top5 = dutils.AvgrageMeter()
        self.model.eval()
        with torch.no_grad():
            for (step, (input, target)) in enumerate(self.valid_queue):
                input = input.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
                (logits, _) = self.model(input)
                loss = self.criterion(logits, target)
                (prec1, prec5) = dutils.accuracy(logits, target, topk=(1, 5))
                n = input.size(0)
                objs.update(loss.item(), n)
                top1.update(prec1.item(), n)
                top5.update(prec5.item(), n)
                if ((step % self.args.report_freq) == 0):
                    self.logger.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
        return (top1.avg, top5.avg, objs.avg)
class TestSyntheticLocate():
    """Capacitated LSCP (CLSCP-SO) tests on a synthetic spaghetti network."""

    def setup_method(self) -> None:
        """Build a regular lattice network, simulate 5 clients / 2 facilities,
        snap both to the network, and precompute the client->facility
        network-distance cost matrix."""
        lattice = spaghetti.regular_lattice((0, 0, 10, 10), 9, exterior=True)
        ntw = spaghetti.Network(in_data=lattice)
        gdf = spaghetti.element_as_gdf(ntw, arcs=True)
        # Buffered union of the arcs gives a polygon to sample points inside.
        street = geopandas.GeoDataFrame(geopandas.GeoSeries(gdf['geometry'].buffer(0.2).unary_union), crs=gdf.crs, columns=['geometry'])
        client_count = 5
        facility_count = 2
        # Fixed seeds keep the simulated points (and thus solver results) stable.
        self.client_points = simulated_geo_points(street, needed=client_count, seed=5)
        self.facility_points = simulated_geo_points(street, needed=facility_count, seed=6)
        # Fresh network for snapping, separate from the one used for buffering.
        ntw = spaghetti.Network(in_data=lattice)
        ntw.snapobservations(self.client_points, 'clients', attribute=True)
        ntw.snapobservations(self.facility_points, 'facilities', attribute=True)
        self.clients_snapped = spaghetti.element_as_gdf(ntw, pp_name='clients', snapped=True)
        self.facilities_snapped = spaghetti.element_as_gdf(ntw, pp_name='facilities', snapped=True)
        self.cost_matrix = ntw.allneighbordistances(sourcepattern=ntw.pointpatterns['clients'], destpattern=ntw.pointpatterns['facilities'])

    def test_clscpso_y1_lt_y2(self):
        """Facility 0 capacity < facility 1: all demand lands on facility 1."""
        service_radius = 8
        facility_capacity = numpy.array([5, 15])
        demand_quantity = numpy.arange(1, 6)
        clscpso = LSCP.from_cost_matrix(self.cost_matrix, service_radius, facility_capacity_arr=facility_capacity, demand_quantity_arr=demand_quantity)
        result = clscpso.solve(pulp.PULP_CBC_CMD(msg=False))
        assert isinstance(result, LSCP)
        known = [[1], [1], [1], [1], [1]]
        observed = clscpso.cli2fac
        assert (known == observed)
        known = [[], [0, 1, 2, 3, 4]]
        observed = clscpso.fac2cli
        assert (known == observed)

    def test_clscpso_y1_gt_y2(self):
        """Facility 0 capacity > facility 1: demand is split across both."""
        service_radius = 8
        facility_capacity = numpy.array([15, 5])
        demand_quantity = numpy.arange(1, 6)
        clscpso = LSCP.from_cost_matrix(self.cost_matrix, service_radius, facility_capacity_arr=facility_capacity, demand_quantity_arr=demand_quantity)
        result = clscpso.solve(pulp.PULP_CBC_CMD(msg=False))
        assert isinstance(result, LSCP)
        known = [[1], [1], [0, 1], [0, 1], [0, 1]]
        observed = clscpso.cli2fac
        assert (known == observed)
        known = [[2, 3, 4], [0, 1, 2, 3, 4]]
        observed = clscpso.fac2cli
        assert (known == observed)

    def test_clscpso_y1_eq_y2(self):
        """Equal capacities with a tighter radius: assignments are balanced."""
        service_radius = 7
        facility_capacity = numpy.array([8, 8])
        demand_quantity = numpy.arange(1, 6)
        clscpso = LSCP.from_cost_matrix(self.cost_matrix, service_radius, facility_capacity_arr=facility_capacity, demand_quantity_arr=demand_quantity)
        result = clscpso.solve(pulp.PULP_CBC_CMD(msg=False))
        assert isinstance(result, LSCP)
        known = [[1], [1], [0, 1], [0], [1]]
        observed = clscpso.cli2fac
        assert (known == observed)
        known = [[2, 3], [0, 1, 2, 4]]
        observed = clscpso.fac2cli
        assert (known == observed)

    def test_clscpso_dem_gt_cap_error(self):
        """Total demand exceeding total capacity must be rejected at build time."""
        service_radius = 8
        facility_capacity = numpy.array([8, 8])
        demand_quantity = numpy.arange(5, 10)
        with pytest.raises(ValueError, match='Infeasible model. Demand greater than capacity'):
            LSCP.from_cost_matrix(self.cost_matrix, service_radius, facility_capacity_arr=facility_capacity, demand_quantity_arr=demand_quantity)

    def test_clscpso_infease_error(self):
        """A too-small service radius makes the model unsolvable at solve time."""
        service_radius = 1
        facility_capacity = numpy.array([5, 15])
        demand_quantity = numpy.arange(1, 6)
        clscpso = LSCP.from_cost_matrix(self.cost_matrix, service_radius, facility_capacity_arr=facility_capacity, demand_quantity_arr=demand_quantity)
        with pytest.raises(RuntimeError, match='Model is not solved:'):
            clscpso.solve(pulp.PULP_CBC_CMD(msg=False))
class ConvNet(nn.Module):
    """Atari-style CNN: three conv layers followed by two dense layers.

    The flattened conv output is hard-wired to 3136 = 64 * 7 * 7, which
    corresponds to (c, 84, 84) inputs run through the classic DQN conv stack.
    """

    def __init__(self, input_dim, output_dim):
        super(ConvNet, self).__init__()
        c, h, w = input_dim  # only the channel count feeds the layers
        # DQN conv stack: 8x8/4 -> 4x4/2 -> 3x3/1.
        self.conv_1 = nn.Conv2d(in_channels=c, out_channels=32, kernel_size=8, stride=4)
        self.conv_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
        self.conv_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
        self.relu = nn.ReLU()
        self.flatten = nn.Flatten()
        self.dense = nn.Linear(3136, 512)
        self.output = nn.Linear(512, output_dim)

    def forward(self, input):
        """Map a (N, c, 84, 84) batch to (N, output_dim) scores."""
        features = self.relu(self.conv_1(input))
        features = self.relu(self.conv_2(features))
        features = self.relu(self.conv_3(features))
        hidden = self.relu(self.dense(self.flatten(features)))
        return self.output(hidden)
def handle_code(code, vk_packet=True):
    """Translate one key-code token into a list of key/pause actions.

    Supports named codes from CODES, single characters, 'PAUSE <seconds>'
    and '<code> <count>' repetition.

    Raises:
        KeySequenceError: for malformed pause times or repetition counts.
        RuntimeError: for codes that match no known form.
    """
    if code in CODES:
        return [KeyAction(CODES[code])]
    if len(code) == 1:
        return [KeyAction(code)]
    if ' ' not in code:
        raise RuntimeError('Unknown code: {}'.format(code))
    # "<something> <number>" form: either a pause or a repetition.
    to_repeat, count = code.rsplit(None, 1)
    if to_repeat == 'PAUSE':
        try:
            pause_time = float(count)
        except ValueError:
            raise KeySequenceError('invalid pause time {}'.format(count))
        return [PauseAction(pause_time)]
    try:
        repeat_count = int(count)
    except ValueError:
        raise KeySequenceError('invalid repetition count {}'.format(count))
    if to_repeat in CODES:
        return [KeyAction(CODES[to_repeat])] * repeat_count
    # Fall back to the generic parser; it may yield one action or a list.
    parsed = parse_keys(to_repeat)
    if isinstance(parsed, list):
        return parsed * repeat_count
    return [parsed] * repeat_count
def get_task(args):
task_name = args.task_name
data_cache_dir = args.data_cache_dir
if (task_name == 'mnli'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
mnli_datasets = load_dataset('glue', 'mnli', cache_dir=data_cache_dir)
total_train_examples = [e for e in mnli_datasets['train']]
total_train_examples = random.sample(total_train_examples, 3000)
total_train_examples = process_mnli_examples(total_train_examples)
total_eval_examples = [e for e in mnli_datasets['validation_matched']]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_mnli_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
return (f'''{example['premise']}. Based on that information, is the claim {example['hypothesis']} "True", "False", or "Inconclusive"?
answer:''', f"{label_map[example['label']]}")
all_train_text_to_encode = ['{}. Based on that information, is the claim {} "True", "False", or "Inconclusive"?'.format(raw_item['premise'], raw_item['hypothesis']) for raw_item in total_train_examples]
all_eval_text_to_encode = ['{}. Based on that information, is the claim {} "True", "False", or "Inconclusive"?'.format(raw_item['premise'], raw_item['hypothesis']) for raw_item in total_eval_examples]
label_map = {0: 'True', 1: 'Inconclusive', 2: 'False'}
elif (task_name == 'rte'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
rte_datasets = load_dataset('glue', 'rte', cache_dir=data_cache_dir)
total_train_examples = [e for e in rte_datasets['train']]
total_train_examples = process_rte_examples(total_train_examples)
total_eval_examples = [e for e in rte_datasets['validation']]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_rte_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
return (f'''{example['sentence1']}.
question: {example['sentence2']}. True or False?
answer:''', f"{label_map[example['label']]}")
all_train_text_to_encode = ['{}.\nquestion: {}'.format(raw_item['sentence1'], raw_item['sentence2']) for raw_item in total_train_examples]
all_eval_text_to_encode = ['{}.\nquestion: {}'.format(raw_item['sentence1'], raw_item['sentence2']) for raw_item in total_eval_examples]
label_map = {0: 'True', 1: 'False'}
elif (task_name == 'sst5'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
sst5_datasets = load_dataset('SetFit/sst5', cache_dir=data_cache_dir)
total_train_examples = [e for e in sst5_datasets['train']]
total_train_examples = random.sample(total_train_examples, 3000)
total_train_examples = process_sst5_examples(total_train_examples)
total_eval_examples = [e for e in sst5_datasets['test']]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_sst5_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
return (f'''How do you feel about the following sentence?
{example['text']}
answer:''', f"{label_map[example['label']]}")
all_train_text_to_encode = [raw_item['text'] for raw_item in total_train_examples]
all_eval_text_to_encode = [raw_item['text'] for raw_item in total_eval_examples]
label_map = {0: 'very negative', 1: 'negative', 2: 'neutral', 3: 'positive', 4: 'very positive'}
elif (task_name == 'mrpc'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
mrpc_datasets = load_dataset('glue', 'mrpc', cache_dir=data_cache_dir)
total_train_examples = [e for e in mrpc_datasets['train']]
total_train_examples = random.sample(total_train_examples, 3000)
total_train_examples = process_mrpc_examples(total_train_examples)
total_eval_examples = [e for e in mrpc_datasets['validation']]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_mrpc_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
return (f'''Are the following two sentences 'equivalent' or 'not equivalent'?
{example['sentence1']}.
{example['sentence2']}.
answer:''', f"{label_map[example['label']]}")
all_train_text_to_encode = ['{}.\n{}'.format(raw_item['sentence1'], raw_item['sentence2']) for raw_item in total_train_examples]
all_eval_text_to_encode = ['{}.\n{}'.format(raw_item['sentence1'], raw_item['sentence2']) for raw_item in total_eval_examples]
label_map = {0: 'not equivalent', 1: 'equivalent'}
elif (task_name == 'dbpedia_14'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
dbpedia_datasets = load_dataset('dbpedia_14', revision='master', cache_dir=data_cache_dir)
total_train_examples = [e for e in dbpedia_datasets['train']]
total_train_examples = random.sample(total_train_examples, 3000)
total_train_examples = process_dbpedia_examples(total_train_examples)
total_eval_examples = [e for e in dbpedia_datasets['test']]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_dbpedia_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
return (f"title: {example['title']}; content: {example['content']}", f"{label_map[example['label']]}")
all_train_text_to_encode = ['title: {} ; content: {}'.format(raw_item['title'], raw_item['content']) for raw_item in total_train_examples]
all_eval_text_to_encode = ['title: {} ; content: {}'.format(raw_item['title'], raw_item['content']) for raw_item in total_eval_examples]
label_map = {0: 'company', 1: 'educational institution', 2: 'artist', 3: 'athlete', 4: 'office holder', 5: 'mean of transportation', 6: 'building', 7: 'natural place', 8: 'village', 9: 'animal', 10: 'plant', 11: 'album', 12: 'film', 13: 'written work'}
elif (task_name == 'hellaswag'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
hellaswag_datasets = load_dataset('hellaswag', cache_dir=data_cache_dir)
total_train_examples = [e for e in hellaswag_datasets['train']]
total_train_examples = random.sample(total_train_examples, 3000)
total_train_examples = process_hellaswag_examples(total_train_examples)
total_eval_examples = [e for e in hellaswag_datasets['validation']]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_hellaswag_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
return (f"The topic is {example['activity_label']}. {example['ctx_a']} {example['ctx_b']} ", f"{example['endings'][example['label']]}")
all_train_text_to_encode = [f"The topic is {raw_item['activity_label']}. {raw_item['ctx_a']} {raw_item['ctx_b']} | {raw_item['endings'][0]} | {raw_item['endings'][1]} | {raw_item['endings'][2]} | {raw_item['endings'][3]}" for raw_item in total_train_examples]
all_eval_text_to_encode = [f"The topic is {raw_item['activity_label']}. {raw_item['ctx_a']} {raw_item['ctx_b']} | {raw_item['endings'][0]} | {raw_item['endings'][1]} | {raw_item['endings'][2]} | {raw_item['endings'][3]}" for raw_item in total_eval_examples]
label_map = None
elif (task_name == 'xsum'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
xsum_dataset = load_dataset('xsum', cache_dir=data_cache_dir)
total_train_examples = [e for e in xsum_dataset['train']]
total_train_examples = random.sample(total_train_examples, 3000)
total_train_examples = process_xsum_examples(total_train_examples)
total_eval_examples = [e for e in xsum_dataset['test']]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_xsum_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
return (f'''write a short summary:
{example['document']}
TL;DR:''', f"{example['summary']}")
all_train_text_to_encode = [raw_item['document'] for raw_item in total_train_examples]
all_eval_text_to_encode = [raw_item['document'] for raw_item in total_eval_examples]
label_map = None
elif (task_name == 'nq'):
if (os.path.isfile(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) and os.path.isfile(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'))):
print('use cached examples')
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json')) as f:
total_train_examples = json.load(f)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json')) as f:
total_eval_examples = json.load(f)
else:
nq_dataset = load_dataset('natural_questions', cache_dir=data_cache_dir)
first_sub_sample_indices = random.sample(range(len(nq_dataset['train'])), 12000)
train_data = nq_dataset['train'].select(first_sub_sample_indices).map(format_dataset)
total_train_examples = train_data.remove_columns(['annotations', 'document', 'id']).filter((lambda x: (x['category'] != 'null')))
total_train_examples = [e for e in total_train_examples]
total_train_examples = random.sample(total_train_examples, 3000)
total_train_examples = process_nq_examples(total_train_examples)
total_eval_examples = nq_dataset['validation'].map(format_dataset).remove_columns(['annotations', 'document', 'id']).filter((lambda x: (x['category'] != 'null')))
total_eval_examples = [e for e in total_eval_examples]
total_eval_examples = random.sample(total_eval_examples, 256)
total_eval_examples = process_nq_examples(total_eval_examples)
with open(os.path.join(args.output_dir, f'train_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_train_examples, f, indent=4)
with open(os.path.join(args.output_dir, f'eval_examples_seed_{args.seed}.json'), 'w') as f:
json.dump(total_eval_examples, f, indent=4)
if args.debug:
args.annotation_size = 10
args.batch_size = 1
total_train_examples = total_train_examples[:50]
total_eval_examples = total_eval_examples[:5]
def format_example(example, label_map, **kwargs):
if (example['category'] in ['yes', 'no']):
return (f'''Write an answer: {example['question']}
class''', f"{example['category']}")
assert (example['category'] == 'other'), example['category']
assert (len(example['short_targets']) > 0), f"{example['short_targets']}"
return (f'''Write an answer: {example['question']}
{example['category']} ''', f"{example['short_targets'][0]}")
all_train_text_to_encode = [raw_item['question'] for raw_item in total_train_examples]
all_eval_text_to_encode = [raw_item['question'] for raw_item in total_eval_examples]
label_map = None
else:
raise ValueError(f'{args.task_name} is not supported')
return (total_train_examples, total_eval_examples, all_train_text_to_encode, all_eval_text_to_encode, format_example, label_map) |
class TFMobileViTIntermediate(tf.keras.layers.Layer):
    """Feed-forward expansion layer of a MobileViT transformer block.

    Projects the hidden states up to ``intermediate_size`` and applies the
    activation configured on ``config.hidden_act``.
    """

    def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        # Dense projection up to the expanded (intermediate) width.
        self.dense = tf.keras.layers.Dense(intermediate_size, name='dense')
        # config.hidden_act is either an activation name or a callable.
        self.intermediate_act_fn = (
            get_tf_activation(config.hidden_act)
            if isinstance(config.hidden_act, str)
            else config.hidden_act
        )

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
class Migration(migrations.Migration):
    """Switch ``Item.tags`` to taggit-autosuggest's TaggableManager (auto-generated)."""

    # Must run after the previous digest migration.
    dependencies = [('digest', '0037_auto__1548')]
    operations = [migrations.AlterField(model_name='item', name='tags', field=taggit_autosuggest.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'))]
class TestImageProcedure(Procedure):
    """Simulated 2D raster scan emitting one random pixel value per (X, Y) point."""

    X_start = FloatParameter('X Start Position', units='m', default=0.0)
    X_end = FloatParameter('X End Position', units='m', default=2.0)
    X_step = FloatParameter('X Scan Step Size', units='m', default=0.1)
    Y_start = FloatParameter('Y Start Position', units='m', default=-1.0)
    Y_end = FloatParameter('Y End Position', units='m', default=1.0)
    Y_step = FloatParameter('Y Scan Step Size', units='m', default=0.1)
    delay = FloatParameter('Delay', units='s', default=0.01)
    DATA_COLUMNS = ['X', 'Y', 'pixel_data']

    def startup(self):
        log.info('starting up...')

    def execute(self):
        # Raster over the requested X/Y grid, emitting progress and one
        # random "pixel" per point; abort promptly when asked to stop.
        x_positions = np.arange(self.X_start, self.X_end, self.X_step)
        y_positions = np.arange(self.Y_start, self.Y_end, self.Y_step)
        total_pixels = x_positions.size * y_positions.size
        completed = 0
        for x_pos in x_positions:
            for y_pos in y_positions:
                self.emit('progress', int(100 * completed / total_pixels))
                completed += 1
                self.emit('results', {'X': x_pos, 'Y': y_pos, 'pixel_data': np.random.rand(1)[0]})
                sleep(self.delay)
                if self.should_stop():
                    break
            if self.should_stop():
                break

    def shutdown(self):
        log.info('shutting down')
class RTypeVisitor(Generic[T]):
    """Abstract visitor over the RType hierarchy.

    Subclasses implement one ``visit_*`` method per concrete RType kind;
    each returns a value of the visitor's type parameter ``T``.
    """

    def visit_rprimitive(self, typ: RPrimitive) -> T:
        raise NotImplementedError

    def visit_rinstance(self, typ: RInstance) -> T:
        raise NotImplementedError

    def visit_runion(self, typ: RUnion) -> T:
        raise NotImplementedError

    def visit_rtuple(self, typ: RTuple) -> T:
        raise NotImplementedError

    def visit_rstruct(self, typ: RStruct) -> T:
        raise NotImplementedError

    def visit_rarray(self, typ: RArray) -> T:
        raise NotImplementedError

    def visit_rvoid(self, typ: RVoid) -> T:
        raise NotImplementedError
def _set_platform_dir_class() -> type[PlatformDirsABC]:
    """Return the concrete PlatformDirs implementation class for this system."""
    if sys.platform == 'win32':
        from .windows import Windows as impl
    elif sys.platform == 'darwin':
        from .macos import MacOS as impl
    else:
        from .unix import Unix as impl
    # Android reports itself as Linux; its canonical env vars identify it.
    on_android = os.getenv('ANDROID_DATA') == '/data' and os.getenv('ANDROID_ROOT') == '/system'
    # Inside a terminal environment (SHELL/PREFIX set, e.g. Termux) the
    # Unix-style paths are the right ones, so keep the platform default.
    if on_android and not (os.getenv('SHELL') or os.getenv('PREFIX')):
        from .android import _android_folder
        if _android_folder() is not None:
            from .android import Android
            return Android
    return impl
class NeighborDistance():
    """Mean distance from each geometry to its spatial-weights neighbours.

    The result is stored in ``self.series`` as a pandas Series aligned with
    ``gdf``; rows with a missing geometry or no neighbours get NaN.
    """

    def __init__(self, gdf, spatial_weights, unique_id, verbose=True):
        self.gdf = gdf
        self.sw = spatial_weights
        self.id = gdf[unique_id]
        geoms = gdf.set_index(unique_id).geometry
        mean_distances = []
        for uid, geom in tqdm(geoms.items(), total=geoms.shape[0], disable=(not verbose)):
            mean_dist = np.nan
            if geom is not None and uid in spatial_weights.neighbors:
                neighbour_geoms = geoms.loc[spatial_weights.neighbors[uid]]
                if len(neighbour_geoms):
                    mean_dist = neighbour_geoms.geometry.distance(geom).mean()
            mean_distances.append(mean_dist)
        self.series = pd.Series(mean_distances, index=gdf.index)
class SequentialGeventHandler(object):
    """Async handler backed by gevent.

    Callbacks are executed sequentially on a single worker greenlet so
    that callback ordering is preserved.
    """
    name = 'sequential_gevent_handler'
    # Pluggable primitives so tests/subclasses can substitute implementations.
    queue_impl = gevent.queue.Queue
    queue_empty = gevent.queue.Empty
    sleep_func = staticmethod(gevent.sleep)

    def __init__(self):
        # Queue feeding the callback worker greenlet.
        self.callback_queue = self.queue_impl()
        self._running = False
        self._async = None
        # Guards start/stop state transitions.
        self._state_change = Semaphore()
        self._workers = []

    def running(self):
        # True between start() and stop().
        return self._running

    class timeout_exception(gevent.Timeout):
        # Timeout subclass that raises with a message instead of a bare Timeout.
        def __init__(self, msg):
            gevent.Timeout.__init__(self, exception=msg)

    def _create_greenlet_worker(self, queue):
        # Spawn a greenlet that drains `queue`, running each callable.
        # It exits only when the _STOP sentinel is dequeued.
        def greenlet_worker():
            while True:
                try:
                    func = queue.get()
                    try:
                        if (func is _STOP):
                            break
                        func()
                    except Exception as exc:
                        # A callback error must never kill the worker.
                        log.warning('Exception in worker greenlet')
                        log.exception(exc)
                    finally:
                        # Drop the reference promptly so the callback's
                        # closure can be collected between items.
                        del func
                except self.queue_empty:
                    continue
        return gevent.spawn(greenlet_worker)

    def start(self):
        """Start the callback worker; idempotent."""
        with self._state_change:
            if self._running:
                return
            self._running = True
            for queue in (self.callback_queue,):
                w = self._create_greenlet_worker(queue)
                self._workers.append(w)
            # Ensure workers are stopped at interpreter exit.
            atexit.register(self.stop)

    def stop(self):
        """Stop workers, drain them via the _STOP sentinel; idempotent."""
        with self._state_change:
            if (not self._running):
                return
            self._running = False
            for queue in (self.callback_queue,):
                queue.put(_STOP)
            while self._workers:
                worker = self._workers.pop()
                worker.join()
            # Fresh queue so a later start() begins empty.
            self.callback_queue = self.queue_impl()
            atexit.unregister(self.stop)

    def select(self, *args, **kwargs):
        # Delegate to the shared select helper using gevent's selectors.
        return selector_select(*args, selectors_module=gevent.selectors, **kwargs)

    def socket(self, *args, **kwargs):
        # NOTE: args/kwargs are accepted for interface parity but ignored.
        return utils.create_tcp_socket(socket)

    def create_connection(self, *args, **kwargs):
        return utils.create_tcp_connection(socket, *args, **kwargs)

    def create_socket_pair(self):
        return utils.create_socket_pair(socket)

    def event_object(self):
        return gevent.event.Event()

    def lock_object(self):
        return gevent.thread.allocate_lock()

    def rlock_object(self):
        return RLock()

    def async_result(self):
        return AsyncResult()

    def spawn(self, func, *args, **kwargs):
        return gevent.spawn(func, *args, **kwargs)

    def dispatch_callback(self, callback):
        # Enqueue the callback invocation for sequential execution.
        self.callback_queue.put((lambda : callback.func(*callback.args)))
# NOTE(review): the two lines above this class had lost their decorator
# prefixes ('.skip_fips(...)' is a SyntaxError, '_if_memtesting_not_supported()'
# a NameError). Restored to the markers used by the cryptography test suite —
# confirm against upstream. The textwrap.dedent payloads below also look like
# their internal indentation was collapsed to single spaces; left byte-identical.
@pytest.mark.skip_fips(reason='FIPS self-test sets allow_customize = 0')
@skip_if_memtesting_not_supported()
class TestAssertNoMemoryLeaks:
    """Sanity tests for the assert_no_memory_leaks() helper itself."""

    def test_no_leak_no_malloc(self):
        # A function that allocates nothing must not be flagged.
        assert_no_memory_leaks(textwrap.dedent('\n def func():\n pass\n '))

    def test_no_leak_free(self):
        # Explicitly freed OpenSSL object: no leak.
        assert_no_memory_leaks(textwrap.dedent('\n def func():\n from cryptography.hazmat.bindings.openssl.binding import Binding\n b = Binding()\n name = b.lib.X509_NAME_new()\n b.lib.X509_NAME_free(name)\n '))

    def test_no_leak_gc(self):
        # Object released via ffi.gc finalizer: no leak.
        assert_no_memory_leaks(textwrap.dedent('\n def func():\n from cryptography.hazmat.bindings.openssl.binding import Binding\n b = Binding()\n name = b.lib.X509_NAME_new()\n b.ffi.gc(name, b.lib.X509_NAME_free)\n '))

    def test_leak(self):
        # An allocation that is never freed must raise.
        with pytest.raises(AssertionError):
            assert_no_memory_leaks(textwrap.dedent('\n def func():\n from cryptography.hazmat.bindings.openssl.binding import (\n Binding\n )\n b = Binding()\n b.lib.X509_NAME_new()\n '))

    def test_errors(self):
        # Errors inside the probed function propagate as ValueError.
        with pytest.raises(ValueError, match='ZeroDivisionError'):
            assert_no_memory_leaks(textwrap.dedent('\n def func():\n raise ZeroDivisionError\n '))
class VerilogTBGenPass(BasePass):
    """Generate a Verilog testbench per component tagged with ``case_name``
    metadata, plus a runtime hook that dumps one test-vector line per
    simulated cycle into a companion ``.cases`` file.

    Fixes vs. previous revision:
      - exception messages referenced an undefined name ``d`` (NameError);
      - ``gen_hook_func`` was missing its ``self`` parameter, so the
        ``self.gen_hook_func(...)`` call raised TypeError;
      - the generated ``dump_case`` source had collapsed indentation
        (``print`` was not nested under the ``if``), a SyntaxError on exec.
    """
    case_name = MetadataKey(str)
    vtbgen_hooks = MetadataKey(list)

    def __call__(self, top):
        if (not top._dsl.constructed):
            raise VerilogImportError(top, f'please elaborate design {top} before applying the TBGen pass!')
        assert (not top.has_metadata(self.vtbgen_hooks))
        tbgen_hooks = []
        top.set_metadata(self.vtbgen_hooks, tbgen_hooks)
        tbgen_components = []
        # Collect the topmost components tagged with a case name; do not
        # descend into a tagged component's children.
        def traverse_hierarchy(m):
            if (m.has_metadata(self.case_name) and hasattr(m, '_ports')):
                tbgen_components.append((m, m.get_metadata(self.case_name)))
            else:
                for child in m.get_child_components():
                    traverse_hierarchy(child)
        traverse_hierarchy(top)
        for (x, case_name) in tbgen_components:
            signal_decls = []
            task_assign_strs = []
            task_signal_decls = []
            task_check_strs = []
            dut_signal_decls = []
            py_signal_order = []
            for (pname, vname, port, is_ifc) in x._ports:
                # clk/reset are driven by the harness itself.
                if ((vname == 'reset') or (vname == 'clk')):
                    continue
                if isinstance(port, rt.Port):
                    direction = port.get_direction()
                elif isinstance(port, rt.Array):
                    direction = port.get_sub_type().get_direction()
                else:
                    raise Exception(f'unrecognized direction {port}!')
                (p_n_dim, p_rtype) = get_rtype(port)
                dtype = p_rtype.get_dtype()
                if isinstance(dtype, rdt.Vector):
                    nbits = dtype.get_length()
                elif isinstance(dtype, rdt.Struct):
                    nbits = dtype.get_class().nbits
                else:
                    raise Exception(f'unrecognized data type {dtype}!')
                # Expand a non-interface array port into one Python-side name
                # per element (BFS over the dimensions).
                expanded_pname = []
                if (isinstance(port, rt.Array) and (len(pname) == 1) and (not bool(is_ifc))):
                    Q = deque([(pname[0], p_n_dim)])
                    while Q:
                        (name, dim) = Q.popleft()
                        if (not dim):
                            expanded_pname.append(name)
                        else:
                            for i in range(dim[0]):
                                Q.append((f'{name}[{i}]', dim[1:]))
                    pname = expanded_pname
                signal_decl_indices = ' '.join([f'[0:{(d - 1)}]' for d in p_n_dim])
                signal_decls.append(f'logic [{(nbits - 1)}:0] {vname} {signal_decl_indices}')
                if p_n_dim:
                    # Flatten arrays with the streaming operator when binding.
                    dut_signal_decls.append(f'.{vname}({{ >> {{ {vname} }} }})')
                else:
                    dut_signal_decls.append(f'.{vname}({vname})')
                # Emit one task argument (and assign/check statement) per
                # scalar element of the (possibly multidimensional) port.
                Q = deque([(vname, vname, p_n_dim)])
                tot = 0
                while Q:
                    (name, mangled_name, indices) = Q.popleft()
                    if (not indices):
                        pyname = pname[tot]
                        if (direction == 'input'):
                            task_signal_decls.append(f'input logic [{(nbits - 1)}:0] inp_{mangled_name}')
                            task_assign_strs.append(f'{name} = inp_{mangled_name}')
                        else:
                            task_signal_decls.append(f'input logic [{(nbits - 1)}:0] ref_{mangled_name}')
                            task_check_strs.append(f'`CHECK(lineno, {name}, ref_{mangled_name}, "{pyname} ({name} in Verilog)")')
                        tot += 1
                        py_signal_order.append(pyname)
                    else:
                        for i in range(indices[0]):
                            Q.append((f'{name}[{i}]', f'{mangled_name}__{i}', indices[1:]))
            dut_name = x._ip_cfg.translated_top_module
            with open(f'{dut_name}_{case_name}_tb.v', 'w') as output:
                output.write(tb_template.format(args_strs=','.join([f'a{i}' for i in range(len(task_signal_decls))]), harness_name=(dut_name + '_tb'), signal_decls=';\n '.join(signal_decls), task_signal_decls=',\n '.join(task_signal_decls), task_assign_strs=';\n '.join(task_assign_strs), task_check_strs=';\n '.join(task_check_strs), dut_name=dut_name, dut_clk_decl=('.clk(clk)' if x._ph_cfg.has_clk else ''), dut_reset_decl=('.reset(reset)' if x._ph_cfg.has_reset else ''), dut_signal_decls=',\n '.join(dut_signal_decls), cases_file_name=f'{dut_name}_{case_name}_tb.v.cases'))
            # Intentionally left open: the generated hook writes to it during
            # simulation for the remainder of the process lifetime.
            case_file = open(f'{dut_name}_{case_name}_tb.v.cases', 'w')
            tbgen_hooks.append(self.gen_hook_func(top, x, py_signal_order, case_file))

    def gen_hook_func(self, top, x, ports, case_file):
        """Build a closure that appends one `T(...) case line per cycle."""
        port_srcs = [f"'h{{str(x.{p}.to_bits())}}" for p in ports]
        src = '\ndef dump_case():\n    if top.sim_cycle_count() > 2: # skip the 2 cycles of reset\n        print(f"`T({});", file=case_file, flush=True)\n'.format(','.join(port_srcs))
        _locals = {}
        custom_exec(py.code.Source(src).compile(), {'top': top, 'x': x, 'case_file': case_file}, _locals)
        return _locals['dump_case']
class SimpleCNNMNIST_header(nn.Module):
    """LeNet-style feature extractor ("header") for MNIST.

    Two conv+ReLU+maxpool stages followed by two fully connected layers;
    returns the fc2 features (no classification head). For 28x28 inputs,
    ``input_dim`` should be 16*4*4.
    """

    def __init__(self, input_dim, hidden_dims, output_dim=10, input_channels=1):
        super(SimpleCNNMNIST_header, self).__init__()
        # Submodule creation order is kept identical so parameter
        # initialization (RNG consumption) matches the previous revision.
        self.conv1 = nn.Conv2d(input_channels, 6, 5)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(input_dim, hidden_dims[0])
        self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])

    def forward(self, x):
        features = self.pool(self.relu(self.conv1(x)))
        features = self.pool(self.relu(self.conv2(features)))
        flat = features.view(-1, 16 * 4 * 4)
        hidden = self.relu(self.fc1(flat))
        return self.relu(self.fc2(hidden))
def _feature_tokenize(string, layer=0, tok_delim=None, feat_delim=None, truncate=None):
tokens = string.split(tok_delim)
if (truncate is not None):
tokens = tokens[:truncate]
if (feat_delim is not None):
tokens = [t.split(feat_delim)[layer] for t in tokens]
return tokens |
def run_data_migration(apps, schema_editor):
    """Attach the current site's tasks and matching views to every project.

    Uses historical models via ``apps.get_model`` so the migration does not
    depend on the current model definitions.
    """
    Project = apps.get_model('projects', 'Project')
    Task = apps.get_model('tasks', 'Task')
    View = apps.get_model('views', 'View')
    for project in Project.objects.all():
        # Every task of the current site belongs to every project.
        for task in Task.objects.filter(sites=settings.SITE_ID):
            project.tasks.add(task)
        # Views of the current site that are catalog-agnostic or match
        # this project's catalog.
        matching_views = View.objects.filter(sites=settings.SITE_ID).filter(
            models.Q(catalogs=None) | models.Q(catalogs=project.catalog)
        )
        for view in matching_views:
            project.views.add(view)
class model_gx(nn.Module):
    """MLP generator: three Linear+BatchNorm+ReLU stages (256 -> 128 -> 256)
    followed by a Linear output layer squashed with tanh."""

    def __init__(self, input_size, output_size):
        super(model_gx, self).__init__()
        # Creation order preserved so parameter initialization matches.
        self.linear1 = nn.Linear(input_size, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.linear2 = nn.Linear(256, 128)
        self.bn2 = nn.BatchNorm1d(128)
        self.linear3 = nn.Linear(128, 256)
        self.bn3 = nn.BatchNorm1d(256)
        self.linear4 = nn.Linear(256, output_size)

    def forward(self, feature):
        h = feature
        for linear, bn in ((self.linear1, self.bn1),
                           (self.linear2, self.bn2),
                           (self.linear3, self.bn3)):
            h = F.relu(bn(linear(h)))
        return torch.tanh(self.linear4(h))
def playground(cfg):
    """Load a trained G.pt checkpoint and generate interpolation samples.

    ``cfg`` is the experiment config (Hydra/OmegaConf-style). Requires CUDA.
    """
    set_seed(cfg)
    # Checkpoint dict holding 'G' (instantaneous) and 'G_ema' (EMA) weights.
    state_dict = find_model(cfg.resume_path)
    # Train split is built first: its normalization range (min_val/max_val)
    # is reused by the test split below.
    train_dataset = ParameterDataset(dataset_dir=cfg.dataset.path, dataset_name=cfg.dataset.name, num_test_runs=cfg.dataset.num_test_runs, openai_coeff=cfg.dataset.openai_coeff, normalizer_name=cfg.dataset.normalizer, split='train', train_metric=cfg.dataset.train_metric, permute_augment=cfg.dataset.augment, target_epoch_size=cfg.dataset.target_epoch_size)
    dataset = ParameterDataset(dataset_dir=cfg.dataset.path, dataset_name=cfg.dataset.name, num_test_runs=cfg.dataset.num_test_runs, openai_coeff=cfg.dataset.openai_coeff, normalizer_name=cfg.dataset.normalizer, min_val=train_dataset.min_val, max_val=train_dataset.max_val, split='test', train_metric=cfg.dataset.train_metric, permute_augment=cfg.dataset.augment, target_epoch_size=cfg.dataset.target_epoch_size)
    model = Gpt(parameter_sizes=train_dataset.parameter_sizes, parameter_names=train_dataset.parameter_names, predict_xstart=cfg.transformer.predict_xstart, absolute_loss_conditioning=cfg.transformer.absolute_loss_conditioning, chunk_size=cfg.transformer.chunk_size, split_policy=cfg.transformer.split_policy, max_freq_log2=cfg.transformer.max_freq_log2, num_frequencies=cfg.transformer.num_frequencies, n_embd=cfg.transformer.n_embd, encoder_depth=cfg.transformer.encoder_depth, decoder_depth=cfg.transformer.decoder_depth, n_layer=cfg.transformer.n_layer, n_head=cfg.transformer.n_head, attn_pdrop=cfg.transformer.dropout_prob, resid_pdrop=cfg.transformer.dropout_prob, embd_pdrop=cfg.transformer.dropout_prob)
    # Fixed 1000-step linear-schedule diffusion; sigma is not learned.
    diffusion = create_diffusion(learn_sigma=False, predict_xstart=cfg.transformer.predict_xstart, noise_schedule='linear', steps=1000)
    if cfg.transformer.ema:
        print('Loading EMA model...')
        model.load_state_dict(state_dict['G_ema'])
    else:
        print('Loading instantaneous model...')
        model.load_state_dict(state_dict['G'])
    model = model.to('cuda')
    model.eval()
    generate_interpolation_samples(model, diffusion, dataset, seed=cfg.rng_seed)
# NOTE(review): the line above this function read '_grad()' — the mangled
# remnant of a decorator (a NameError at import time as written). Restored to
# @torch.no_grad(), consistent with this being pure inference.
@torch.no_grad()
def predict(dataloader, model, n_samples=1, T=1):
    """Average temperature-scaled softmax predictions over stochastic passes.

    Args:
        dataloader: yields (inputs, labels) batches; labels are ignored.
        model: module with a ``forward`` method; inputs are moved to CUDA.
        n_samples: number of forward passes to average (e.g. MC-dropout).
        T: softmax temperature divisor.

    Returns:
        Tensor of averaged class probabilities, one row per input example.
    """
    py = []
    for (x, _) in dataloader:
        x = x.cuda()
        py_ = 0
        for _ in range(n_samples):
            f_s = model.forward(x)
            py_ += torch.softmax((f_s / T), 1)
        py_ /= n_samples
        py.append(py_)
    return torch.cat(py, dim=0)
def test_ip6_addresses_to_indexes():
    """An interface index and the equivalent literal IPv6 address must both
    resolve to the same (address-tuple, index) pairs."""
    expected = [(('2001:db8::', 1, 1), 1)]
    for interfaces in ([1], ['2001:db8::']):
        with patch('zeroconf._utils.net.ifaddr.get_adapters', return_value=_generate_mock_adapters()):
            assert netutils.ip6_addresses_to_indexes(interfaces) == expected
def generate(opts):
    """Generate a root CA plus server and client leaf certificate/key pairs,
    then copy all artifacts into ./result."""
    cli.validate_password_if_provided(opts)
    print('Will generate a root CA and two certificate/key pairs (server and client)')
    g.generate_root_ca(opts)
    cn = opts.common_name
    # One leaf pair per role, named "<role>_<common name>".
    for role in ('server', 'client'):
        g.generate_leaf_certificate_and_key_pair(role, opts, '{}_{}'.format(role, cn))
    _copy_artifacts_to_results(opts)
    print('Done! Find generated certificates and private keys under ./result!')
def list_from_param(param):
    """Normalize ``param`` into a list.

    Falsy values give []; lists pass through; a string naming an existing
    file is read line-by-line; any other string is split on commas.
    """
    if not param:
        return []
    if isinstance(param, list):
        return param
    if isinstance(param, str):
        if isfile(param):
            # Treat the string as a path: one list entry per line.
            with read_file(param) as f:
                return f.read().splitlines()
        return param.split(',')
def train(start_epoch):
    """Main training loop from ``start_epoch`` to MAX_EPOCH.

    Evaluates and checkpoints on epoch 0 and every 10th epoch thereafter.
    Relies on module-level globals: MAX_EPOCH, optimizer, net, bnm_scheduler,
    LOG_DIR, log_string, train_one_epoch, evaluate_one_epoch, get_current_lr.
    """
    global EPOCH_CNT
    # NOTE(review): min_loss and loss are never read or updated in this
    # body — they look vestigial.
    min_loss = .0
    loss = 0
    for epoch in range(start_epoch, MAX_EPOCH):
        EPOCH_CNT = epoch
        log_string(('**** EPOCH %03d ****' % epoch))
        log_string(('Current learning rate: %f' % get_current_lr(epoch)))
        log_string(('Current BN decay momentum: %f' % bnm_scheduler.lmbd(bnm_scheduler.last_epoch)))
        log_string(str(datetime.now()))
        # Reseed numpy from OS entropy so per-epoch randomness differs.
        np.random.seed()
        train_one_epoch()
        # Evaluate + checkpoint on epoch 0 and epochs 9, 19, 29, ...
        if ((EPOCH_CNT == 0) or ((EPOCH_CNT % 10) == 9)):
            evaluate_one_epoch()
            # NOTE(review): '(epoch + 1) - 10' records an epoch number 10
            # behind the current one — confirm this offset is intentional.
            save_dict = {'epoch': ((epoch + 1) - 10), 'optimizer_state_dict': optimizer.state_dict()}
            try:
                # net may be wrapped (DataParallel/DDP) — unwrap if possible.
                save_dict['model_state_dict'] = net.module.state_dict()
            except:
                save_dict['model_state_dict'] = net.state_dict()
            torch.save(save_dict, os.path.join(LOG_DIR, 'train_BR.tar'))
class RulePtr:
    """An LR item: a dotted position inside a rule's expansion
    (``origin -> before * after``).

    ``index`` is the dot position: symbols before it are matched, symbols
    after it are still expected.
    """
    __slots__ = ('rule', 'index')
    rule: Rule
    index: int

    def __init__(self, rule: Rule, index: int):
        assert isinstance(rule, Rule)
        assert (index <= len(rule.expansion))
        self.rule = rule
        self.index = index

    def __repr__(self):
        before = [x.name for x in self.rule.expansion[:self.index]]
        after = [x.name for x in self.rule.expansion[self.index:]]
        return ('<%s : %s * %s>' % (self.rule.origin.name, ' '.join(before), ' '.join(after)))

    @property
    def next(self) -> Symbol:
        # The symbol immediately after the dot.
        # NOTE(review): restored @property (lost in a previous revision):
        # advance() compares `self.next == sym`, which is always False when
        # `next` is a bound method rather than an attribute-style accessor.
        return self.rule.expansion[self.index]

    def advance(self, sym: Symbol) -> 'RulePtr':
        """Return a new RulePtr with the dot moved past ``sym``."""
        assert (self.next == sym)
        return RulePtr(self.rule, (self.index + 1))

    def is_satisfied(self) -> bool:
        """True when the dot has reached the end of the expansion."""
        return (self.index == len(self.rule.expansion))

    def __eq__(self, other) -> bool:
        if (not isinstance(other, RulePtr)):
            return NotImplemented
        return ((self.rule == other.rule) and (self.index == other.index))

    def __hash__(self) -> int:
        return hash((self.rule, self.index))
# NOTE(review): the two lines above this function read '_specialize' and
# '_rewriter([pt_pow])' — mangled decorator remnants (NameErrors at import
# time as written). Restored to the standard PyTensor rewrite registration —
# confirm against upstream.
@register_specialize
@node_rewriter([pt_pow])
def local_pow_to_nested_squaring(fgraph, node):
    """Rewrite ``x ** k`` (constant integer |k| <= 512) into nested
    squaring/multiplication; negative exponents add a reciprocal.

    Returns None (no rewrite) for non-constant exponents, broadcasting
    exponents, or when the output broadcast pattern would change.
    """
    odtype = node.outputs[0].dtype
    xsym = node.inputs[0]
    ysym = node.inputs[1]
    y = get_constant(ysym)
    if isinstance(y, np.ndarray):
        assert (y.size == 1)
        try:
            y = y[0]
        except IndexError:
            pass
    if ((y is not None) and (not broadcasted_by(xsym, ysym))):
        rval = None
        if ((abs(y) == int(abs(y))) and (abs(y) <= 512)):
            # Precompute x, x^2, x^4, ... (tensor and scalar versions).
            pow2 = [xsym]
            pow2_scal = [ps.get_scalar_type(xsym.dtype)()]
            y_to_do = abs(y)
            for i in range(int(np.log2(y_to_do))):
                pow2.append(sqr(pow2[i]))
                pow2_scal.append(ps.sqr(pow2_scal[i]))
            # Multiply together the powers of two from |y|'s binary expansion.
            rval1 = None
            rval1_scal = None
            while (y_to_do > 0):
                log_to_do = int(np.log2(y_to_do))
                if rval1:
                    rval1 *= pow2[log_to_do]
                    rval1_scal *= pow2_scal[log_to_do]
                else:
                    rval1 = pow2[log_to_do]
                    rval1_scal = pow2_scal[log_to_do]
                y_to_do -= (2 ** log_to_do)
            if (abs(y) > 2):
                # Fuse the whole chain into one Composite Elemwise.
                rval1 = Elemwise(ps.Composite([pow2_scal[0]], [rval1_scal])).make_node(xsym)
            if (y < 0):
                rval = [reciprocal(rval1)]
            else:
                rval = [rval1]
        if rval:
            rval[0] = cast(rval[0], odtype)
            # Bail out rather than change the output broadcast pattern.
            if (rval[0].type.broadcastable != node.outputs[0].type.broadcastable):
                return None
            return rval
class STM32F4xxFlash(QlPeripheral):
    """Qiling peripheral model of the STM32F4xx embedded-flash interface
    registers, backed by a ctypes structure."""

    class Type(ctypes.Structure):
        # Register layout: ACR, KEYR, OPTKEYR, SR, CR, OPTCR, OPTCR1.
        _fields_ = [('ACR', ctypes.c_uint32), ('KEYR', ctypes.c_uint32), ('OPTKEYR', ctypes.c_uint32), ('SR', ctypes.c_uint32), ('CR', ctypes.c_uint32), ('OPTCR', ctypes.c_uint32), ('OPTCR1', ctypes.c_uint32)]

    def __init__(self, ql: Qiling, label: str, intn: int=None):
        super().__init__(ql, label)
        # Optional interrupt number for this peripheral.
        self.intn = intn
        # Backing register storage (instance of the Type struct).
        self.instance = self.struct()
    # NOTE(review): this bare "()" is a no-op (empty tuple); it looks like
    # the remnant of a stripped decorator on the next method — confirm
    # against upstream before removing or restoring.
    ()
    def read(self, offset: int, size: int) -> int:
        # Read `size` raw bytes at `offset` within the register block,
        # little-endian, and return them as an int.
        buf = ctypes.create_string_buffer(size)
        ctypes.memmove(buf, (ctypes.addressof(self.instance) + offset), size)
        return int.from_bytes(buf.raw, byteorder='little')
    # NOTE(review): same bare "()" no-op as above — possibly a stripped
    # decorator.
    ()
    def write(self, offset: int, size: int, value: int):
        # Write `value` as `size` little-endian bytes at `offset`.
        data = value.to_bytes(size, 'little')
        ctypes.memmove((ctypes.addressof(self.instance) + offset), data, size)
def get_dataloader(root_dir, local_rank, batch_size, dali=False, seed=2048, num_workers=2) -> Iterable:
    """Build the distributed training dataloader.

    Dataset selection: 'synthetic' -> SyntheticDataset; an MXNet RecordIO pair
    (train.rec/train.idx) in root_dir -> MXFaceDataset; otherwise a plain
    ImageFolder tree. Returns a DALI iterator when `dali` is True, else a
    DataLoaderX driven by a DistributedSampler.
    """
    rec = os.path.join(root_dir, 'train.rec')
    idx = os.path.join(root_dir, 'train.idx')
    train_set = None
    if (root_dir == 'synthetic'):
        train_set = SyntheticDataset()
        dali = False  # DALI needs real rec/idx files
    elif (os.path.exists(rec) and os.path.exists(idx)):
        train_set = MXFaceDataset(root_dir=root_dir, local_rank=local_rank)
    else:
        transform = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
        train_set = ImageFolder(root_dir, transform)
    # NOTE(review): if dali=True on the ImageFolder path, the nonexistent
    # rec/idx paths are still handed to DALI -- presumably callers only set
    # dali for RecordIO datasets; confirm.
    if dali:
        return dali_data_iter(batch_size=batch_size, rec_file=rec, idx_file=idx, num_threads=2, local_rank=local_rank)
    (rank, world_size) = get_dist_info()
    train_sampler = DistributedSampler(train_set, num_replicas=world_size, rank=rank, shuffle=True, seed=seed)
    if (seed is None):
        init_fn = None
    else:
        # Give every worker a distinct, reproducible RNG seed.
        init_fn = partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    train_loader = DataLoaderX(local_rank=local_rank, dataset=train_set, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers, pin_memory=True, drop_last=True, worker_init_fn=init_fn)
    return train_loader
class BadgeScannerQuery():
    """GraphQL queries for badge scans, always scoped to the authenticated scanner."""
    (permission_classes=[IsAuthenticated])  # NOTE(review): garbled -- presumably @strawberry.field(permission_classes=[...]); confirm upstream

    def badge_scan(self, info: Info, id: strawberry.ID) -> (BadgeScan | None):
        """Return one of the caller's own badge scans by id, or None if absent."""
        try:
            scan = models.BadgeScan.objects.get(id=id, scanned_by_id=info.context.request.user.id)
        except models.BadgeScan.DoesNotExist:
            return None
        return BadgeScan.from_db(scan)
    (permission_classes=[IsAuthenticated])  # NOTE(review): garbled decorator, as above

    def badge_scans(self, info: Info, conference_code: str, page: (int | None)=1, page_size: int=100) -> Paginated[BadgeScan]:
        """Page through the caller's badge scans for a conference, newest first."""
        scans = models.BadgeScan.objects.filter(conference__code=conference_code, scanned_by_id=info.context.request.user.id).order_by('-created')
        page = (page or 1)  # treat page=None (or 0) as the first page
        total_scans = scans.count()
        scans = scans[((page - 1) * page_size):(page * page_size)]
        return Paginated.paginate_list(items=[BadgeScan.from_db(scan) for scan in scans], page_size=page_size, total_items=total_scans, page=page)
class DisjunctiveTrie():
    """Prefix trie over several token-id sequences, used to track which
    continuations of a partially-generated sequence are still allowed."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        """Build the trie; optionally reject inputs where one sequence is a
        complete prefix (subset) of another."""
        self.max_height = max(len(seq) for seq in nested_token_ids)
        root = dict()
        for seq in nested_token_ids:
            node = root
            for token_id in seq:
                node = node.setdefault(token_id, dict())
        if (no_subsets and self.has_subsets(root, nested_token_ids)):
            raise ValueError(f"Each list in `nested_token_ids` can't be a complete subset of another list, but is {nested_token_ids}.")
        self.trie = root

    def next_tokens(self, current_seq):
        """Token ids that may legally follow `current_seq`."""
        node = self.trie
        for token in current_seq:
            node = node[token]
        return list(node.keys())

    def reached_leaf(self, current_seq):
        """True when `current_seq` completes one of the stored sequences."""
        return not self.next_tokens(current_seq)

    def count_leaves(self, root):
        """Number of leaf nodes under `root` (a leaf encodes one full sequence)."""
        children = list(root.values())
        if not children:
            return 1
        return sum(self.count_leaves(child) for child in children)

    def has_subsets(self, trie, nested_token_ids):
        """If any sequence is a prefix of another, the two merge into one leaf,
        so the leaf count drops below the number of input sequences."""
        return self.count_leaves(trie) != len(nested_token_ids)
def apply_diff(cache_dir: str, diff_file: str, sqlite: bool=False) -> None:
    """Apply a JSON diff (file -> new content, or None meaning delete) to a
    mypy cache, keeping the dependency snapshot in the cache's '.json'
    metadata in sync with updated .meta.json entries."""
    cache = make_cache(cache_dir, sqlite)
    with open(diff_file) as f:
        diff = json.load(f)
    old_deps = json.loads(cache.read('.json'))
    for (file, data) in diff.items():
        if (data is None):
            cache.remove(file)  # None marks a deletion in the diff format
        else:
            cache.write(file, data)
            # NOTE(review): `('' not in file)` is always False (every string
            # contains ''), so this snapshot update is dead code -- the original
            # exclusion substring was likely lost in extraction; confirm upstream.
            if (file.endswith('.meta.json') and ('' not in file)):
                meta = json.loads(data)
                old_deps['snapshot'][meta['id']] = meta['hash']
    cache.write('.json', json.dumps(old_deps))
    cache.commit()
def get_debugger():
    """Return the best available debugger.

    Prefers IPython's Pdb (modern import path first, then the legacy one);
    falls back to the stdlib ``pdb`` module with a warning when IPython is
    not installed.
    """
    # Modern IPython layout.
    try:
        from IPython.core.debugger import Pdb
        return Pdb()
    except ImportError:
        pass
    # Legacy IPython layout; needs an interactive shell bootstrapped first.
    try:
        from IPython.Debugger import Pdb
        from IPython.Shell import IPShell
        IPShell(argv=[''])
        return Pdb()
    except ImportError:
        warnings.warn('pdb was selected as a debugger. If you want to use ipython as a debugger you have to "pip install radish-bdd[ipython-debugger]"')
        import pdb
        return pdb
class CodeAssistInProjectsTest(unittest.TestCase):
    """End-to-end code_assist tests against a real on-disk rope sample project.

    setUp creates `samplemod` (class/function/var, plus an underscored helper)
    and a `package.nestedmod`; tearDown removes the project tree.
    """

    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore
        samplemod = testutils.create_module(self.project, 'samplemod')
        code = dedent(' class SampleClass(object):\n def sample_method():\n pass\n\n def sample_func():\n pass\n sample_var = 10\n\n def _underlined_func():\n pass\n\n ')
        samplemod.write(code)
        package = testutils.create_package(self.project, 'package')
        nestedmod = testutils.create_module(self.project, 'nestedmod', package)

    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()

    # --- helpers -----------------------------------------------------------
    def _assist(self, code, resource=None, **kwds):
        # Complete at the very end of `code`.
        return code_assist(self.project, code, len(code), resource, **kwds)

    def assert_completion_in_result(self, name, scope, result):
        for proposal in result:
            if ((proposal.name == name) and (proposal.scope == scope)):
                return
        self.fail(('completion <%s> not proposed' % name))

    def assert_completion_not_in_result(self, name, scope, result):
        for proposal in result:
            if ((proposal.name == name) and (proposal.scope == scope)):
                self.fail(('completion <%s> was proposed' % name))

    # --- import completion -------------------------------------------------
    def test_simple_import(self):
        code = dedent(' import samplemod\n sample')
        result = self._assist(code)
        self.assert_completion_in_result('samplemod', 'imported', result)

    def test_from_import_class(self):
        code = dedent(' from samplemod import SampleClass\n Sample')
        result = self._assist(code)
        self.assert_completion_in_result('SampleClass', 'imported', result)

    def test_from_import_function(self):
        code = dedent(' from samplemod import sample_func\n sample')
        result = self._assist(code)
        self.assert_completion_in_result('sample_func', 'imported', result)

    def test_from_import_variable(self):
        code = dedent(' from samplemod import sample_var\n sample')
        result = self._assist(code)
        self.assert_completion_in_result('sample_var', 'imported', result)

    def test_from_imports_inside_functions(self):
        code = dedent(' def f():\n from samplemod import SampleClass\n Sample')
        result = self._assist(code)
        self.assert_completion_in_result('SampleClass', 'imported', result)

    def test_from_import_only_imports_imported(self):
        code = dedent(' from samplemod import sample_func\n Sample')
        result = self._assist(code)
        self.assert_completion_not_in_result('SampleClass', 'global', result)

    def test_from_import_star(self):
        code = dedent(' from samplemod import *\n Sample')
        result = self._assist(code)
        self.assert_completion_in_result('SampleClass', 'imported', result)

    def test_from_import_star2(self):
        code = dedent(' from samplemod import *\n sample')
        result = self._assist(code)
        self.assert_completion_in_result('sample_func', 'imported', result)
        self.assert_completion_in_result('sample_var', 'imported', result)

    def test_from_import_star_not_importing_underlined(self):
        # Star-import must respect the _private naming convention.
        code = dedent(' from samplemod import *\n _under')
        result = self._assist(code)
        self.assert_completion_not_in_result('_underlined_func', 'global', result)

    def test_from_package_import_mod(self):
        code = dedent(' from package import nestedmod\n nest')
        result = self._assist(code)
        self.assert_completion_in_result('nestedmod', 'imported', result)

    # --- attribute (dotted) completion -------------------------------------
    def test_completing_after_dot(self):
        code = dedent(' class SampleClass(object):\n def sample_method(self):\n pass\n SampleClass.sam')
        result = self._assist(code)
        self.assert_completion_in_result('sample_method', 'attribute', result)

    def test_completing_after_multiple_dots(self):
        code = dedent(' class Class1(object):\n class Class2(object):\n def sample_method(self):\n pass\n Class1.Class2.sam')
        result = self._assist(code)
        self.assert_completion_in_result('sample_method', 'attribute', result)

    def test_completing_after_self_dot(self):
        code = dedent(' class Sample(object):\n def method1(self):\n pass\n def method2(self):\n self.m')
        result = self._assist(code)
        self.assert_completion_in_result('method1', 'attribute', result)

    def test_result_start_offset_for_dotted_completions(self):
        code = dedent(' class Sample(object):\n def method1(self):\n pass\n Sample.me')
        # Completion should start right after the dot ("me" -> 2 chars back).
        self.assertEqual((len(code) - 2), starting_offset(code, len(code)))

    def test_backslash_after_dots(self):
        # Line continuation between the dot and the attribute name.
        code = dedent(' class Sample(object):\n def a_method(self):\n pass\n Sample.\\\n a_m')
        result = self._assist(code)
        self.assert_completion_in_result('a_method', 'attribute', result)

    def test_not_proposing_global_names_after_dot(self):
        code = dedent(' class Sample(object):\n def a_method(self):\n pass\n Sample.')
        result = self._assist(code)
        self.assert_completion_not_in_result('Sample', 'global', result)

    # --- relative imports / definition location / docs ---------------------
    def test_assist_on_relative_imports(self):
        pkg = testutils.create_package(self.project, 'pkg')
        mod1 = testutils.create_module(self.project, 'mod1', pkg)
        mod2 = testutils.create_module(self.project, 'mod2', pkg)
        mod1.write(dedent(' def a_func():\n pass\n '))
        code = dedent(' import mod1\n mod1.')
        result = self._assist(code, resource=mod2)
        self.assert_completion_in_result('a_func', 'imported', result)

    def test_get_location_on_relative_imports(self):
        pkg = testutils.create_package(self.project, 'pkg')
        mod1 = testutils.create_module(self.project, 'mod1', pkg)
        mod2 = testutils.create_module(self.project, 'mod2', pkg)
        mod1.write(dedent(' def a_func():\n pass\n '))
        code = dedent(' import mod1\n mod1.a_func\n ')
        result = get_definition_location(self.project, code, (len(code) - 2), mod2)
        self.assertEqual((mod1, 1), result)

    def test_get_definition_location_for_builtins(self):
        code = 'import sys\n'
        result = get_definition_location(self.project, code, (len(code) - 2))
        # Builtins have no project-resource location.
        self.assertEqual((None, None), result)

    def test_get_doc_on_relative_imports(self):
        pkg = testutils.create_package(self.project, 'pkg')
        mod1 = testutils.create_module(self.project, 'mod1', pkg)
        mod2 = testutils.create_module(self.project, 'mod2', pkg)
        mod1.write(dedent(' def a_func():\n """hey"""\n pass\n '))
        code = dedent(' import mod1\n mod1.a_func\n ')
        result = get_doc(self.project, code, (len(code) - 2), mod2)
        self.assertTrue(result.endswith('hey'))

    def test_get_doc_on_from_import_module(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod1.write(dedent(' """mod1 docs"""\n var = 1\n '))
        code = 'from mod1 import var\n'
        result = get_doc(self.project, code, code.index('mod1'))
        result.index('mod1 docs')

    def test_fixing_errors_with_maxfixes_in_resources(self):
        # Syntax errors before the completion point are repaired up to maxfixes.
        mod = testutils.create_module(self.project, 'mod')
        code = dedent(' def f():\n sldj sldj\n def g():\n ran')
        mod.write(code)
        result = self._assist(code, maxfixes=2, resource=mod)
        self.assertTrue((len(result) > 0))

    def test_completing_names_after_from_import(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('myvar = None\n')
        result = self._assist('from mod1 import myva', resource=mod2)
        self.assertTrue((len(result) > 0))
        self.assert_completion_in_result('myvar', 'global', result)

    def test_completing_names_after_from_import_and_sorted_proposals(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('myvar = None\n')
        result = self._assist('from mod1 import myva', resource=mod2)
        result = sorted_proposals(result)
        self.assertTrue((len(result) > 0))
        self.assert_completion_in_result('myvar', 'global', result)

    def test_completing_names_after_from_import2(self):
        mod1 = testutils.create_module(self.project, 'mod1')
        mod2 = testutils.create_module(self.project, 'mod2')
        mod1.write('myvar = None\n')
        result = self._assist('from mod1 import ', resource=mod2)
        self.assertTrue((len(result) > 0))
        self.assert_completion_in_result('myvar', 'global', result)

    def test_starting_expression(self):
        code = dedent(' l = list()\n l.app')
        self.assertEqual('l.app', starting_expression(code, len(code)))
def test_kraus_map(dimensions, dtype):
    """rand_kraus_map rejects super-operator-style nested dimensions; otherwise
    its Kraus operators must form a CPTP channel with matching metadata."""
    if (isinstance(dimensions, list) and isinstance(dimensions[0], list)):
        # Nested dims describe a super operator, which a Kraus map can't take.
        with pytest.raises(TypeError) as err:
            kmap = rand_kraus_map(dimensions, dtype=dtype)
        assert ('super operator' in str(err.value))
    else:
        kmap = rand_kraus_map(dimensions, dtype=dtype)
        _assert_metadata(kmap[0], dimensions, dtype)
        # Loose tolerance: the CPTP check on the reconstructed Choi matrix is float-fuzzy.
        with CoreOptions(atol=1e-09):
            assert kraus_to_choi(kmap).iscptp
class ClassificationModel(object):
    """GPflow 1.x SVGP classifier: Bernoulli likelihood for K=2, MultiClass otherwise.

    Hyper-parameters live in an inner ARGS namespace; `is_test` selects a tiny
    configuration for fast smoke tests. Uses the legacy session-based API.
    """

    def __init__(self, K, is_test=False, seed=0):
        if is_test:
            class ARGS():
                num_inducing = 2
                iterations = 1
                small_iterations = 1
                adam_lr = 0.01
                minibatch_size = 100
        else:
            class ARGS():
                num_inducing = 100
                iterations = 10000
                small_iterations = 1000
                adam_lr = 0.01
                minibatch_size = 1000
        self.ARGS = ARGS
        self.K = K  # number of classes
        self.model = None  # built lazily on first fit()

    def fit(self, X, Y):
        """Fit the SVGP on (X, Y); subsequent calls warm-start with fewer iterations."""
        # Inducing points: k-means centres when there is enough data, else the data itself.
        Z = (kmeans2(X, self.ARGS.num_inducing, minit='points')[0] if (X.shape[0] > self.ARGS.num_inducing) else X.copy())
        if (not self.model):
            # First call: build model, optimizer and TF session.
            mb_size = (self.ARGS.minibatch_size if (X.shape[0] >= self.ARGS.minibatch_size) else None)
            if (self.K == 2):
                lik = gpflow.likelihoods.Bernoulli()
                num_latent = 1
            else:
                lik = gpflow.likelihoods.MultiClass(self.K)
                num_latent = self.K
            kern = gpflow.kernels.RBF(X.shape[1], lengthscales=(float(X.shape[1]) ** 0.5))
            self.model = gpflow.models.SVGP(X, Y, kern, lik, feat=Z, whiten=False, num_latent=num_latent, minibatch_size=mb_size)
            self.opt = gpflow.train.AdamOptimizer(self.ARGS.adam_lr)
            self.sess = self.model.enquire_session()
            iters = self.ARGS.iterations
        else:
            # Warm start: swap in new data/inducing points, reset the variational posterior.
            iters = self.ARGS.small_iterations
            self.model.X.assign(X, session=self.sess)
            self.model.Y.assign(Y, session=self.sess)
            self.model.feature.Z.assign(Z, session=self.sess)
            num_outputs = self.model.q_sqrt.shape[0]
            self.model.q_mu.assign(np.zeros((self.ARGS.num_inducing, num_outputs)), session=self.sess)
            self.model.q_sqrt.assign(np.tile(np.eye(self.ARGS.num_inducing)[None], [num_outputs, 1, 1]), session=self.sess)
        self.opt.minimize(self.model, maxiter=iters, session=self.sess)

    def predict(self, Xs):
        """Return class probabilities, shape (n, K)."""
        (m, v) = self.model.predict_y(Xs, session=self.sess)
        if (self.K == 2):
            # Bernoulli yields p(class 1) only; expand to two columns.
            return np.concatenate([(1 - m), m], 1)
        else:
            return m
def main(_):
    """Entry point: build model/input pipeline from the pipeline config and train."""
    (model_config, train_config, input_config) = get_configs_from_pipeline_file()
    # Bind configs now; trainer calls these zero-/one-arg factories later.
    model_fn = functools.partial(build_man_model, model_config=model_config, is_training=True)
    create_input_dict_fn = functools.partial(input_reader.read_seq, input_config)
    trainer_seq.train(model_fn, create_input_dict_fn, train_config, FLAGS.train_dir, FLAGS.image_root)
def test_setup_cfg(testdir, xdist_args):
    """mypy settings from setup.cfg (disallow_untyped_defs) must surface as errors."""
    testdir.makefile('.cfg', setup='\n [mypy]\n disallow_untyped_defs = True\n ')
    testdir.makepyfile(conftest='\n def pyfunc(x):\n return x * 2\n ')
    result = testdir.runpytest_subprocess('--mypy', *xdist_args)
    result.stdout.fnmatch_lines(['1: error: Function is missing a type annotation*'])
    assert (result.ret != 0)  # mypy failure must fail the pytest run
class Label(datatype('Label', ['key', 'value', 'uuid', 'source_type_name', 'media_type_name'])):
    """Immutable data-transfer view of a database label row."""

    def for_label(cls, label):
        """Build a Label view from a DB row; returns None for a missing row."""
        # NOTE(review): takes `cls` but carries no @classmethod decorator --
        # presumably lost in extraction; confirm before calling via the class.
        if (label is None):
            return None
        # NOTE(review): db_id is not among the declared datatype fields above --
        # presumably `datatype` tolerates extra kwargs; confirm.
        return Label(db_id=label.id, key=label.key, value=label.value, uuid=label.uuid, media_type_name=model.label.get_media_types()[label.media_type_id], source_type_name=model.label.get_label_source_types()[label.source_type_id])
def Fmt_test():
    """Demonstrate galgebra's global Fmt() vs per-multivector .Fmt() print modes."""
    Print_Function()
    e3d = Ga('e1 e2 e3', g=[1, 1, 1])  # 3-d Euclidean geometric algebra
    v = e3d.mv('v', 'vector')
    B = e3d.mv('B', 'bivector')
    M = e3d.mv('M', 'mv')  # general multivector
    Fmt(2)
    print('#Global $Fmt = 2$')
    print('v =', v)
    print('B =', B)
    print('M =', M)
    print('#Using $.Fmt()$ Function')
    print('v.Fmt(3) =', v.Fmt(3))
    print('B.Fmt(3) =', B.Fmt(3))
    print('M.Fmt(2) =', M.Fmt(2))
    print('M.Fmt(1) =', M.Fmt(1))
    print('#Global $Fmt = 1$')
    Fmt(1)
    print('v =', v)
    print('B =', B)
    print('M =', M)
    return
(dataset=dataset_utm_north_down())
def test_window_rt_north_down(dataset):
    """dataset.window over the full north-down extent equals the full-raster Window."""
    # Map corner pixel coordinates through the affine transform to world coords.
    (left, top) = (dataset.transform * (0, 0))
    (right, bottom) = (dataset.transform * (dataset.width, dataset.height))
    assert_windows_almost_equal(dataset.window(left, bottom, right, top), windows.Window(0, 0, dataset.width, dataset.height))
def test_jsx():
    """jslexer.tokenize with jsx=True: JSX tags become 'jsx_tag' tokens while
    embedded i18n._ calls still lex as name/string/operator tokens, and every
    token carries the correct 1-based line number."""
    assert (list(jslexer.tokenize('\n <option value="val1">{ i18n._(\'String1\') }</option>\n <option value="val2">{ i18n._(\'String 2\') }</option>\n <option value="val3">{ i18n._(\'String 3\') }</option>\n <component value={i18n._(\'String 4\')} />\n <comp2 prop={<comp3 />} data={{active: true}}>\n <btn text={ i18n._(\'String 5\') } />\n </comp2>\n ', jsx=True)) == [('jsx_tag', '<option', 2), ('name', 'value', 2), ('operator', '=', 2), ('string', '"val1"', 2), ('operator', '>', 2), ('operator', '{', 2), ('name', 'i18n._', 2), ('operator', '(', 2), ('string', "'String1'", 2), ('operator', ')', 2), ('operator', '}', 2), ('jsx_tag', '</option', 2), ('operator', '>', 2), ('jsx_tag', '<option', 3), ('name', 'value', 3), ('operator', '=', 3), ('string', '"val2"', 3), ('operator', '>', 3), ('operator', '{', 3), ('name', 'i18n._', 3), ('operator', '(', 3), ('string', "'String 2'", 3), ('operator', ')', 3), ('operator', '}', 3), ('jsx_tag', '</option', 3), ('operator', '>', 3), ('jsx_tag', '<option', 4), ('name', 'value', 4), ('operator', '=', 4), ('string', '"val3"', 4), ('operator', '>', 4), ('operator', '{', 4), ('name', 'i18n._', 4), ('operator', '(', 4), ('string', "'String 3'", 4), ('operator', ')', 4), ('operator', '}', 4), ('jsx_tag', '</option', 4), ('operator', '>', 4), ('jsx_tag', '<component', 5), ('name', 'value', 5), ('operator', '=', 5), ('operator', '{', 5), ('name', 'i18n._', 5), ('operator', '(', 5), ('string', "'String 4'", 5), ('operator', ')', 5), ('operator', '}', 5), ('jsx_tag', '/>', 5), ('jsx_tag', '<comp2', 6), ('name', 'prop', 6), ('operator', '=', 6), ('operator', '{', 6), ('jsx_tag', '<comp3', 6), ('jsx_tag', '/>', 6), ('operator', '}', 6), ('name', 'data', 6), ('operator', '=', 6), ('operator', '{', 6), ('operator', '{', 6), ('name', 'active', 6), ('operator', ':', 6), ('name', 'true', 6), ('operator', '}', 6), ('operator', '}', 6), ('operator', '>', 6), ('jsx_tag', '<btn', 7), ('name', 'text', 7), ('operator', '=', 7), ('operator', '{', 7), 
    ('name', 'i18n._', 7), ('operator', '(', 7), ('string', "'String 5'", 7), ('operator', ')', 7), ('operator', '}', 7), ('jsx_tag', '/>', 7), ('jsx_tag', '</comp2', 8), ('operator', '>', 8)])
class Acquirer():
    """Batch acquisition policy for pool-based active learning.

    Scores candidates with a metric from `metrics` over surrogate predictions
    (mean/variance) and keeps the top `batch_sizes[t]` items per iteration via
    bounded min-heaps, optionally stratified by cluster with temperature-based
    heap shrinking.
    """

    def __init__(self, size: int, init_size: Union[(int, float)]=0.01, batch_sizes: Iterable[Union[(int, float)]]=[0.01], metric: str='greedy', epsilon: float=0.0, beta: int=2, xi: float=0.01, threshold: float=float('-inf'), temp_i: Optional[float]=None, temp_f: Optional[float]=1.0, seed: Optional[int]=None, verbose: int=0, **kwargs):
        self.size = size  # total pool size
        self.init_size = init_size  # via setter below: fraction -> absolute count
        self.batch_sizes = batch_sizes  # via setter below
        self.metric = metric
        self.stochastic_preds = False
        if (not (0.0 <= epsilon <= 1.0)):
            raise ValueError(f'Epsilon(={epsilon}) must be in [0, 1]')
        self.epsilon = epsilon  # fraction of each batch chosen uniformly at random
        self.beta = beta  # exploration weight passed to metrics.calc
        self.xi = xi  # exploration offset passed to metrics.calc
        self.threshold = threshold
        self.temp_i = temp_i  # initial/final temperatures for cluster-heap decay
        self.temp_f = temp_f
        self.seed = seed
        self.verbose = verbose
        metrics.set_seed(self.seed)

    def __len__(self) -> int:
        return self.size

    def needs(self) -> Set[str]:
        """Prediction quantities (e.g. variance) required by the chosen metric."""
        return metrics.get_needs(self.metric)

    def init_size(self) -> int:
        # NOTE(review): missing @property decorator, and the bare `_size.setter`
        # line below is a garbled @init_size.setter -- as written the class body
        # raises NameError at definition time. Presumably lost in extraction;
        # confirm against the upstream source.
        return self.__init_size
    _size.setter

    def init_size(self, init_size: Union[(int, float)]):
        """Accept an absolute count or a fraction of the pool size."""
        if isinstance(init_size, float):
            if ((init_size < 0) or (init_size > 1)):
                raise ValueError(f'init_size(={init_size} must be in [0, 1]')
            init_size = math.ceil((self.size * init_size))
        if (init_size < 0):
            raise ValueError(f'init_size(={init_size}) must be positive')
        self.__init_size = init_size

    def batch_sizes(self) -> List[int]:
        # NOTE(review): same garbled @property/@setter pair as init_size above.
        return self.__batch_sizes
    _sizes.setter

    def batch_sizes(self, batch_sizes: Iterable[Union[(int, float)]]):
        """Normalize each entry: fractional sizes become absolute counts."""
        self.__batch_sizes = [bs for bs in batch_sizes]
        for (i, bs) in enumerate(self.__batch_sizes):
            if isinstance(bs, float):
                if ((bs < 0) or (bs > 1)):
                    raise ValueError(f'batch_size(={bs} must be in [0, 1]')
                self.__batch_sizes[i] = math.ceil((self.size * bs))
            # NOTE(review): this re-checks the pre-conversion value `bs`, not the
            # converted entry -- negative ints are caught, converted floats are not.
            if (bs < 0):
                raise ValueError(f'batch_size(={bs} must be positive')

    def reset(self):
        """Re-seed the metric RNG so acquisition runs are reproducible."""
        metrics.set_seed(self.seed)

    def acquire_initial(self, xs: Iterable[T], cluster_ids: Optional[Iterable[int]]=None, cluster_sizes: Optional[Mapping[(int, int)]]=None) -> List[T]:
        """Pick the initial batch uniformly at random, optionally per cluster."""
        U = metrics.random(np.empty(self.size))  # one random utility per pool item
        if ((cluster_ids is None) and (cluster_sizes is None)):
            # Unclustered: one bounded min-heap keeps the init_size best utilities.
            heap = []
            for (x, u) in tqdm(zip(xs, U), total=U.size, desc='Acquiring'):
                if (len(heap) < self.init_size):
                    heapq.heappush(heap, (u, x))
                else:
                    heapq.heappushpop(heap, (u, x))
        else:
            # Clustered: one heap per cluster, sized proportionally to the cluster.
            d_cid_heap = {cid: ([], math.ceil(((self.init_size * cluster_size) / U.size))) for (cid, cluster_size) in cluster_sizes.items()}
            for (x, u, cid) in tqdm(zip(xs, U, cluster_ids), 'Acquiring', U.size, disable=(self.verbose < 1)):
                (heap, heap_size) = d_cid_heap[cid]
                if (len(heap) < heap_size):
                    heapq.heappush(heap, (u, x))
                else:
                    heapq.heappushpop(heap, (u, x))
            heaps = [heap for (heap, _) in d_cid_heap.values()]
            heap = list(chain(*heaps))
        if (self.verbose > 0):
            print(f' Selected {len(heap)} initial samples')
        return [x for (_, x) in heap]

    def acquire_batch(self, xs: Iterable[T], y_means: Iterable[float], y_vars: Iterable[float], explored: Optional[Mapping]=None, k: int=1, cluster_ids: Optional[Iterable[int]]=None, cluster_sizes: Optional[Mapping[(int, int)]]=None, t: Optional[int]=None, **kwargs) -> List[T]:
        """Pick iteration `t`'s batch by acquisition utility, skipping explored items."""
        if explored:
            ys = list(explored.values())
            # NaNs (failed measurements) become -inf so they never set the max.
            Y = np.nan_to_num(np.array(ys, dtype=float), nan=(- np.inf))
            current_max = np.partition(Y, (- k))[(- k)]  # k-th largest observed value
        else:
            explored = {}
            current_max = float('-inf')
        try:
            batch_size = self.batch_sizes[t]
        except (IndexError, TypeError):
            # Past the configured schedule (or t is None): reuse the last size.
            batch_size = self.batch_sizes[(- 1)]
        begin = default_timer()
        Y_mean = np.array(y_means)
        Y_var = np.array(y_vars)
        if (self.verbose > 1):
            print('Calculating acquisition utilities ...', end=' ')
        U = metrics.calc(self.metric, Y_mean, Y_var, current_max, self.threshold, self.beta, self.xi, self.stochastic_preds)
        # Epsilon-greedy exploration: force a random subset to +inf utility.
        idxs = np.random.choice(U.size, math.ceil((batch_size * self.epsilon)), False)
        U[idxs] = np.inf
        if (self.verbose > 1):
            print('Done!')
        if (self.verbose > 2):
            total = (default_timer() - begin)
            (mins, secs) = divmod(int(total), 60)
            print(f' Utility calculation took {mins}m {secs}s')
        if ((cluster_ids is None) and (cluster_sizes is None)):
            heap = []
            for (x, u) in tqdm(zip(xs, U), 'Acquiring', U.size, disable=(self.verbose < 1)):
                if (x in explored):
                    continue
                if (len(heap) < batch_size):
                    heapq.heappush(heap, (u, x))
                else:
                    heapq.heappushpop(heap, (u, x))
        else:
            d_cid_heap = {cid: ([], math.ceil(((batch_size * cluster_size) / U.size))) for (cid, cluster_size) in cluster_sizes.items()}
            global_pred_max = float('-inf')
            for (x, y_pred, u, cid) in tqdm(zip(xs, Y_mean, U, cluster_ids), total=U.size, desc='Acquiring'):
                global_pred_max = max(y_pred, global_pred_max)
                if (x in explored):
                    continue
                (heap, heap_size) = d_cid_heap[cid]
                if (len(heap) < heap_size):
                    heapq.heappush(heap, (u, x))
                else:
                    heapq.heappushpop(heap, (u, x))
            if (self.temp_i and self.temp_f):
                # Anneal: shrink the heaps of clusters whose best prediction lags.
                d_cid_heap = self.scale_heaps(d_cid_heap, global_pred_max, t)
            heaps = [heap for (heap, _) in d_cid_heap.values()]
            heap = list(chain(*heaps))
        if (self.verbose > 1):
            print(f'Selected {len(heap)} new samples')
        if (self.verbose > 2):
            total = (default_timer() - begin)
            (mins, secs) = divmod(int(total), 60)
            print(f' Batch acquisition took {mins}m {secs}s')
        return [x for (_, x) in heap]

    def scale_heaps(self, d_cid_heap: Dict[(int, List)], global_pred_max: float, it: int):
        """Shrink each cluster heap by how far its best prediction trails the global best."""
        temp = Acquirer.temp(it, self.temp_i, self.temp_f)
        for (cid, (heap, heap_size)) in d_cid_heap.items():
            if (len(heap) == 0):
                continue
            # Forced-random (+inf) entries are treated as utility -1 here.
            pred_local_max = max(heap, key=(lambda yx: ((- 1) if math.isinf(yx[0]) else yx[0])))
            lam = Acquirer.decay(global_pred_max, pred_local_max, temp)
            new_heap_size = math.ceil((lam * heap_size))
            new_heap = heapq.nlargest(new_heap_size, heap)
            d_cid_heap[cid] = (new_heap, new_heap_size)
        return d_cid_heap

    def temp(it: int, temp_i, temp_f) -> float:
        # NOTE(review): no `self` and no @staticmethod -- decorator presumably
        # lost in extraction (it is called as Acquirer.temp above); confirm.
        return (((temp_i - temp_f) * math.exp((- it))) + temp_f)

    def decay(global_max: float, local_max: float, temp: float) -> float:
        # NOTE(review): likewise presumably @staticmethod.
        return math.exp(((- (global_max - local_max)) / temp))
class BaseDebugCommand(BaseCommand):
    """Base class for Django management commands that are development-only.

    Instantiation fails fast when settings.DEBUG is off; subclasses implement
    handle() with the actual command logic.
    """

    def __init__(self, *args, **kwargs):
        # Hard gate: this tooling must never run in production.
        if (not settings.DEBUG):
            # BUGFIX: the message previously told users to set DEBUG to False,
            # but the guard above requires DEBUG to be True for the command to run.
            raise CommandError('This command is not allowed in production. Set DEBUG to True to use this command.')
        super().__init__(*args, **kwargs)

    def handle(self, *args, **options):
        """Subclasses must override with the command implementation."""
        raise NotImplementedError('Provide a handle() method yourself!')
class TestAsyncGenerator(TestNameCheckVisitorBase):
    """Type-inference tests for async generators/iterators: inferred yield types
    and `async for` target types must match the annotated element type."""
    _passes()  # NOTE(review): garbled decorator -- presumably @assert_passes(); confirm

    def test_async_iterator(self):
        import collections.abc
        from typing import AsyncIterator

        async def gen() -> AsyncIterator[int]:
            (yield 3)
            (yield 'not an int')  # deliberately wrong: exercises yield-type checking

        async def capybara() -> None:
            assert_is_value(gen(), GenericValue(collections.abc.AsyncIterator, [TypedValue(int)]))
            async for i in gen():
                assert_is_value(i, TypedValue(int))
    _passes()  # NOTE(review): garbled decorator, as above

    def test_async_generator(self):
        import collections.abc
        from typing import AsyncGenerator

        async def gen() -> AsyncGenerator[(int, None)]:
            (yield 3)
            (yield 'not an int')

        async def capybara() -> None:
            assert_is_value(gen(), GenericValue(collections.abc.AsyncGenerator, [TypedValue(int), KnownValue(None)]))
            async for i in gen():
                assert_is_value(i, TypedValue(int))
    _passes()  # NOTE(review): garbled decorator, as above

    def test_async_comprehension_over_generator(self):
        import collections.abc
        from typing import AsyncIterator

        async def f() -> AsyncIterator[int]:
            (yield 1)
            (yield 2)

        async def capybara():
            x = f()
            assert_is_value(x, GenericValue(collections.abc.AsyncIterator, [TypedValue(int)]))
            ints = [i async for i in x]
            assert_is_value(ints, SequenceValue(list, [(True, TypedValue(int))]))
    _passes()  # NOTE(review): garbled decorator, as above

    def test_send_type(self):
        from typing import AsyncGenerator

        async def capybara() -> AsyncGenerator[(int, str)]:
            # The second type argument is the send type; `yield` evaluates to it.
            x = (yield 1)
            assert_is_value(x, TypedValue(str))
            (yield x)
_vcs_handler('git', 'keywords')
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive version info from git-archive keyword substitutions.

    `keywords` must carry expanded 'refnames' and 'full' values; raises
    NotThisMethod when the keywords are missing or were never expanded.
    Returns a versioneer-style dict; falls back to '0+unknown' when no ref
    matches `tag_prefix`.
    """
    if not keywords:
        raise NotThisMethod('no keywords at all, weird')
    refnames = keywords['refnames'].strip()
    if refnames.startswith('$Format'):
        # Keywords were never substituted: a working tree, not an archive.
        if verbose:
            print('keywords are unexpanded, not using')
        raise NotThisMethod('unexpanded keywords, not a git-archive tarball')
    refs = {ref.strip() for ref in refnames.strip('()').split(',')}
    TAG = 'tag: '
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Older gits omitted the "tag: " prefix; keep anything containing a digit.
        tags = {ref for ref in refs if re.search('\\d', ref)}
        if verbose:
            print(("discarding '%s', no digits" % ','.join((refs - tags))))
    if verbose:
        print(('likely tags: %s' % ','.join(sorted(tags))))
    # Sorted iteration makes the pick deterministic; the first prefix match wins.
    for ref in sorted(tags):
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print(('picking %s' % version))
            return {'version': version, 'full-revisionid': keywords['full'].strip(), 'dirty': False, 'error': None}
    if verbose:
        print('no suitable tags, using unknown + full revision id')
    return {'version': '0+unknown', 'full-revisionid': keywords['full'].strip(), 'dirty': False, 'error': 'no suitable tags'}
class wide_basic(nn.Module):
    """Wide-ResNet basic block (BN-ReLU-Conv x2 + shortcut) with an optional
    internal classifier head for early-exit networks.

    `params` is (add_output, num_classes, input_size, output_id). When
    add_output is falsy, `forward` is rebound to `only_forward`, which returns
    (features, 0, None) instead of producing a classifier output.
    """

    def __init__(self, in_channels, channels, dropout_rate, params, stride=1):
        super(wide_basic, self).__init__()
        add_output = params[0]
        num_classes = params[1]
        input_size = params[2]
        self.output_id = params[3]
        self.depth = 2
        self.layers = nn.ModuleList()
        # Pre-activation residual branch: BN-ReLU-Conv, dropout, BN-ReLU-Conv.
        self.layers.append(nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, channels, kernel_size=3, padding=1, bias=True),
            nn.Dropout(p=dropout_rate),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, stride=stride, padding=1, bias=True),
        ))
        # Shortcut: identity unless the shape changes, then a 1x1 projection.
        if (stride != 1) or (in_channels != channels):
            shortcut = nn.Sequential(nn.Conv2d(in_channels, channels, kernel_size=1, stride=stride, bias=True))
        else:
            shortcut = nn.Sequential()
        self.layers.append(shortcut)
        if add_output:
            self.output = af.InternalClassifier(input_size, channels, num_classes)
            self.no_output = False
        else:
            self.output = None
            # Instance attribute shadows the class-level forward.
            self.forward = self.only_forward
            self.no_output = True

    def only_output(self, x):
        """Return only the internal classifier's prediction."""
        merged = self.layers[0](x) + self.layers[1](x)
        return self.output(merged)

    def only_forward(self, x):
        """Features only; no internal classifier on this block."""
        merged = self.layers[0](x) + self.layers[1](x)
        return (merged, 0, None)

    def forward(self, x):
        """Features plus the internal classifier's output."""
        merged = self.layers[0](x) + self.layers[1](x)
        return (merged, 1, self.output(merged))
class Migration(migrations.Migration):
    """Align the custom user model's email, groups, last_login and username
    fields with Django's updated AbstractUser defaults (help texts, validators,
    nullable last_login)."""
    dependencies = [('users', '0007_auto__1555')]
    operations = [migrations.AlterField(model_name='user', name='email', field=models.EmailField(max_length=254, verbose_name='email address', blank=True)), migrations.AlterField(model_name='user', name='groups', field=models.ManyToManyField(verbose_name='groups', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_query_name='user', blank=True, to='auth.Group', related_name='user_set')), migrations.AlterField(model_name='user', name='last_login', field=models.DateTimeField(verbose_name='last login', null=True, blank=True)), migrations.AlterField(model_name='user', name='username', field=models.CharField(max_length=30, verbose_name='username', help_text='Required. 30 characters or fewer. Letters, digits and /./+/-/_ only.', unique=True, error_messages={'unique': 'A user with that username already exists.'}, validators=[django.core.validators.RegexValidator('^[\\w.+-]+$', 'Enter a valid username. This value may contain only letters, numbers and /./+/-/_ characters.', 'invalid')]))]
class Conv2dSame(nn.Conv2d):
    """nn.Conv2d with TensorFlow-style 'SAME' padding applied at forward time.

    The constructor's `padding` argument is accepted for drop-in signature
    compatibility but deliberately discarded: the parent is constructed with
    padding=0 and `conv2d_same` pads the input dynamically on each call.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(Conv2dSame, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)

    def forward(self, x):
        # conv2d_same is a module-level helper defined elsewhere in this file.
        return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def render_event(events_definition, state, fn_prefix):
    """Render one event `state` as a single text line.

    `events_definition` maps event names to descriptors with keys 'desc',
    'update_names' and 'other_fields'; `fn_prefix` formats the line prefix
    from the state dict.
    """
    descriptor = events_definition[state['evname']]
    pieces = [fn_prefix(state), descriptor['desc']]
    # Primary fields; the event name itself is already conveyed by 'desc'.
    pieces.extend((' %s: %s' % (field, state[field])) for field in descriptor['update_names'] if field != 'evname')
    secondary = descriptor['other_fields']
    if secondary:
        pieces.append(' ;')
        pieces.extend((' %s: %s' % (field, state[field])) for field in secondary)
    return ''.join(pieces)
_api()
class buffer(Stream):
    """Stream node that buffers up to `n` elements in an async queue,
    decoupling upstream emission from downstream processing."""
    _graphviz_shape = 'diamond'

    def __init__(self, upstream, n, **kwargs):
        self.queue = Queue(maxsize=n)  # backpressure kicks in once n items are pending
        kwargs['ensure_io_loop'] = True  # draining below requires an event loop
        Stream.__init__(self, upstream, **kwargs)
        self.loop.add_callback(self.cb)

    def update(self, x, who=None, metadata=None):
        # Hold metadata refs until the buffered element is emitted in cb().
        self._retain_refs(metadata)
        return self.queue.put((x, metadata))

    def cb(self):
        # Tornado-style generator coroutine: drain the queue forever,
        # emitting downstream one element at a time.
        while True:
            (x, metadata) = (yield self.queue.get())
            (yield self._emit(x, metadata=metadata))
            self._release_refs(metadata)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.