code stringlengths 101 5.91M |
|---|
class TextClassificationDecoder(DecoderBase, TextClassificationDecoderMixin):
    """Classification head that pools token hidden states into one label per sequence.

    Pipeline: dropout -> aggregate over the time axis -> linear projection ->
    loss (training) or argmax label (decoding).
    """

    def __init__(self, config: TextClassificationDecoderConfig):
        super().__init__()
        # Mapping from class index to human-readable label string.
        self.idx2label = config.idx2label
        self.dropout = CombinedDropout(*config.in_drop_rates)
        # Projects the pooled hidden state onto the label vocabulary.
        self.hid2logit = torch.nn.Linear(config.in_dim, config.voc_dim)
        reinit_layer_(self.hid2logit, 'sigmoid')
        # Aggregator selection by suffix of `agg_mode`, e.g. 'max_pooling' or
        # 'dot_attention'.
        # NOTE(review): if `agg_mode` matches neither suffix, `self.aggregating`
        # is never set and `forward` will fail — presumably the config validates
        # this upstream; confirm.
        if config.agg_mode.lower().endswith('_pooling'):
            self.aggregating = SequencePooling(mode=config.agg_mode.replace('_pooling', ''))
        elif config.agg_mode.lower().endswith('_attention'):
            self.aggregating = SequenceAttention(config.in_dim, scoring=config.agg_mode.replace('_attention', ''))
        # reduction='none' keeps per-example losses; aggregation is left to the caller.
        self.criterion = config.instantiate_criterion(reduction='none')

    def forward(self, batch: Batch, full_hidden: torch.Tensor):
        """Return per-example losses against `batch.label_ids`."""
        pooled_hidden = self.aggregating(self.dropout(full_hidden), mask=batch.mask)
        logits = self.hid2logit(pooled_hidden)
        return self.criterion(logits, batch.label_ids)

    def decode(self, batch: Batch, full_hidden: torch.Tensor):
        """Return the predicted label string for each sequence in the batch."""
        pooled_hidden = self.aggregating(full_hidden, mask=batch.mask)
        logits = self.hid2logit(pooled_hidden)
        return [self.idx2label[label_id] for label_id in logits.argmax(dim=(- 1)).cpu().tolist()]
class QuerySetSelectField(fields.SelectFieldBase):
    """WTForms select field whose choices come from a database queryset.

    The selected object is resolved lazily: `process_formdata` only records the
    submitted primary key, and `_get_data` looks it up in the queryset the
    first time `.data` is read.
    """

    widget = widgets.Select()

    def __init__(self, label=None, validators=None, queryset=None, get_label=None, allow_blank=False, blank_text='', **kwargs):
        super(QuerySetSelectField, self).__init__(label, validators, **kwargs)
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if (queryset is not None):
            # Clone the queryset so later iterations re-evaluate it fresh.
            self.queryset = queryset.all()
        # `get_label` may be omitted (identity), an attribute name, or a callable.
        if (get_label is None):
            self.get_label = (lambda x: x)
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label

    def _get_data(self):
        # Resolve a pending form-submitted pk to the matching object, lazily.
        if (self._formdata is not None):
            for obj in self.queryset:
                if (obj.pk == self._formdata):
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        # Setting data directly clears any pending raw form pk.
        self._data = data
        self._formdata = None
    data = property(_get_data, _set_data)

    def iter_choices(self):
        """Yield (value, label, selected) triples for widget rendering."""
        if self.allow_blank:
            (yield ('__None', self.blank_text, (self.data is None)))
        for obj in self.queryset:
            (yield (obj.pk, self.get_label(obj), (obj == self.data)))

    def process_formdata(self, valuelist):
        """Record the submitted pk; the object itself is resolved later by `.data`."""
        if valuelist:
            if (valuelist[0] == '__None'):
                self.data = None
            else:
                # Defer the queryset lookup until `.data` is first accessed.
                # NOTE(review): assumes pks are integers — confirm for this model.
                self._data = None
                self._formdata = int(valuelist[0])

    def pre_validate(self, form):
        """Ensure the chosen object is still in the queryset (unless blank is allowed)."""
        if ((not self.allow_blank) or (self.data is not None)):
            for obj in self.queryset:
                if (self.data == obj):
                    break
            else:
                raise ValidationError(self.gettext('Not a valid choice'))
def _cluster_intersections(intersections, radius_threshold):
    """Group intersection points into the smallest number of KMeans clusters
    whose radii all stay within `radius_threshold`.

    Returns the list of cluster-center points (built via `instantiators['point']`),
    or an empty list when there are no intersections.
    """
    if len(intersections) == 0:
        return []
    n_clusters = 1
    while True:
        model = KMeans(n_clusters)
        labels = model.fit_predict(intersections)
        centers = [instantiators['point'](*coords) for coords in model.cluster_centers_]
        cluster_radii = []
        for label, center in enumerate(centers):
            members = [point for point, assigned in zip(intersections, labels) if assigned == label]
            cluster_radii.append(_get_radius(center, members))
        # Accept the first clustering where every cluster is tight enough.
        if max(cluster_radii) <= radius_threshold:
            return centers
        n_clusters += 1
class FilterableRequestsAuth(Protocol):
    """Structural type for auth objects whose application can be limited by
    operation name, HTTP method, or path — given as exact values, custom
    matcher functions, or regular expressions."""

    def apply_to(self, func: (MatcherFunc | None)=None, *, name: (FilterValue | None)=None, name_regex: (str | None)=None, method: (FilterValue | None)=None, method_regex: (str | None)=None, path: (FilterValue | None)=None, path_regex: (str | None)=None) -> FilterableRequestsAuth:
        """Restrict this auth to requests matching the given filters; returns self-like for chaining."""
        pass

    def skip_for(self, func: (MatcherFunc | None)=None, *, name: (FilterValue | None)=None, name_regex: (str | None)=None, method: (FilterValue | None)=None, method_regex: (str | None)=None, path: (FilterValue | None)=None, path_regex: (str | None)=None) -> FilterableRequestsAuth:
        """Exclude requests matching the given filters from this auth; returns self-like for chaining."""
        pass
class SmoothedValue():
    """Tracks a scalar metric over a sliding window and over the whole run.

    `update` records a value with its batch size; windowed statistics
    (`median`, `avg`, `get_latest`) look at the last `window_size` updates,
    while `global_avg` uses the sample-weighted running totals.
    """

    def __init__(self, window_size=20):
        self.window_size = window_size
        self.reset()

    def reset(self):
        """Clear all windowed history and running totals."""
        self.deque = deque(maxlen=self.window_size)                  # windowed value*batch sums
        self.averaged_value_deque = deque(maxlen=self.window_size)   # windowed raw values
        self.batch_sizes = deque(maxlen=self.window_size)
        self.total_samples = 0
        self.total = 0.0
        self.count = 0

    def update(self, value, batch_size):
        """Record one observation of `value` averaged over `batch_size` samples."""
        weighted = value * batch_size
        self.deque.append(weighted)
        self.averaged_value_deque.append(value)
        self.batch_sizes.append(batch_size)
        self.count += 1
        self.total_samples += batch_size
        self.total += weighted

    def median(self):
        """Median of the raw values in the current window."""
        return torch.tensor(list(self.averaged_value_deque)).median().item()

    def avg(self):
        """Sample-weighted average over the current window."""
        weighted_sum = torch.tensor(list(self.deque)).sum().item()
        sample_count = torch.tensor(list(self.batch_sizes)).sum().item()
        return weighted_sum / sample_count

    def global_avg(self):
        """Sample-weighted average over everything since the last reset."""
        return self.total / self.total_samples

    def get_latest(self):
        """Most recently recorded raw value."""
        return self.averaged_value_deque[-1]
def parse_args(argv=None):
    """Build and parse command-line arguments for the tokenizer.

    Args:
        argv: Optional list of argument strings. Defaults to None, which makes
            argparse read ``sys.argv[1:]`` — identical to the previous behavior.

    Returns:
        argparse.Namespace holding all tokenizer options.
    """
    parser = argparse.ArgumentParser()
    # --- data files ---
    parser.add_argument('--txt_file', type=str, help='Input plaintext file')
    parser.add_argument('--label_file', type=str, default=None, help='Character-level label file')
    parser.add_argument('--json_file', type=str, default=None, help='JSON file with pre-chunked units')
    parser.add_argument('--mwt_json_file', type=str, default=None, help='JSON file for MWT expansions')
    parser.add_argument('--conll_file', type=str, default=None, help='CoNLL file for output')
    parser.add_argument('--dev_txt_file', type=str, help='(Train only) Input plaintext file for the dev set')
    parser.add_argument('--dev_label_file', type=str, default=None, help='(Train only) Character-level label file for the dev set')
    parser.add_argument('--dev_json_file', type=str, default=None, help='(Train only) JSON file with pre-chunked units for the dev set')
    parser.add_argument('--dev_conll_gold', type=str, default=None, help='(Train only) CoNLL-U file for the dev set for early stopping')
    parser.add_argument('--lang', type=str, help='Language')
    parser.add_argument('--shorthand', type=str, help='UD treebank shorthand')
    parser.add_argument('--mode', default='train', choices=['train', 'predict'])
    # --- model architecture ---
    parser.add_argument('--emb_dim', type=int, default=32, help='Dimension of unit embeddings')
    parser.add_argument('--hidden_dim', type=int, default=64, help='Dimension of hidden units')
    parser.add_argument('--conv_filters', type=str, default='1,9', help='Configuration of conv filters. ,, separates layers and , separates filter sizes in the same layer.')
    parser.add_argument('--no-residual', dest='residual', action='store_false', help='Add linear residual connections')
    parser.add_argument('--no-hierarchical', dest='hierarchical', action='store_false', help='"Hierarchical" RNN tokenizer')
    parser.add_argument('--hier_invtemp', type=float, default=0.5, help='Inverse temperature used in propagating tokenization predictions between RNN layers')
    parser.add_argument('--input_dropout', action='store_true', help='Dropout input embeddings as well')
    parser.add_argument('--conv_res', type=str, default=None, help='Convolutional residual layers for the RNN')
    parser.add_argument('--rnn_layers', type=int, default=1, help='Layers of RNN in the tokenizer')
    # --- optimization ---
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Maximum gradient norm to clip to')
    parser.add_argument('--anneal', type=float, default=0.999, help='Anneal the learning rate by this amount when dev performance deteriorates')
    parser.add_argument('--anneal_after', type=int, default=2000, help='Anneal the learning rate no earlier than this step')
    parser.add_argument('--lr0', type=float, default=0.002, help='Initial learning rate')
    parser.add_argument('--dropout', type=float, default=0.33, help='Dropout probability')
    parser.add_argument('--unit_dropout', type=float, default=0.33, help='Unit dropout probability')
    parser.add_argument('--tok_noise', type=float, default=0.02, help='Probability to induce noise to the input of the higher RNN')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay')
    parser.add_argument('--max_seqlen', type=int, default=100, help='Maximum sequence length to consider at a time')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size to use')
    parser.add_argument('--epochs', type=int, default=10, help='Total epochs to train the model for')
    parser.add_argument('--steps', type=int, default=20000, help='Steps to train the model for, if unspecified use epochs')
    parser.add_argument('--report_steps', type=int, default=20, help='Update step interval to report loss')
    parser.add_argument('--shuffle_steps', type=int, default=100, help='Step interval to shuffle each paragraph in the generator')
    parser.add_argument('--eval_steps', type=int, default=200, help='Step interval to evaluate the model on the dev set for early stopping')
    # --- checkpointing / environment ---
    parser.add_argument('--save_name', type=str, default=None, help='File name to save the model')
    parser.add_argument('--load_name', type=str, default=None, help='File name to load a saved model')
    parser.add_argument('--save_dir', type=str, default='saved_models/tokenize', help='Directory to save models in')
    # NOTE(review): `type=bool` is an argparse pitfall — any non-empty string
    # (including '--cuda False') parses as True. Kept as-is because callers may
    # rely on setting this programmatically; use --cpu to force CPU.
    parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
    parser.add_argument('--cpu', action='store_true', help='Ignore CUDA and run on CPU.')
    parser.add_argument('--seed', type=int, default=1234)
    args = parser.parse_args(argv)
    return args
def dosig(node):
    """Render a function node's argument signature as a comma-separated string.

    A None node stands for a bound method with no AST available and renders
    as 'self'. Defaults are rendered via `tostr`; a bare '*' (or '*args')
    separates keyword-only arguments.
    """
    if node is None:
        return 'self'
    arguments = node.args
    positional = arguments.posonlyargs + arguments.args
    names = [a.arg for a in positional] + [a.arg for a in arguments.kwonlyargs]
    # Only arguments with actual defaults contribute '=...' suffixes; pad the
    # front so suffixes line up with the trailing arguments.
    suffixes = ['=' + tostr(d) for d in arguments.defaults + arguments.kw_defaults if d is not None]
    suffixes = [''] * (len(names) - len(suffixes)) + suffixes
    pieces = [name + suffix for name, suffix in zip(names, suffixes)]
    # Mark the keyword-only boundary with '*name' or a bare '*'.
    if arguments.vararg is not None:
        pieces.insert(len(positional), f'*{arguments.vararg.arg}')
    elif arguments.kwonlyargs:
        pieces.insert(len(positional), '*')
    return ', '.join(pieces)
class WikiExtractor(object):
    """Extracts plain-text paragraphs, with word tokens and link annotations,
    from a MediaWiki page parsed by mwparserfromhell."""

    def __init__(self, lowercase=True, min_paragraph_len=20):
        # lowercase: lowercase every token produced by `_extract_words`.
        # min_paragraph_len: minimum word count for a paragraph to be kept.
        self._lowercase = lowercase
        self._min_paragraph_len = min_paragraph_len
        self._tokenizer = RegexpTokenizer()

    def extract_paragraphs(self, page):
        """Return a list of Paragraph(text, words, links) for `page`.

        Redirect pages yield no paragraphs. Wikilinks contribute their display
        text plus a WikiLink annotation carrying the word-index span.
        """
        paragraphs = []
        cur_text = []
        cur_words = []
        cur_links = []
        if page.is_redirect:
            return []
        for node in self._parse_page(page).nodes:
            if isinstance(node, mwparserfromhell.nodes.Text):
                # A text node may span several paragraphs; '\n' starts a new one.
                # NOTE(review): `unicode` is a Python 2 builtin — presumably this
                # module targets py2 or defines a compat alias elsewhere; confirm.
                for (n, paragraph) in enumerate(unicode(node).split('\n')):
                    words = self._extract_words(paragraph)
                    if (n == 0):
                        cur_text.append(paragraph)
                        cur_words += words
                    else:
                        # Flush the accumulated paragraph and start a new one.
                        # NOTE(review): the final accumulated paragraph is never
                        # flushed after the loop ends — confirm this is intended.
                        paragraphs.append(Paragraph(u' '.join(cur_text), cur_words, cur_links))
                        cur_text = [paragraph]
                        cur_words = words
                        cur_links = []
            elif isinstance(node, mwparserfromhell.nodes.Wikilink):
                title = node.title.strip_code()
                if (not title):
                    continue
                # Use the link's display text when present, else the target title.
                if node.text:
                    text = node.text.strip_code()
                else:
                    text = node.title.strip_code()
                cur_text.append(text)
                words = self._extract_words(text)
                start = len(cur_words)
                cur_words += words
                end = len(cur_words)
                # Record the link together with its word-span in the paragraph.
                cur_links.append(WikiLink(self._normalize_title(title), text, (start, end)))
            elif isinstance(node, mwparserfromhell.nodes.Tag):
                # Keep only bold/italic tag contents; ignore tables, refs, etc.
                if (node.tag not in ('b', 'i')):
                    continue
                if (node.contents):
                    continue
                text = node.contents.strip_code()
                cur_text.append(text)
                cur_words += self._extract_words(text)
        # Drop empty, markup-leading ('|', '!', '{'), and too-short paragraphs.
        return [p for p in paragraphs if (p.words and (p.words[0] not in ('|', '!', '{')) and (len(p.words) >= self._min_paragraph_len))]

    def _extract_words(self, text):
        # Tokenize and optionally lowercase the tokens.
        tokens = self._tokenizer.tokenize(text)
        if self._lowercase:
            words = [token.text.lower() for token in tokens]
        else:
            words = [token.text for token in tokens]
        return words

    def _parse_page(self, page):
        # Fall back to an empty parse tree if mwparserfromhell chokes on the markup.
        try:
            return mwparserfromhell.parse(page.wiki_text)
        except Exception:
            logger.exception('Failed to parse wiki text: %s', page.title)
            return mwparserfromhell.parse('')

    def _normalize_title(self, title):
        # MediaWiki canonical form: capitalize first letter, underscores -> spaces.
        return (title[0].upper() + title[1:]).replace('_', ' ')
class LargestNConnectedComponents(pymia_fltr.Filter):
    """Keeps only the N largest connected components of a (binary) image."""

    def __init__(self, number_of_components: int=1, consecutive_component_labels: bool=False):
        # consecutive_component_labels: if True, keep size-ranked labels 1..N
        # in the output instead of collapsing everything to one binary mask.
        super().__init__()
        if (not (number_of_components >= 1)):
            raise ValueError('number_of_components must be larger or equal to 1')
        self.number_of_components = number_of_components
        self.consecutive_component_labels = consecutive_component_labels

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Label connected components, rank them by size, and keep the N largest."""
        image = sitk.ConnectedComponent(image)
        # RelabelComponent sorts labels by component size (label 1 = largest).
        image = sitk.RelabelComponent(image)
        if self.consecutive_component_labels:
            # Keep labels 1..N unchanged; everything else becomes 0.
            return sitk.Threshold(image, lower=1, upper=self.number_of_components, outsideValue=0)
        else:
            # Collapse the N largest components into a single binary mask.
            return sitk.BinaryThreshold(image, lowerThreshold=1, upperThreshold=self.number_of_components, insideValue=1, outsideValue=0)

    def __str__(self):
        return 'LargestNConnectedComponents:\n number_of_components: {self.number_of_components}\n consecutive_component_labels: {self.consecutive_component_labels}\n'.format(self=self)
def ngb_matrix(U, N):
    """Build the U x N user/server coverage matrix.

    Entry (u, n) is 1 when server n's geometry contains user u's location,
    else 0. Returns the matrix together with the loaded user and server frames.
    """
    user = load_users(U)
    server = load_servers(N)
    neighbourhood = np.zeros([U, N])
    for u in range(U):
        # Hoist the user geometry lookup out of the inner loop.
        user_geometry = user.iloc[u].geometry
        for n in range(N):
            covered = server.iloc[n].geometry.contains(user_geometry)
            neighbourhood[u, n] = 1 if covered else 0
    return (neighbourhood, user, server)
class PreFilter():
    """Static collection of rating-dataset pre-filtering strategies.

    Every method takes a DataFrame with at least `userId`, `itemId` and
    `rating` columns and returns a filtered copy (inputs are never mutated).
    `filter` drives the whole process from a configuration namespace.

    Fixes vs. previous revision: methods are now real @staticmethods (they
    were always invoked as `PreFilter.method(...)`, so instance access was
    broken), digit-string options from config are coerced to int before use
    (string thresholds/cores crashed pandas comparisons and `range`), and the
    'Misssing strategy' message typo is corrected.
    """

    @staticmethod
    def filter(d: pd.DataFrame, ns: SimpleNamespace) -> pd.DataFrame:
        """Apply every strategy listed in `ns.prefiltering`, in order.

        Returns `d` untouched when no prefiltering is configured.
        """
        if not hasattr(ns, 'prefiltering'):
            return d
        strategies = ns.prefiltering
        dataframe = d.copy()
        for strategy in strategies:
            dataframe = PreFilter.single_filter(dataframe, strategy)
        return dataframe

    @staticmethod
    def _require_digit_core(ns: SimpleNamespace) -> int:
        """Fetch the `core` option, validating presence and digit-ness."""
        core = getattr(ns, 'core', None)
        if core is None:
            raise Exception('Core option is missing')
        if not str(core).isdigit():
            raise Exception('Core option is not a digit')
        return int(core)

    @staticmethod
    def single_filter(d: pd.DataFrame, ns: SimpleNamespace) -> pd.DataFrame:
        """Dispatch one strategy namespace to the matching filter method.

        Raises:
            Exception: on an unknown strategy or missing/invalid options.
        """
        strategy = getattr(ns, 'strategy', None)
        data = d.copy()
        if strategy == 'global_threshold':
            threshold = getattr(ns, 'threshold', None)
            if threshold is None:
                raise Exception('Threshold option is missing')
            if str(threshold).isdigit():
                data = PreFilter.filter_ratings_by_threshold(data, int(threshold))
            elif threshold == 'average':
                data = PreFilter.filter_ratings_by_global_average(data)
            else:
                raise Exception('Threshold value not recognized')
        elif strategy == 'user_average':
            data = PreFilter.filter_ratings_by_user_average(data)
        elif strategy == 'user_k_core':
            data = PreFilter.filter_users_by_profile_size(data, PreFilter._require_digit_core(ns))
        elif strategy == 'item_k_core':
            data = PreFilter.filter_items_by_popularity(data, PreFilter._require_digit_core(ns))
        elif strategy == 'iterative_k_core':
            data = PreFilter.filter_iterative_k_core(data, PreFilter._require_digit_core(ns))
        elif strategy == 'n_rounds_k_core':
            core = getattr(ns, 'core', None)
            n_rounds = getattr(ns, 'rounds', None)
            if (core is None) or (n_rounds is None):
                raise Exception('Core or rounds options are missing')
            if not (str(core).isdigit() and str(n_rounds).isdigit()):
                raise Exception('Core or rounds options are not digits')
            data = PreFilter.filter_rounds_k_core(data, int(core), int(n_rounds))
        elif strategy == 'cold_users':
            threshold = getattr(ns, 'threshold', None)
            if threshold is None:
                raise Exception('Threshold option is missing')
            if not str(threshold).isdigit():
                raise Exception('Threshold option is not a digit')
            data = PreFilter.filter_retain_cold_users(data, int(threshold))
        else:
            raise Exception('Missing strategy')
        return data

    @staticmethod
    def filter_ratings_by_global_average(d: pd.DataFrame) -> pd.DataFrame:
        """Keep transactions whose rating is >= the global rating mean."""
        data = d.copy()
        threshold = data['rating'].mean()
        print('\nPrefiltering with Global Average')
        print(f'The rating average is {round(threshold, 1)}')
        print(f"The transactions above threshold are {data[(data['rating'] >= threshold)]['rating'].count()}")
        print(f"The transactions below threshold are {data[(data['rating'] < threshold)]['rating'].count()}")
        return data[(data['rating'] >= threshold)]

    @staticmethod
    def filter_ratings_by_threshold(d: pd.DataFrame, threshold) -> pd.DataFrame:
        """Keep transactions whose rating is >= the fixed `threshold`."""
        data = d.copy()
        print('\nPrefiltering with fixed threshold')
        print(f'The rating threshold is {round(threshold, 1)}')
        print(f"The transactions above threshold are {data[(data['rating'] >= threshold)]['rating'].count()}")
        print(f"The transactions below threshold are {data[(data['rating'] < threshold)]['rating'].count()}\n")
        return data[(data['rating'] >= threshold)]

    @staticmethod
    def filter_ratings_by_user_average(d: pd.DataFrame) -> pd.DataFrame:
        """Keep each user's transactions rated >= that user's own mean rating."""
        data = d.copy()
        user_groups = data.groupby(['userId'])
        for (name, group) in user_groups:
            threshold = group['rating'].mean()
            data.loc[(group.index, 'accept_flag')] = (data.loc[(group.index, 'rating')] >= threshold)
        print('\nPrefiltering with user average')
        print(f"The transactions above threshold are {data[data['accept_flag']]['rating'].count()}")
        print(f"The transactions below threshold are {data[(data['accept_flag'] == False)]['rating'].count()}\n")
        return data[(data['accept_flag'] == True)].drop(columns=['accept_flag']).reset_index(drop=True)

    @staticmethod
    def filter_users_by_profile_size(d: pd.DataFrame, threshold) -> pd.DataFrame:
        """Keep users having at least `threshold` transactions (user k-core)."""
        data = d.copy()
        print(f'\nPrefiltering with user {threshold}-core')
        print(f'The transactions before filtering are {len(data)}')
        print(f"The users before filtering are {data['userId'].nunique()}")
        user_groups = data.groupby(['userId'])
        data = user_groups.filter(lambda x: len(x) >= threshold)
        print(f'The transactions after filtering are {len(data)}')
        print(f"The users after filtering are {data['userId'].nunique()}")
        return data

    @staticmethod
    def filter_items_by_popularity(d: pd.DataFrame, threshold) -> pd.DataFrame:
        """Keep items having at least `threshold` transactions (item k-core)."""
        data = d.copy()
        print(f'\nPrefiltering with item {threshold}-core')
        print(f'The transactions before filtering are {len(data)}')
        print(f"The items before filtering are {data['itemId'].nunique()}")
        item_groups = data.groupby(['itemId'])
        data = item_groups.filter(lambda x: len(x) >= threshold)
        print(f'The transactions after filtering are {len(data)}')
        print(f"The items after filtering are {data['itemId'].nunique()}")
        return data

    @staticmethod
    def filter_iterative_k_core(d: pd.DataFrame, threshold) -> pd.DataFrame:
        """Alternate user and item k-core filtering until a fixed point is reached."""
        data = d.copy()
        check_var = True
        original_length = len(data)
        print('\n')
        print(f'Iterative {threshold}-core')
        while check_var:
            data = PreFilter.filter_users_by_profile_size(data, threshold)
            data = PreFilter.filter_items_by_popularity(data, threshold)
            new_length = len(data)
            if original_length == new_length:
                # Nothing changed in the last round: the k-core is stable.
                check_var = False
            else:
                original_length = new_length
            print('\n')
        return data

    @staticmethod
    def filter_rounds_k_core(d: pd.DataFrame, threshold, n_rounds) -> pd.DataFrame:
        """Run exactly `n_rounds` rounds of user then item k-core filtering."""
        data = d.copy()
        print('\n')
        print(f'{n_rounds} rounds of user/item {threshold}-core')
        for i in range(n_rounds):
            print(f'Iteration: {i}')
            data = PreFilter.filter_users_by_profile_size(data, threshold)
            data = PreFilter.filter_items_by_popularity(data, threshold)
            print('\n')
        return data

    @staticmethod
    def filter_retain_cold_users(d: pd.DataFrame, threshold) -> pd.DataFrame:
        """Keep only users with `threshold` or fewer transactions."""
        data = d.copy()
        print(f'\nPrefiltering retaining cold users with {threshold} or less ratings')
        print(f'The transactions before filtering are {len(data)}')
        print(f"The users before filtering are {data['userId'].nunique()}")
        user_groups = data.groupby(['userId'])
        data = user_groups.filter(lambda x: len(x) <= threshold)
        print(f'The transactions after filtering are {len(data)}')
        print(f"The users after filtering are {data['userId'].nunique()}")
        return data
class ConcatSampler(Sampler):
    """Sampler for a ConcatDataset that yields fixed-size runs of indices,
    each run drawn from a single sub-dataset chosen with probability
    proportional to its size. Incomplete trailing runs are dropped, which
    `__len__` accounts for.
    """

    def __init__(self, concat_dataset: ConcatDataset, samples_per_dataset: int):
        assert isinstance(concat_dataset, ConcatDataset)
        self.concat_dataset = concat_dataset
        self.nb_datasets = len(concat_dataset.datasets)
        self.samples_per_dataset = samples_per_dataset
        # Size-proportional sampling weights over the sub-datasets.
        sizes = torch.tensor([len(d) for d in concat_dataset.datasets]).float()
        self.weight = sizes / sizes.sum()

    def sample_dataset(self):
        """Pick a sub-dataset index with probability proportional to its size."""
        return torch.multinomial(self.weight, 1, replacement=True).item()

    def __iter__(self):
        iterators = [iter(RandomSampler(d)) for d in self.concat_dataset.datasets]
        exhausted = np.array([False] * self.nb_datasets)
        while not exhausted.all():
            chosen = self.sample_dataset()
            if exhausted[chosen]:
                continue
            run = []
            for _ in range(self.samples_per_dataset):
                try:
                    idx = next(iterators[chosen])
                except StopIteration:
                    exhausted[chosen] = True
                    break
                # Shift local indices into the concatenated index space.
                if chosen > 0:
                    idx += self.concat_dataset.cumulative_sizes[chosen - 1]
                run.append(idx)
            # Only emit complete runs; a short tail is discarded.
            if len(run) == self.samples_per_dataset:
                yield from run

    def __len__(self):
        per_run = self.samples_per_dataset
        return sum((len(d) // per_run) * per_run for d in self.concat_dataset.datasets)
def block_inception_c(inputs, scope=None, reuse=None):
    """Builds the 8x8 resolution Inception-C block (Inception-v4, TF-slim).

    Four parallel branches are concatenated along the channel axis:
    1x1 conv; 1x1 then split(1x3, 3x1); 1x1 -> 3x1 -> 1x3 then split(1x3, 3x1);
    and 3x3 avg-pool -> 1x1 conv.
    """
    # Every conv/pool here is stride 1 with SAME padding, so spatial dims are preserved.
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                # Asymmetric 1x3 / 3x1 convolutions concatenated channel-wise.
                branch_1 = tf.concat(axis=3, values=[slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'), slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
                branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
                branch_2 = tf.concat(axis=3, values=[slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'), slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
            # Channel-wise (axis=3, NHWC) concatenation of all four branches.
            return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def get_local_rank() -> int:
    """Return this process's rank within the per-machine process group.

    Falls back to 0 when torch.distributed is unavailable or not initialized
    (i.e. single-process runs).
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    # The local group must have been created during distributed launch.
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def result_info(tool_id, finding):
    """Assemble the SARIF 'result' dictionary for one tool finding.

    Optional parts (message, level, region, location message) are attached
    only when their helper returns a truthy value.
    """
    fname = finding['name']
    info_finding = sb.tools.info_finding(tool_id, fname)
    location = {'physicalLocation': {'artifactLocation': {'uri': finding['filename']}}}
    result_dict = {'ruleId': rule_id(tool_id, fname), 'locations': [location]}
    message_text = result_message(finding, info_finding)
    if message_text:
        result_dict['message'] = {'text': message_text}
    level = result_level(finding)
    if level:
        result_dict['level'] = level
    region = result_region(finding)
    if region:
        location['physicalLocation']['region'] = region
    location_message = result_location_message(finding)
    if location_message:
        location['message'] = {'text': location_message}
    return result_dict
# NOTE(review): '.pure' looks like the tail of a stripped decorator
# (presumably '@pytest.mark.pure' or similar) — confirm against the original file.
.pure
def test_onnx_return_scalars(gpu, sdfg_name):
    """An ONNX ReduceSum with keepdims=0 over a length-5 vector must return a 0-d scalar."""
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [5])
    # The empty shape [] declares Y as a scalar output.
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [])
    node_def = helper.make_node('ReduceSum', ['X'], ['Y'], keepdims=0)
    graph_def = helper.make_graph([node_def], 'test-scalar-return', [X], [Y])
    model_def = helper.make_model(graph_def)
    checker.check_model(model_def)
    dace_model = donnx.ONNXModel(sdfg_name, model_def, cuda=gpu)
    inp = copy_to_gpu(gpu, torch.arange(5).type(torch.float32))
    result = dace_model(inp)
    # 0-d result: shape is the empty tuple and the value is sum(0..4) == 10.
    assert (result.shape == ())
    assert (result[()] == (((1 + 2) + 3) + 4))
class Pipeline(BaseSKMObject):
    """Chain of transforms followed by a final estimator (stream learning).

    `steps` is a sequence of (name, transform/estimator) pairs. All but the
    last must implement fit/transform; the last should support `partial_fit`
    for streaming use. Steps whose estimator is None are skipped.

    Fixes vs. previous revision: `_final_estimator` is restored as a
    @property (the class accesses it as an attribute everywhere — as a plain
    method the `is not None` checks and `.fit(...)` calls operated on a bound
    method object and failed); `None` transforms are now actually skipped
    (`pass` did not prevent the subsequent dereference); and `partial_fit`
    now probes for `partial_fit_transform` — the method it actually calls —
    instead of `fit_transform`.
    """
    _estimator_type = 'pipeline'

    def __init__(self, steps):
        super().__init__()
        self.steps = tosequence(steps)
        self.active = False
        self.__configure()

    def __configure(self):
        # Validate the step sequence once, at construction time.
        self._validate_steps()

    def predict(self, X):
        """Run X through every transform, then the final estimator's predict."""
        Xt = X
        for (name, transform) in self.steps[:-1]:
            if transform is not None:
                Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)

    def fit(self, X, y):
        """Fit each transform on (X, y) in sequence, then the final estimator."""
        Xt = X
        for (name, transform) in self.steps[:-1]:
            if transform is None:
                # Skip placeholder steps entirely.
                continue
            if hasattr(transform, 'fit_transform'):
                Xt = transform.fit_transform(Xt, y)
            else:
                Xt = transform.fit(Xt, y).transform(Xt)
        if self._final_estimator is not None:
            self._final_estimator.fit(Xt, y)
        return self

    def partial_fit(self, X, y, classes=None):
        """Incrementally fit each transform and the final estimator on one batch."""
        Xt = X
        for (name, transform) in self.steps[:-1]:
            if transform is None:
                continue
            # Prefer the one-shot partial_fit_transform when the transform has it.
            if hasattr(transform, 'partial_fit_transform'):
                Xt = transform.partial_fit_transform(Xt, y, classes=classes)
            else:
                Xt = transform.partial_fit(Xt, y, classes=classes).transform(Xt)
        if self._final_estimator is not None:
            # Only forward `classes` when the estimator's partial_fit accepts it.
            if 'classes' in self._final_estimator.partial_fit.__code__.co_varnames:
                self._final_estimator.partial_fit(X=Xt, y=y, classes=classes)
            else:
                self._final_estimator.partial_fit(X=Xt, y=y)
        return self

    def partial_fit_predict(self, X, y):
        """Incrementally fit the pipeline and predict on the same batch."""
        Xt = X
        for (name, transform) in self.steps[:-1]:
            if transform is None:
                continue
            if hasattr(transform, 'partial_fit_transform'):
                Xt = transform.partial_fit_transform(Xt, y)
            else:
                Xt = transform.partial_fit(Xt, y).transform(Xt)
        if hasattr(self._final_estimator, 'partial_fit_predict'):
            return self._final_estimator.partial_fit_predict(Xt, y)
        else:
            return self._final_estimator.partial_fit(Xt, y).predict(Xt)

    def partial_fit_transform(self, X, y=None):
        # A pipeline is not itself usable as an incremental transform.
        raise NotImplementedError

    def _validate_steps(self):
        """Check transforms implement fit+transform; flag streaming capability."""
        (names, estimators) = zip(*self.steps)
        classifier = estimators[-1]
        transforms = estimators[:-1]
        self.active = True
        for t in transforms:
            if t is None:
                continue
            elif (not (hasattr(t, 'fit') or hasattr(t, 'fit_transform'))) or (not hasattr(t, 'transform')):
                self.active = False
                raise TypeError('All intermediate steps, including an evaluator, should implement fit and transform.')
        if (classifier is not None) and (not hasattr(classifier, 'partial_fit')):
            # The final estimator cannot stream; mark the pipeline inactive.
            self.active = False

    def named_steps(self):
        """Return the steps as a {name: estimator} dict."""
        return dict(self.steps)

    def get_info(self):
        """Return a human-readable summary of the pipeline's steps."""
        info = 'Pipeline:\n['
        (names, estimators) = zip(*self.steps)
        learner = estimators[-1]
        transforms = estimators[:-1]
        i = 0
        for t in transforms:
            try:
                if t.get_info() is not None:
                    info += t.get_info()
                    info += '\n'
                else:
                    info += 'Transform: no info available'
            except NotImplementedError:
                info += 'Transform: no info available'
            i += 1
        if learner is not None:
            try:
                if hasattr(learner, 'get_info'):
                    info += learner.get_info()
                else:
                    info += 'Learner: no info available'
            except NotImplementedError:
                info += 'Learner: no info available'
        info += ']'
        return info

    @property
    def _final_estimator(self):
        """The last step's estimator (a property: the class reads it as an attribute)."""
        return self.steps[-1][-1]
def line_search_wolfe1(f, fprime, xk, pk, gfk=None, old_fval=None, old_old_fval=None, args=(), c1=0.0001, c2=0.9, amax=50, amin=1e-08, xtol=1e-14):
    """Vector wrapper around `scalar_search_wolfe1`.

    Reduces the vector problem to the 1-D functions phi(s) = f(xk + s*pk)
    and derphi(s) = <grad f(xk + s*pk), pk>, counting evaluations.

    Returns (step, n_f_evals, n_grad_evals, fval, old_fval, last_gradient).
    """
    if gfk is None:
        gfk = fprime(xk, *args)
    # Mutable cells so the closures can update counts and the latest gradient.
    counts = {'phi': 0, 'derphi': 0}
    latest_grad = [gfk]

    def phi(alpha):
        counts['phi'] += 1
        return f(xk + alpha * pk, *args)

    def derphi(alpha):
        counts['derphi'] += 1
        latest_grad[0] = fprime(xk + alpha * pk, *args)
        return np.dot(latest_grad[0], pk)

    derphi0 = np.dot(gfk, pk)
    stp, fval, old_fval = scalar_search_wolfe1(phi, derphi, old_fval, old_old_fval, derphi0, c1=c1, c2=c2, amax=amax, amin=amin, xtol=xtol)
    return (stp, counts['phi'], counts['derphi'], fval, old_fval, latest_grad[0])
def BuildAdj(vtoi, deps):
    """Build labelled and connectivity adjacency maps for a token graph.

    `vtoi` maps token -> index; `deps` is an iterable of tuples whose first
    element is the source token, second the target token, and last the arc
    label. Besides dependency arcs, each index is linked to its immediate
    neighbours (label 'neighbor') and to itself (connectivity only).

    Returns (adjs, conn_adjs): per-index dicts of {target: [labels]} and
    {target: 1} respectively.
    """
    itov = {index: token for token, index in vtoi.items()}
    adjs = {}
    conn_adjs = {}
    size = len(itov)
    for k in itov:
        arc_map = {}
        conn_map = {}
        adjs[k] = arc_map
        conn_adjs[k] = conn_map
        src = itov[k]
        # Dependency arcs out of this token (last matching dep wins per target).
        for dep in deps:
            if dep[0] == src:
                target = vtoi[dep[1]]
                arc_map[target] = [dep[-1]]
                conn_map[target] = 1
        # Sequential neighbours: next index first, then previous.
        for nb in (k + 1, k - 1):
            if 0 <= nb < size:
                conn_map[nb] = 1
                arc_map.setdefault(nb, []).append('neighbor')
        # Self-loop for connectivity.
        conn_map[k] = 1
    return (adjs, conn_adjs)
# NOTE(review): these '.parametrize(...)' lines look like '@pytest.mark.parametrize'
# decorators whose '@pytest.mark' prefix was lost — confirm against the original file.
.parametrize('forward_output', (True, False))
.parametrize('backend', ('hdf5', 'netcdf4'))
.parametrize('as_scalar', (True, False))
def test_mixed_3D(backend, forward_output, as_scalar):
    """Round-trip a mixed vector/scalar 3D field through `writer` and `reader`."""
    # netcdf4 cannot store spectral (forward) output; also honor unavailable backends.
    if (((backend == 'netcdf4') and (forward_output is True)) or skip[backend]):
        return
    # Two Fourier bases and one Chebyshev basis form the 3D tensor-product space.
    K0 = FunctionSpace(N[0], 'F', dtype='D', domain=(0, np.pi))
    K1 = FunctionSpace(N[1], 'F', dtype='d', domain=(0, (2 * np.pi)))
    K2 = FunctionSpace(N[2], 'C')
    T = TensorProductSpace(comm, (K0, K1, K2))
    TT = VectorSpace(T)
    filename = 'test3Dm_{}'.format(ex[forward_output])
    hfile = writer(filename, TT, backend=backend)
    # Forward output lives in spectral space (Function); otherwise physical (Array).
    uf = (Function(TT, val=2) if forward_output else Array(TT, val=2))
    uf[0] = 1
    # Full fields plus 2D/1D slices, to exercise sliced writing as well.
    data = {'ux': (uf[0], (uf[0], [slice(None), 4, slice(None)]), (uf[0], [slice(None), 4, 4])), 'uy': (uf[1], (uf[1], [slice(None), 4, slice(None)]), (uf[1], [slice(None), 4, 4])), 'u': [uf, (uf, [slice(None), 4, slice(None)])]}
    hfile.write(0, data, as_scalar=as_scalar)
    hfile.write(1, data, as_scalar=as_scalar)
    if ((not forward_output) and (backend == 'hdf5') and (comm.Get_rank() == 0)):
        generate_xdmf((filename + '.h5'))
    if (as_scalar is False):
        # Vector read-back: the whole field must round-trip unchanged.
        u0 = (Function(TT) if forward_output else Array(TT))
        read = reader(filename, TT, backend=backend)
        read.read(u0, 'u', step=1)
        assert np.allclose(u0, uf)
    else:
        # Scalar mode stores components individually; 'u0' is component 0.
        u0 = (Function(T) if forward_output else Array(T))
        read = reader(filename, T, backend=backend)
        read.read(u0, 'u0', step=1)
        assert np.allclose(u0, uf[0])
    T.destroy()
    cleanup()
class AttentionPool(nn.Module):
    """Attention-weighted pooling along one dimension.

    Per-position weights are computed by a linear transform of `logits`
    followed by a softmax over `pooldim`; `decision` is then averaged with
    those weights along the same dimension.
    """

    def __init__(self, inputdim, outputdim=10, pooldim=1, **kwargs):
        super().__init__()
        self.inputdim = inputdim
        self.outputdim = outputdim
        self.pooldim = pooldim
        self.transform = nn.Linear(inputdim, outputdim)
        self.activ = nn.Softmax(dim=pooldim)
        self.eps = 1e-07  # kept for interface parity; not used in forward

    def forward(self, logits, decision):
        """Pool `decision` over `pooldim` with weights derived from `logits`."""
        attention = self.activ(self.transform(logits))
        weighted_sum = (decision * attention).sum(self.pooldim)
        normaliser = attention.sum(self.pooldim)
        return weighted_sum / normaliser
class SingleProcessRamTensorStorage(SingleProcessTensorStorage):
    """In-RAM tensor storage backed by an `io.BytesIO` buffer (single-process use)."""

    def __init__(self, data_schema: Dict[(str, SizeData)], buf: io.BytesIO):
        # Delegates entirely to the base class; this subclass only fixes the
        # storage medium to an in-memory bytes buffer.
        super().__init__(data_schema, buf)
class config(old_config):
    """numpy.distutils 'config' command: distutils' config extended with Fortran support."""

    # Expose a --fcompiler=<type> command-line option.
    # NOTE(review): this mutates the BASE class's `user_options` list in place
    # (it runs at class-body execution time) — confirm that is intended.
    old_config.user_options += [('fcompiler=', None, 'specify the Fortran compiler type')]
    def initialize_options(self):
        """Add the `fcompiler` option on top of distutils' defaults."""
        self.fcompiler = None
        old_config.initialize_options(self)
    def _check_compiler(self):
        """Extend distutils' compiler check with MSVC quirks and Fortran-compiler setup."""
        old_config._check_compiler(self)
        from numpy.distutils.fcompiler import FCompiler, new_fcompiler
        if ((sys.platform == 'win32') and (self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'))):
            # MSVC-like compilers must be initialized before use; a failure here
            # usually means Visual Studio is absent or misconfigured.
            if (not self.compiler.initialized):
                try:
                    self.compiler.initialize()
                except IOError:
                    e = get_exception()
                    msg = (textwrap.dedent(' Could not initialize compiler instance: do you have Visual Studio\n installed? If you are trying to build with MinGW, please use "python setup.py\n build -c mingw32" instead. If you have Visual Studio installed, check it is\n correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,\n VS 2010 for >= 3.3).\n\n Original exception was: %s, and the Compiler class was %s\n ') % (e, self.compiler.__class__.__name__))
                    # NOTE(review): printing a dedented single space looks like a
                    # mangled separator/banner print — confirm against upstream.
                    print(textwrap.dedent(' '))
                    raise distutils.errors.DistutilsPlatformError(msg)
            # VS2010+ needs /MANIFEST so the post-link mt.exe step can run.
            from distutils import msvc9compiler
            if (msvc9compiler.get_build_version() >= 10):
                for ldflags in [self.compiler.ldflags_shared, self.compiler.ldflags_shared_debug]:
                    if ('/MANIFEST' not in ldflags):
                        ldflags.append('/MANIFEST')
        if (not isinstance(self.fcompiler, FCompiler)):
            # `self.fcompiler` may still be a command-line type string here;
            # resolve it into a real FCompiler instance and customize it.
            self.fcompiler = new_fcompiler(compiler=self.fcompiler, dry_run=self.dry_run, force=1, c_compiler=self.compiler)
            if (self.fcompiler is not None):
                self.fcompiler.customize(self.distribution)
                if self.fcompiler.get_version():
                    self.fcompiler.customize_cmd(self)
                    self.fcompiler.show_customization()
    def _wrap_method(self, mth, lang, args):
        """Invoke a base-class method with `self.compiler` temporarily swapped
        to the Fortran compiler when `lang` is f77/f90.

        Compilation failures are normalized to `CompileError` after the
        original compiler is restored.
        """
        from distutils.ccompiler import CompileError
        from distutils.errors import DistutilsExecError
        save_compiler = self.compiler
        if (lang in ['f77', 'f90']):
            self.compiler = self.fcompiler
        try:
            ret = mth(*((self,) + args))
        except (DistutilsExecError, CompileError):
            # Restore the original compiler before re-raising uniformly.
            str(get_exception())
            self.compiler = save_compiler
            raise CompileError
        self.compiler = save_compiler
        return ret
    def _compile(self, body, headers, include_dirs, lang):
        """Compile `body`, routing Fortran sources through the Fortran compiler."""
        (src, obj) = self._wrap_method(old_config._compile, lang, (body, headers, include_dirs, lang))
        # Also schedule the generated dependency file for cleanup.
        self.temp_files.append((obj + '.d'))
        return (src, obj)
    def _link(self, body, headers, include_dirs, libraries, library_dirs, lang):
        """Link a configuration probe, with MSVC/MinGW-specific fixups.

        For MSVC builds, Fortran sources are recompiled as C and the Fortran
        runtime's library directories/names are translated into something the
        MS linker can consume (including on-the-fly ``lib<name>.a`` ->
        ``<name>.lib`` copies). For MinGW a manifest is generated first.
        """
        if (self.compiler.compiler_type == 'msvc'):
            # Work on copies so the caller's lists are not mutated.
            libraries = (libraries or [])[:]
            library_dirs = (library_dirs or [])[:]
            if (lang in ['f77', 'f90']):
                # MSVC cannot compile Fortran; treat the probe as C.
                lang = 'c'
            if self.fcompiler:
                for d in (self.fcompiler.library_dirs or []):
                    # Translate Cygwin-style POSIX paths to Windows paths so
                    # the MS linker can find them; best-effort only.
                    if d.startswith('/usr/lib'):
                        try:
                            d = subprocess.check_output(['cygpath', '-w', d])
                        except (OSError, subprocess.CalledProcessError):
                            pass
                        else:
                            d = filepath_from_subprocess_output(d)
                    library_dirs.append(d)
                for libname in (self.fcompiler.libraries or []):
                    if (libname not in libraries):
                        libraries.append(libname)
            for libname in libraries:
                if libname.startswith('msvc'):
                    continue
                fileexists = False
                for libdir in (library_dirs or []):
                    libfile = os.path.join(libdir, ('%s.lib' % libname))
                    if os.path.isfile(libfile):
                        fileexists = True
                        break
                if fileexists:
                    continue
                # No .lib found: make a GNU-style static lib visible to MSVC
                # by copying lib<name>.a to <name>.lib (cleaned up later).
                fileexists = False
                for libdir in library_dirs:
                    libfile = os.path.join(libdir, ('lib%s.a' % libname))
                    if os.path.isfile(libfile):
                        libfile2 = os.path.join(libdir, ('%s.lib' % libname))
                        copy_file(libfile, libfile2)
                        self.temp_files.append(libfile2)
                        fileexists = True
                        break
                if fileexists:
                    continue
                log.warn(('could not find library %r in directories %s' % (libname, library_dirs)))
        elif (self.compiler.compiler_type == 'mingw32'):
            generate_manifest(self)
        return self._wrap_method(old_config._link, lang, (body, headers, include_dirs, libraries, library_dirs, lang))
    def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
        """Return True if ``header`` can be #included (compile-only probe).

        ``library_dirs`` and ``lang`` are accepted for interface symmetry
        with the other checks but are not used here.
        """
        self._check_compiler()
        return self.try_compile('/* we need a dummy line to make distutils happy */', [header], include_dirs)
    def check_decl(self, symbol, headers=None, include_dirs=None):
        """Return True if ``symbol`` is declared (as a macro or identifier)
        after including ``headers``: when the macro guard fails, referencing
        the symbol must still compile."""
        self._check_compiler()
        body = (textwrap.dedent('\n int main(void)\n {\n #ifndef %s\n (void) %s;\n #endif\n ;\n return 0;\n }') % (symbol, symbol))
        return self.try_compile(body, headers, include_dirs)
    def check_macro_true(self, symbol, headers=None, include_dirs=None):
        """Return True if preprocessor expression ``symbol`` is defined and
        evaluates to true; otherwise the ``#error`` branch fails the compile."""
        self._check_compiler()
        body = (textwrap.dedent('\n int main(void)\n {\n #if %s\n #else\n #error false or undefined macro\n #endif\n ;\n return 0;\n }') % (symbol,))
        return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None, library_dirs=None):
self._check_compiler()
body = (textwrap.dedent('\n int main(void) {\n if ((%(name)s *) 0)\n return 0;\n if (sizeof (%(name)s))\n return 0;\n }\n ') % {'name': type_name})
st = False
try:
try:
self._compile((body % {'type': type_name}), headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
    def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
        """Return ``sizeof(type_name)`` determined purely at compile time.

        Uses the classic autoconf trick: declare an array whose size is
        negative unless a ``sizeof`` inequality holds, so a failing test
        shows up as a compile error and no target executable has to run.
        """
        self._check_compiler()
        # First confirm the type compiles at all (sizeof >= 0 always holds).
        body = textwrap.dedent('\n typedef %(type)s npy_check_sizeof_type;\n int main (void)\n {\n static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];\n test_array [0] = 0\n\n ;\n return 0;\n }\n ')
        self._compile((body % {'type': type_name}), headers, include_dirs, 'c')
        self._clean()
        if expected:
            # Fast path: try the caller's candidate sizes with an equality test.
            body = textwrap.dedent('\n typedef %(type)s npy_check_sizeof_type;\n int main (void)\n {\n static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];\n test_array [0] = 0\n\n ;\n return 0;\n }\n ')
            for size in expected:
                try:
                    self._compile((body % {'type': type_name, 'size': size}), headers, include_dirs, 'c')
                    self._clean()
                    return size
                except CompileError:
                    pass
        # General path: a '<= size' compile test. Grow an upper bound
        # exponentially, then bisect down to the exact size.
        body = textwrap.dedent('\n typedef %(type)s npy_check_sizeof_type;\n int main (void)\n {\n static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];\n test_array [0] = 0\n\n ;\n return 0;\n }\n ')
        low = 0
        mid = 0
        while True:
            try:
                self._compile((body % {'type': type_name, 'size': mid}), headers, include_dirs, 'c')
                self._clean()
                break
            except CompileError:
                low = (mid + 1)
                mid = ((2 * mid) + 1)
        high = mid
        # Invariant: sizeof > low - 1 and sizeof <= high.
        while (low != high):
            mid = (((high - low) // 2) + low)
            try:
                self._compile((body % {'type': type_name, 'size': mid}), headers, include_dirs, 'c')
                self._clean()
                high = mid
            except CompileError:
                low = (mid + 1)
        return low
def check_func(self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None):
self._check_compiler()
body = []
if decl:
if (type(decl) == str):
body.append(decl)
else:
body.append(('int %s (void);' % func))
body.append('#ifdef _MSC_VER')
body.append(('#pragma function(%s)' % func))
body.append('#endif')
body.append('int main (void) {')
if call:
if (call_args is None):
call_args = ''
body.append((' %s(%s);' % (func, call_args)))
else:
body.append((' %s;' % func))
body.append(' return 0;')
body.append('}')
body = ('\n'.join(body) + '\n')
return self.try_link(body, headers, include_dirs, libraries, library_dirs)
    def check_funcs_once(self, funcs, headers=None, include_dirs=None, libraries=None, library_dirs=None, decl=False, call=False, call_args=None):
        """Check several functions in a single compile/link, which is much
        faster than one check_func() per symbol.

        ``decl``, ``call`` and ``call_args`` are dicts keyed by function
        name; a missing or falsy entry falls back to a plain symbol
        reference with no declaration.
        """
        self._check_compiler()
        body = []
        if decl:
            for (f, v) in decl.items():
                if v:
                    body.append(('int %s (void);' % f))
        # Disable MSVC intrinsics so the link actually resolves the symbols.
        body.append('#ifdef _MSC_VER')
        for func in funcs:
            body.append(('#pragma function(%s)' % func))
        body.append('#endif')
        body.append('int main (void) {')
        if call:
            for f in funcs:
                if ((f in call) and call[f]):
                    if (not (call_args and (f in call_args) and call_args[f])):
                        args = ''
                    else:
                        args = call_args[f]
                    body.append((' %s(%s);' % (f, args)))
                else:
                    body.append((' %s;' % f))
        else:
            for f in funcs:
                body.append((' %s;' % f))
        body.append(' return 0;')
        body.append('}')
        body = ('\n'.join(body) + '\n')
        return self.try_link(body, headers, include_dirs, libraries, library_dirs)
    # Thin wrappers delegating to the module-level helper functions of the
    # same names (imported elsewhere in this module); inside the method body
    # the bare name resolves to the global function, not the method.
    def check_inline(self):
        # Keyword the compiler accepts for C99 'inline', if any.
        return check_inline(self)
    def check_restrict(self):
        # Keyword the compiler accepts for C99 'restrict', if any.
        return check_restrict(self)
    def check_compiler_gcc4(self):
        return check_compiler_gcc4(self)
    def check_gcc_function_attribute(self, attribute, name):
        return check_gcc_function_attribute(self, attribute, name)
    def check_gcc_function_attribute_with_intrinsics(self, attribute, name, code, include):
        return check_gcc_function_attribute_with_intrinsics(self, attribute, name, code, include)
    def check_gcc_variable_attribute(self, attribute):
        return check_gcc_variable_attribute(self, attribute)
def get_output(self, body, headers=None, include_dirs=None, libraries=None, library_dirs=None, lang='c', use_tee=None):
warnings.warn('\n\nUsage of get_output is deprecated: please do not \nuse it anymore, and avoid configuration checks \ninvolving running executable on the target machine.\n\n', DeprecationWarning, stacklevel=2)
self._check_compiler()
(exitcode, output) = (255, '')
try:
grabber = GrabStdout()
try:
(src, obj, exe) = self._link(body, headers, include_dirs, libraries, library_dirs, lang)
grabber.restore()
except Exception:
output = grabber.data
grabber.restore()
raise
exe = os.path.join('.', exe)
try:
output = subprocess.check_output([exe], cwd='.')
except subprocess.CalledProcessError as exc:
exitstatus = exc.returncode
output = ''
except OSError:
exitstatus = 127
output = ''
else:
output = filepath_from_subprocess_output(output)
if hasattr(os, 'WEXITSTATUS'):
exitcode = os.WEXITSTATUS(exitstatus)
if os.WIFSIGNALED(exitstatus):
sig = os.WTERMSIG(exitstatus)
log.error(('subprocess exited with signal %d' % (sig,)))
if (sig == signal.SIGINT):
raise KeyboardInterrupt
else:
exitcode = exitstatus
log.info('success!')
except (CompileError, LinkError):
log.info('failure.')
self._clean()
return (exitcode, output) |
def test_branch_coverage_half_branch(subject_properties_mock, trace_mock):
    """One known predicate whose true branch was taken (distance 0.0) but
    whose false branch was not: branch coverage must be exactly 0.5."""
    subject_properties_mock.existing_predicates[0] = MagicMock(PredicateMetaData)
    trace_mock.true_distances[0] = 0.0
    assert (ff.compute_branch_coverage(trace_mock, subject_properties_mock) == 0.5)
class WifiLinkMonitor(object):
    """Tracks STA<->AP association for the ns-3 PyViz visualizer, drawing a
    WifiLink line from each station to its current access point.

    NOTE(review): iterates with dict.itervalues(), i.e. Python 2 — consistent
    with the classic ns-3 pyviz code base; confirm before porting.
    """
    def __init__(self, dummy_viz):
        self.access_points = {}  # bssid (str) -> visualizer node
        self.stations = []  # list of (sta net device, viz node, WifiLink)
    def scan_nodes(self, viz):
        """Rebuild the station/AP tables from the current node list."""
        for (sta_netdevice, viz_node, wifi_link) in self.stations:
            wifi_link.destroy()
        self.access_points = {}
        self.stations = []
        for node in viz.nodes.itervalues():
            ns3_node = ns.network.NodeList.GetNode(node.node_index)
            for devI in range(ns3_node.GetNDevices()):
                dev = ns3_node.GetDevice(devI)
                if (not isinstance(dev, ns.wifi.WifiNetDevice)):
                    continue
                wifi_mac = dev.GetMac()
                if isinstance(wifi_mac, ns.wifi.StaWifiMac):
                    wifi_link = WifiLink(viz.links_group, node, dev)
                    self.stations.append((dev, node, wifi_link))
                elif isinstance(wifi_mac, ns.wifi.ApWifiMac):
                    bssid = ns.network.Mac48Address.ConvertFrom(dev.GetAddress())
                    self.access_points[str(bssid)] = node
    def simulation_periodic_update(self, viz):
        """Point each station's link at the AP it is currently attached to."""
        for (sta_netdevice, viz_node, wifi_link) in self.stations:
            if (not sta_netdevice.IsLinkUp()):
                wifi_link.set_ap(None)
                continue
            bssid = str(sta_netdevice.GetMac().GetBssid())
            if (bssid == '00:00:00:00:00:00'):
                # All-zero BSSID means "not associated".
                wifi_link.set_ap(None)
                continue
            ap = self.access_points[bssid]
            wifi_link.set_ap(ap)
    def update_view(self, viz):
        """Refresh link endpoints after node positions changed."""
        for (dummy_sta_netdevice, dummy_viz_node, wifi_link) in self.stations:
            if (wifi_link is not None):
                wifi_link.update_points()
# BUGFIX(review): the decorator line was truncated to ``(frozen=True)`` — a
# syntax error — in this file; restored as the classic attrs decorator that
# matches the ``attr.ib`` field style used below. Confirm against upstream.
@attr.s(frozen=True)
class RunConfiguration(HalfFrozenObject):
    """Experiment run configuration (immutable once constructed)."""
    experiment_name: str = attr.ib(default=None)
    eval_experiment_name: str = attr.ib(default=None)
    experiment_directory: str = attr.ib(default=None)
    # How the trained model is evaluated.
    eval_mode: str = attr.ib(default=None, validator=(lambda i, a, v: (v in ('default', 'dropout', 'ensemble', 'energy_scoring'))))
    job: str = attr.ib(default=None, validator=(lambda i, a, v: (v in ('train', 'evaluate'))))
    save_model: bool = attr.ib(default=None)
    # NOTE(review): validator only admits 0 or False — presumably "first GPU
    # or CPU-only"; confirm the intended contract.
    gpu: int = attr.ib(default=None, validator=(lambda i, a, v: (v in (0, False))))
    num_inits: int = attr.ib(default=None)
    num_splits: int = attr.ib(default=None)
    log: bool = attr.ib(default=True)
    debug: bool = attr.ib(default=True)
    # Transductive setting, optionally with out-of-distribution nodes.
    ex_type: str = attr.ib(default='transductive', validator=(lambda i, a, v: (v in ('transductive', 'transductive_ood'))))
    ood_loc: bool = attr.ib(default=True)
    ood_loc_only: bool = attr.ib(default=False)
    ood_edge_perturbations: bool = attr.ib(default=True)
    ood_isolated_perturbations: bool = attr.ib(default=False)
def register_Ns3Ping6Helper_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ping6Helper: both constructors
    plus the Install/Set* methods. Registration order is part of the
    generated binding, so it is preserved as-is."""
    cls.add_constructor([param('ns3::Ping6Helper const &', 'arg0')])  # copy ctor
    cls.add_constructor([])  # default ctor
    cls.add_method('Install', 'ns3::ApplicationContainer', [param('ns3::NodeContainer', 'c')])
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetIfIndex', 'void', [param('uint32_t', 'ifIndex')])
    cls.add_method('SetLocal', 'void', [param('ns3::Ipv6Address', 'ip')])
    cls.add_method('SetRemote', 'void', [param('ns3::Ipv6Address', 'ip')])
    cls.add_method('SetRoutersAddress', 'void', [param('std::vector< ns3::Ipv6Address >', 'routers')])
    return
class tqdm_notebook(tqdm):
    """tqdm subclass that renders progress as Jupyter widgets (an IntProgress
    bar plus an HTML text label) instead of writing to a text stream."""

    # BUGFIX(review): upstream tqdm declares status_printer a @staticmethod;
    # the decorator appears to have been stripped from this file. Without it
    # the call in __init__ — self.status_printer(self.fp, self.total,
    # self.desc) — passes four arguments to a three-parameter function and
    # raises TypeError.
    @staticmethod
    def status_printer(_, total=None, desc=None):
        """Create the widgets and return a closure that updates them.

        ``_`` receives the (unused) output file object; ``total`` sizes the
        bar, and an unknown total yields a full bar in 'info' style.
        """
        if total:
            pbar = IntProgress(min=0, max=total)
        else:
            pbar = IntProgress(min=0, max=1)
            pbar.value = 1
            pbar.bar_style = 'info'
        if desc:
            pbar.description = desc
        ptext = HTML()
        container = HBox(children=[pbar, ptext])
        display(container)
        def print_status(s='', close=False, bar_style=None, desc=None):
            # ``s`` is tqdm's formatted meter; '<n>/|/<rest>' encodes the raw
            # count (see the bar_format installed in __init__).
            if total:
                if s:
                    npos = s.find('/|/')
                    if (npos >= 0):
                        n = int(s[:npos])
                        s = s[(npos + 3):]
                        if (n is not None):
                            pbar.value = n
            if s:
                s = s.replace('||', '')  # drop the ascii-bar placeholder
                s = escape(s)
                ptext.value = s
            if bar_style:
                # Never downgrade a 'danger' (failed) bar back to 'success'.
                if (not ((pbar.bar_style == 'danger') and (bar_style == 'success'))):
                    pbar.bar_style = bar_style
            if (close and (pbar.bar_style != 'danger')):
                try:
                    container.close()
                except AttributeError:
                    # Older ipywidgets has no close(); hide instead.
                    container.visible = False
            if desc:
                pbar.description = desc
        return print_status

    def __init__(self, *args, **kwargs):
        # Jupyter mishandles stderr; route the default output to stdout.
        if (kwargs.get('file', sys.stderr) is sys.stderr):
            kwargs['file'] = sys.stdout
        if (not kwargs.get('bar_format', None)):
            # Prefix the raw count so print_status can parse it back out.
            kwargs['bar_format'] = '{n}/|/{l_bar}{r_bar}'
        kwargs['gui'] = True
        super(tqdm_notebook, self).__init__(*args, **kwargs)
        if (self.disable or (not kwargs['gui'])):
            return
        self.sp = self.status_printer(self.fp, self.total, self.desc)
        self.desc = None  # the widget renders the description instead
        if (not self.disable):
            self.sp(self.__repr__())

    def __iter__(self, *args, **kwargs):
        try:
            for obj in super(tqdm_notebook, self).__iter__(*args, **kwargs):
                (yield obj)
        except:
            # Bare except on purpose: also mark the bar red on
            # KeyboardInterrupt before re-raising.
            self.sp(bar_style='danger')
            raise

    def update(self, *args, **kwargs):
        try:
            super(tqdm_notebook, self).update(*args, **kwargs)
        except Exception as exc:
            self.sp(bar_style='danger')
            raise exc

    def close(self, *args, **kwargs):
        super(tqdm_notebook, self).close(*args, **kwargs)
        if hasattr(self, 'sp'):
            if (self.total and (self.n < self.total)):
                # Closed before reaching total: flag as interrupted.
                self.sp(bar_style='danger')
            elif self.leave:
                self.sp(bar_style='success')
            else:
                self.sp(close=True)

    def moveto(self, *args, **kwargs):
        # Cursor movement is meaningless for a widget; swallow it.
        return

    def set_description(self, desc=None, **_):
        self.sp(desc=desc)
def adapt_time_step(ts, status, adt, problem, verbose=False):
    """Time-step adaption hook: once simulation time passes 0.5, force the
    step to 0.1; always report success to the caller.

    Only ``ts`` (with ``time`` and ``set_time_step``) is consulted; the
    remaining arguments exist to satisfy the adapt-callback signature.
    """
    past_halfway = ts.time > 0.5
    if past_halfway:
        ts.set_time_step(0.1)
    return True
# NOTE(review): the line below appears to be a truncated decorator (an
# optplan node-registration decorator, e.g. ``@optplan.register_node_type(...)``)
# — confirm against upstream.
_node_type()
class Overlap(optplan.Function):
    """Optimization-plan schema node: overlap integral between a simulation
    field and a reference overlap mode."""
    type = schema_utils.polymorphic_model_type('function.overlap')
    simulation = optplan.ReferenceType(optplan.Function)
    overlap = optplan.ReferenceType(optplan.EmOverlap)
# NOTE(review): the two lines below appear to be truncated decorators (most
# likely stacked ``@pytest.mark.parametrize(...)``) — confirm upstream.
.parametrize('module_creator', [ModuleCreator(TSTPureConv(), [(4, 3, 32, 32)])])
.parametrize('another_input_shape', [(1, 3, 64, 64), (1, 3, 80, 80)])
def test_another_shape_input(module_creator, another_input_shape):
    """A graph traced from one input shape must run forward on a different
    (compatible) input shape via the graph_def representation."""
    module = module_creator.module
    proto_variable_inputs = [nn.ProtoVariable(shape) for shape in module_creator.input_shape]
    # Calling the module on ProtoVariables records it into the default graph.
    outputs = module(*proto_variable_inputs)
    g = nn.graph_def.get_default_graph()
    input = nn.Variable(another_input_shape)
    output = g(input)
    output.forward()
def efficient_pwdist_gauss(M1, S1, M2=None, S2=None, sqrtS1=None, sqrtS2=None, symmetric=False, diagonal_cov=False, commute=False, sqrt_method='spectral', sqrt_niters=20, sqrt_pref=0, device='cpu', nworkers=1, cost_function='euclidean', return_dmeans=False, return_sqrts=False):
    """Pairwise Wasserstein distances between Gaussians given as means (M*)
    and covariances (S*).

    When ``M2``/``S2`` are omitted the distances are computed within the
    first collection and the matrix symmetrized. Covariance square roots are
    taken from ``sqrtS1``/``sqrtS2`` when supplied (``sqrt_pref`` picks the
    preferred side), otherwise precomputed from whichever covariance list is
    shorter; ``flip`` records that arguments must be swapped in that case.

    Returns ``D`` (n1 x n2), optionally together with the matrix of mean
    distances and/or the list of covariance square roots.

    BUGFIX(review): the square-root precomputation loop variable had been
    stripped in this file (``for (i, ) in enumerate(S)``, ``.ndim``,
    ``torch.sqrt()``, ``sqrtm()``) — restored as ``cov``; confirm upstream.
    """
    if M2 is None:
        symmetric = True
        (M2, S2) = (M1, S1)
    (n1, n2) = (len(M1), len(M2))
    if symmetric:
        # Upper triangle only; mirrored into the lower triangle below.
        pairs = list(itertools.combinations(range(n1), 2))
    else:
        pairs = list(itertools.product(range(n1), range(n2)))
    D = torch.zeros((n1, n2), device=device, dtype=M1.dtype)
    sqrtS = []
    both_sqrt = (sqrtS1 is not None) and (sqrtS2 is not None)
    if (both_sqrt and (sqrt_pref == 0)) or (sqrtS1 is not None):
        flip = False
        sqrtS = sqrtS1
    elif sqrtS2 is not None:
        if sqrt_pref == 0:
            logger.warning('sqrt_pref=0 but S1 not provided!')
        flip = True  # distances will be computed with arguments swapped
        sqrtS = sqrtS2
    elif len(S1) <= len(S2):
        flip = False
        S = S1
    else:
        flip = True
        S = S2
    if not sqrtS:
        logger.info('Precomputing covariance matrix square roots...')
        for cov in S:
            if diagonal_cov:
                # Diagonal covariances are stored as 1-d variance vectors.
                assert cov.ndim == 1
                sqrtS.append(torch.sqrt(cov))
            else:
                sqrtS.append(sqrtm(cov) if (sqrt_method == 'spectral') else sqrtm_newton_schulz(cov, sqrt_niters))
    logger.info('Computing gaussian-to-gaussian wasserstein distances...')
    for (i, j) in pairs:
        if not flip:
            D[(i, j)] = wasserstein_gauss_distance(M1[i], M2[j], S1[i], S2[j], sqrtS[i], diagonal_cov=diagonal_cov, commute=commute, squared=True, cost_function=cost_function, sqrt_method=sqrt_method, sqrt_niters=sqrt_niters)
        else:
            D[(i, j)] = wasserstein_gauss_distance(M2[j], M1[i], S2[j], S1[i], sqrtS[j], diagonal_cov=diagonal_cov, commute=commute, squared=True, cost_function=cost_function, sqrt_method=sqrt_method, sqrt_niters=sqrt_niters)
        if symmetric:
            D[(j, i)] = D[(i, j)]
    if return_dmeans:
        D_means = torch.cdist(M1, M2)
        if return_sqrts:
            return (D, D_means, sqrtS)
        else:
            return (D, D_means)
    elif return_sqrts:
        return (D, sqrtS)
    else:
        return D
def largest_available_k(n, t=2):
    """Return the largest k for which Sage can build an orthogonal array
    OA(k, n) of strength ``t`` (Infinity in the degenerate n in {0, 1} cases)."""
    from .block_design import projective_plane
    if (n < 0):
        raise ValueError('n(={}) was expected to be >=0'.format(n))
    if (t < 0):
        raise ValueError('t(={}) was expected to be >=0'.format(t))
    if ((n == 0) or (n == 1)):
        # OA(k, 0) and OA(k, 1) exist trivially for every k.
        from sage.rings.infinity import Infinity
        return Infinity
    elif (t == 2):
        if (projective_plane(n, existence=True) is True):
            # A projective plane of order n yields the maximum k = n + 1.
            return (n + 1)
        else:
            # Probe the construction cache for ever larger k.
            k = 1
            while (_OA_cache_construction_available((k + 1), n) is True):
                k = (k + 1)
    else:
        # General strength: ask orthogonal_array() itself for existence.
        k = (t - 1)
        while (orthogonal_array((k + 1), n, t, existence=True) is True):
            k += 1
    return k
class Differential(UniqueRepresentation, Morphism, metaclass=InheritComparisonClasscallMetaclass):
    """Differential of a graded commutative algebra: a degree-1 map d with
    d(d(x)) = 0 obeying the graded Leibniz rule, specified by the images of
    the algebra generators."""
    def __classcall__(cls, A, im_gens):
        """Normalize ``im_gens`` to a tuple of generator images and validate
        that the map preserves the defining ideal and raises degree by 1."""
        if isinstance(im_gens, (list, tuple)):
            im_gens = {A.gen(i): A(x) for (i, x) in enumerate(im_gens)}
        else:
            im_gens = {A(a): A(im_gens[a]) for a in im_gens}
        I = A.defining_ideal()
        def image_monomial(exponent):
            # Image under d of the monomial with this exponent vector,
            # computed recursively via the graded Leibniz rule.
            i = 0
            cexp = list(exponent)
            ell = len(cexp)
            while (i < ell):
                if (not cexp[i]):
                    i += 1
                    continue
                a = A.gen(i)
                try:
                    da = im_gens[a]
                except KeyError:
                    da = A.zero()
                cexp[i] -= 1
                b = A.prod(((A.gen(j) ** cexp[j]) for j in range(len(cexp))))
                db = image_monomial(cexp)
                # Koszul sign from moving d past the generator a.
                im = ((da * b) + ((((- 1) ** A._degrees[i]) * a) * db))
                return A(im)
            return A.zero()
        for g in I.gens():
            d = g.dict()
            res = A.sum(((d[ex] * image_monomial(ex)) for ex in d))
            if (not res.is_zero()):
                raise ValueError('the differential does not preserve the ideal')
        for i in im_gens:
            x = im_gens[i]
            if ((not x.is_zero()) and ((not x.is_homogeneous()) or (total_degree(x.degree()) != (total_degree(i.degree()) + 1)))):
                raise ValueError('the given dictionary does not determine a degree 1 map')
        im_gens = tuple((im_gens.get(x, A.zero()) for x in A.gens()))
        return super().__classcall__(cls, A, im_gens)
    def __init__(self, A, im_gens):
        """Store generator images and check d(d(g)) = 0 on every generator."""
        self._dic_ = {A.gen(i): x for (i, x) in enumerate(im_gens)}
        Morphism.__init__(self, Hom(A, A, category=Modules(A.base_ring())))
        for i in A.gens():
            if (not self(self(i)).is_zero()):
                raise ValueError('the given dictionary does not determine a valid differential')
    def _call_(self, x):
        """Apply the differential to an element, term by term via Leibniz."""
        if x.is_zero():
            return self.codomain().zero()
        res = self.codomain().zero()
        dic = x.dict()
        for key in dic:
            keyl = list(key)
            coef = dic[key]
            idx = 0
            while keyl:
                exp = keyl.pop(0)
                if (exp > 0):
                    v1 = ((exp * self._dic_[x.parent().gen(idx)]) * (x.parent().gen(idx) ** (exp - 1)))
                    v2 = prod(((x.parent().gen(((i + idx) + 1)) ** keyl[i]) for i in range(len(keyl))))
                    res += ((coef * v1) * v2)
                # Fold the processed factor and its Koszul sign into coef.
                coef *= (((- 1) ** total_degree(x.parent()._degrees[idx])) * (x.parent().gen(idx) ** exp))
                idx += 1
        return res
    def _repr_defn(self):
        """One 'generator --> image' line per generator."""
        return '\n'.join((f'{i} --> {self(i)}' for i in self.domain().gens()))
    def _repr_(self):
        if (self.domain() is None):
            return 'Defunct morphism'
        s = 'Differential of {}'.format(self.domain()._base_repr())
        s += ('\n Defn: ' + '\n '.join(self._repr_defn().split('\n')))
        return s
    # NOTE(review): the next line looks like a truncated decorator
    # (possibly ``@cached_method``) — confirm against upstream.
    _method
    def differential_matrix(self, n):
        """Matrix of d restricted to degree ``n``, in the monomial bases."""
        A = self.domain()
        dom = A.basis(n)
        cod = A.basis((n + 1))
        cokeys = [next(iter(a.lift().dict().keys())) for a in cod]
        m = matrix(A.base_ring(), len(dom), len(cod))
        for (i, domi) in enumerate(dom):
            im = self(domi)
            dic = im.lift().dict()
            for j in dic.keys():
                k = cokeys.index(j)
                m[(i, k)] = dic[j]
        m.set_immutable()
        return m
    def coboundaries(self, n):
        """Space of degree-``n`` coboundaries (image of d from degree n-1)."""
        A = self.domain()
        F = A.base_ring()
        if (n == 0):
            return VectorSpace(F, 0)
        if (n == 1):
            # Nothing maps into degree 1 except from degree 0 constants.
            V0 = VectorSpace(F, len(A.basis(1)))
            return V0.subspace([])
        M = self.differential_matrix((n - 1))
        V0 = VectorSpace(F, M.nrows())
        V1 = VectorSpace(F, M.ncols())
        mor = V0.Hom(V1)(M)
        return mor.image()
    def cocycles(self, n):
        """Space of degree-``n`` cocycles (kernel of d in degree n)."""
        A = self.domain()
        F = A.base_ring()
        if (n == 0):
            return VectorSpace(F, 1)
        M = self.differential_matrix(n)
        V0 = VectorSpace(F, M.nrows())
        V1 = VectorSpace(F, M.ncols())
        mor = V0.Hom(V1)(M)
        return mor.kernel()
    def cohomology_raw(self, n):
        """Degree-``n`` cohomology as a quotient vector space."""
        return self.cocycles(n).quotient(self.coboundaries(n))
    def cohomology(self, n):
        """Degree-``n`` cohomology as a free module on bracketed classes."""
        H = self.cohomology_raw(n)
        H_basis_raw = (H.lift(H.basis()[i]) for i in range(H.dimension()))
        A = self.domain()
        B = A.basis(n)
        H_basis = (sum(((c * b) for (c, b) in zip(coeffs, B))) for coeffs in H_basis_raw)
        H_basis_brackets = [CohomologyClass(b, A) for b in H_basis]
        return CombinatorialFreeModule(A.base_ring(), H_basis_brackets, sorting_key=sorting_keys, monomial_reverse=True)
    homology = cohomology
    def _is_nonzero(self):
        """True unless every generator maps to zero."""
        return any(self._dic_.values())
class ShearX(DauphinTransform):
    """Horizontal-shear augmentation: shears the image along x by an amount
    derived from ``level`` within ``value_range``, with a random sign."""

    value_range = (0.0, 0.3)

    def __init__(self, name=None, prob=1.0, level=0):
        super().__init__(name, prob, level)

    def transform(self, pil_img, label, **kwargs):
        magnitude = categorize_value(self.level, self.value_range, 'float')
        # Flip the shear direction half of the time.
        if random.random() > 0.5:
            magnitude = -magnitude
        sheared = pil_img.transform(pil_img.size, Image.AFFINE, (1, magnitude, 0, 0, 1, 0))
        return sheared, label
def write_output_files(output_file_name, primal_res, dual_res=None, psf_res=None, output_format='npy'):
    """Persist reconstruction results to disk.

    Parameters
    ----------
    output_file_name : str
        Path prefix; '_primal', '_dual' and '_psf' suffixes are appended.
    primal_res : numpy.ndarray
        Primal result; always written.
    dual_res, psf_res : numpy.ndarray, optional
        Written only when provided (not None).
    output_format : str
        'fits' writes FITS files via ``write_to_fits``; any other value
        falls back to NumPy ``.npy`` files.
    """
    # Idiom fix: the original used ``isinstance(x, type(None))`` for the
    # None checks; replaced with the standard ``is not None`` (same behavior).
    if output_format == 'fits':
        write_to_fits(output_file_name + '_primal.fits', primal_res)
        if dual_res is not None:
            write_to_fits(output_file_name + '_dual.fits', dual_res)
        if psf_res is not None:
            write_to_fits(output_file_name + '_psf.fits', psf_res)
    else:
        np.save(output_file_name + '_primal', primal_res)
        if dual_res is not None:
            np.save(output_file_name + '_dual', dual_res)
        if psf_res is not None:
            np.save(output_file_name + '_psf', psf_res)
# NOTE(review): the line below appears to be a truncated decorator (most
# likely ``@pytest.fixture()``) — confirm against upstream.
()
def schema(fastapi_app):
    """Schemathesis schema loaded from the FastAPI app's /openapi.json,
    forced to be interpreted as OpenAPI 3.0."""
    return from_asgi('/openapi.json', fastapi_app, force_schema_version='30')
def get_completion_adapter_spec(instructions: str='', input_prefix: str='', output_prefix: str='', output_suffix: str='', max_train_instances: int=0, temperature: float=0.0, num_outputs: int=1, max_tokens: int=100, stop_sequences: Optional[List]=None, **kwargs) -> AdapterSpec:
    """Build an AdapterSpec for plain-text completion (generation method,
    greedy by default, no in-context examples unless requested).

    ``stop_sequences`` defaults to no stopping; any extra keyword arguments
    are forwarded to AdapterSpec unchanged.
    """
    if (stop_sequences is None):
        # Avoid the mutable-default-argument pitfall.
        stop_sequences = []
    return AdapterSpec(method=ADAPT_GENERATION, instructions=format_instructions(instructions), input_prefix=input_prefix, input_suffix='', output_prefix=output_prefix, output_suffix=output_suffix, max_train_instances=max_train_instances, temperature=temperature, num_outputs=num_outputs, max_tokens=max_tokens, stop_sequences=stop_sequences, **kwargs)
class TAPEVisualizer(ABC):
    """Abstract logging/visualization backend for TAPE training runs;
    concrete subclasses implement every hook."""
    def __init__(self, log_dir: typing.Union[(str, Path)], exp_name: str, debug: bool=False):
        raise NotImplementedError
    def log_config(self, config: typing.Dict[(str, typing.Any)]) -> None:
        """Record the experiment configuration once at startup."""
        raise NotImplementedError
    def watch(self, model: nn.Module) -> None:
        """Attach the visualizer to a model (parameters/gradients)."""
        raise NotImplementedError
    def log_metrics(self, metrics_dict: typing.Dict[(str, float)], split: str, step: int):
        """Log scalar metrics for ``split`` at global ``step``."""
        raise NotImplementedError
class TestIterators(unittest.TestCase):
    """Unit tests for the project's iterator utilities."""

    def test_counting_iterator(self):
        """CountingIterator yields the wrapped sequence in order and
        honours skip()."""
        data = list(range(10))
        counting = iterators.CountingIterator(data)
        self.assertTrue(counting.has_next())
        for expected in (0, 1):
            self.assertEqual(next(counting), expected)
        counting.skip(3)
        self.assertEqual(next(counting), 5)
        counting.skip(3)
        self.assertEqual(next(counting), 9)
        self.assertFalse(counting.has_next())
# BUGFIX(review): the decorator line was truncated to ``_criterion(...)`` in
# this file; restored to fairseq's registration decorator — confirm that the
# module imports ``register_criterion``.
@register_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
    """Standard token-level cross-entropy (negative log-likelihood) criterion."""

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for ``sample``; return (loss, sample_size, logs)."""
        net_output = model(**sample['net_input'])
        (loss, _) = self.compute_loss(model, net_output, sample, reduce=reduce)
        # Normalize per sentence or per token depending on --sentence-avg.
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Summed (or per-token) NLL over the flattened output distribution."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view((- 1), lprobs.size((- 1)))
        target = model.get_targets(sample, net_output).view((- 1))
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        return (loss, loss)

    # BUGFIX(review): upstream fairseq marks this @staticmethod (it takes no
    # ``self``); the decorator appears stripped from this file.
    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data-parallel workers."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Divide by log(2) to report losses in bits rather than nats.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        if (sample_size != ntokens):
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
            metrics.log_derived('ppl', (lambda meters: round((2 ** meters['nll_loss'].avg), 3)))
        else:
            metrics.log_derived('ppl', (lambda meters: round((2 ** meters['loss'].avg), 3)))

    # BUGFIX(review): likewise @staticmethod upstream.
    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Summing logging outputs across workers is valid for this criterion."""
        return True
class DistributedBackend():
    """Abstract interface to a distributed-training backend.

    Subclasses set ``BACKEND_MODULE_NAME``/``BACKEND_NAME`` and implement the
    ``_``-prefixed hooks; the public wrappers enforce that ``initialize()``
    has been called first.
    """
    BACKEND_MODULE_NAME = None  # importable module implementing the backend
    BACKEND_NAME = None  # human-readable backend name
    ROOT_RANK = 0
    backend_module = None  # set by has_backend() on success
    is_initialized = False

    def __init__(self):
        if (self.BACKEND_MODULE_NAME is None):
            raise NotImplementedError('BACKEND_MODULE_NAME is not set')
        if (self.BACKEND_NAME is None):
            raise NotImplementedError('BACKEND_NAME is not set')

    def has_backend(self):
        """Return True when the backend module can be imported; caches it."""
        try:
            self.backend_module = import_module(self.BACKEND_MODULE_NAME)
        except ModuleNotFoundError:
            return False
        return True

    def check_batch_size(self, batch_size):
        """Every process must get at least one sample."""
        assert (batch_size >= self.get_world_size()), f"batch size can't be smaller than number of processes ({batch_size} < {self.get_world_size()})"

    def wrap_arg_parser(self, parser):
        raise NotImplementedError

    def initialize(self):
        """Initialize the backend and open the require_init() gate."""
        self._initialize()
        self.is_initialized = True

    def _initialize(self):
        raise NotImplementedError

    def require_init(self):
        # BUGFIX: referenced the bare name ``BACKEND_NAME``, raising NameError
        # whenever the assertion fired; must go through ``self``.
        assert self.is_initialized, f'{self.BACKEND_NAME} backend has not been initialized; please call `distributed_utils.initialize` at the start of your script to allow optional distributed usage'

    def get_world_size(self):
        self.require_init()
        return self._get_world_size()

    def _get_world_size(self):
        raise NotImplementedError

    def get_rank(self):
        self.require_init()
        return self._get_rank()

    def _get_rank(self):
        raise NotImplementedError

    def get_local_rank(self):
        self.require_init()
        return self._get_local_rank()

    def _get_local_rank(self):
        raise NotImplementedError

    def is_root_worker(self):
        """Globally first process (rank == ROOT_RANK)."""
        return (self.get_rank() == self.ROOT_RANK)

    def is_local_root_worker(self):
        """First process on this machine (local rank == ROOT_RANK)."""
        return (self.get_local_rank() == self.ROOT_RANK)

    def local_barrier(self):
        self.require_init()
        self._local_barrier()

    def _local_barrier(self):
        raise NotImplementedError

    def distribute(self, args=None, model=None, optimizer=None, model_parameters=None, training_data=None, lr_scheduler=None, **kwargs):
        """Wrap model/optimizer/data for distributed execution."""
        self.require_init()
        return self._distribute(args, model, optimizer, model_parameters, training_data, lr_scheduler, **kwargs)

    def _distribute(self, args=None, model=None, optimizer=None, model_parameters=None, training_data=None, lr_scheduler=None, **kwargs):
        raise NotImplementedError

    def average_all(self, tensor):
        """All-reduce average of ``tensor`` across workers."""
        self.require_init()
        return self._average_all(tensor)

    def _average_all(self, tensor):
        raise NotImplementedError
def test_load_tags():
    """Development clips expose exactly one scene tag with confidence 1.0;
    evaluation-split clips ship without tags."""
    default_clipid = 'airport-lisbon-1000-40000-0-a'
    dataset = tau2022uas_mobile.Dataset(TEST_DATA_HOME)
    clip = dataset.clip(default_clipid)
    assert (len(clip.tags.labels) == 1)
    assert (clip.tags.labels[0] == 'airport')
    assert np.allclose([1.0], clip.tags.confidence)
    eval_default_clipid = '0'
    eval_clip = dataset.clip(eval_default_clipid)
    assert (eval_clip.tags is None)
class UCF101Dataset(BaseDataset):
    """UCF101 action-recognition dataset phrased as masked-word video QA
    ('A person is doing [MASK]').

    NOTE(review): _load_metadata reads ``hmdb51_*`` split/class files — this
    looks copy-pasted from an HMDB51 dataset class; confirm the intended
    ucf101 metadata file names.
    """
    def __init__(self, *args, split='', **kwargs):
        assert (split in ['train', 'val', 'test'])
        self.split = split
        self.metadata = None
        self.ans_lab_dict = dict()
        if (split == 'train'):
            names = ['ucf101_train']
        elif (split == 'val'):
            names = ['ucf101_val']
        elif (split == 'test'):
            names = ['ucf101_test']
        super().__init__(*args, **kwargs, names=names, text_column_name='questions', remove_duplicate=False)
        self._load_metadata()
    def _load_metadata(self):
        """Load the per-sample split rows and the class-index mapping."""
        metadata_dir = './meta_data/ucf101'
        split_files = {'train': 'hmdb51_rgb_train_split_1.txt', 'val': 'hmdb51_rgb_val_split_1.txt', 'test': 'hmdb51_rgb_val_split_1.txt'}
        target_split_fp = split_files[self.split]
        self.metadata = [x.strip().split(' ') for x in open(os.path.join(metadata_dir, target_split_fp))]
        answer_fp = os.path.join(metadata_dir, 'hmdb51_classInd.txt')
        with open(answer_fp, 'r') as f:
            lines = f.readlines()
            for line in lines:
                # Class indices in the file are 1-based; store them 0-based.
                self.ans_lab_dict[str((int(line.strip().split(' ')[0]) - 1))] = line.strip().split(' ')[1]
    def _get_video_path(self, sample):
        """Return (absolute path, bare file name) of the sample's .avi clip."""
        return ((os.path.join(self.data_dir, sample[0].split('/')[(- 1)]) + '.avi'), (sample[0].split('/')[(- 1)] + '.avi'))
    def get_text(self, sample):
        """Fixed cloze prompt shared by every sample, plus its encoding."""
        text = 'A person is doing [MASK]'
        encoding = self.tokenizer(text, padding='max_length', truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
        return (text, encoding)
    def get_answer_label(self, sample):
        """One-hot score vector over classes (+1 slack slot) for the label."""
        text = 'None'
        ans_total_len = (len(self.ans_lab_dict) + 1)
        ans_label = int(sample[2])
        scores = np.zeros(ans_total_len).astype(int)
        scores[ans_label] = 1
        return (text, ans_label, scores)
    def __getitem__(self, index):
        sample = self.metadata[index]
        video_tensor = self.get_video(sample)
        text = self.get_text(sample)
        qid = index
        if (self.split != 'test'):
            (answers, labels, scores) = self.get_answer_label(sample)
        else:
            # The test split ships without ground-truth answers.
            answers = list()
            labels = list()
            scores = list()
        return {'video': video_tensor, 'text': text, 'vqa_answer': answers, 'vqa_labels': labels, 'vqa_scores': scores, 'qid': qid}
    def __len__(self):
        return len(self.metadata)
def test_power_two_range_stmt_non_interactive():
    """Non-interactive proof that a committed value lies in [0, 2**limit):
    the prover (p1) knows (value, randomizer); the verifier (p2) re-checks
    the transcript knowing only the commitment."""
    group_pair = BilinearGroupPair()
    group = group_pair.G1
    value = Secret(value=Bn(10))
    randomizer = Secret(value=group.order().random())
    (g, h) = make_generators(2, group)
    limit = 20
    # Pedersen commitment to ``value`` under generators (g, h).
    com = ((value * g) + (randomizer * h))
    p1 = PowerTwoRangeStmt(com.eval(), g, h, limit, value, randomizer)
    p2 = PowerTwoRangeStmt(com.eval(), g, h, limit, Secret(), Secret())
    tr = p1.prove()
    assert p2.verify(tr)
# NOTE(review): the line below appears to be a truncated decorator (most
# likely ``@pytest.mark.operations('upload_file')``) — confirm upstream.
.operations('upload_file')
def test_cli_binary_body(cli, schema_url, hypothesis_max_examples):
    """The CLI must handle an operation with a binary request body without
    tripping hypothesis health checks or leaking raw hypothesis output."""
    result = cli.run(schema_url, '--hypothesis-suppress-health-check=filter_too_much', f'--hypothesis-max-examples={(hypothesis_max_examples or 1)}')
    assert (result.exit_code == ExitCode.OK), result.stdout
    assert (' HYPOTHESIS OUTPUT ' not in result.stdout)
class BaseCalculator(HypotestsObject):
def __init__(self, input, minimizer):
super().__init__(input, minimizer)
self._obs_nll = {}
self._parameters = {}
for m in self.model:
for d in m.get_params():
self._parameters[d.name] = d
def obs_nll(self, pois: POIarray) -> np.ndarray:
ret = np.empty(pois.shape)
for (i, p) in enumerate(pois):
if (p not in self._obs_nll.keys()):
nll = pll(minimizer=self.minimizer, loss=self.loss, pois=p)
self._obs_nll[p] = nll
ret[i] = self._obs_nll[p]
return ret
def qobs(self, poinull: POI, onesided: bool=True, onesideddiscovery: bool=True, qtilde: bool=False):
self.check_pois(poinull)
if (poinull.ndim == 1):
param = poinull.parameter
bestfit = self.bestfit.params[param]['value']
if (qtilde and (bestfit < 0)):
bestfitpoi = POI(param, 0)
else:
bestfitpoi = POI(param, bestfit)
self._obs_nll[bestfitpoi] = self.bestfit.fmin
nll_bestfitpoi_obs = self.obs_nll(bestfitpoi)
nll_poinull_obs = self.obs_nll(poinull)
qobs = self.q(nll1=nll_poinull_obs, nll2=nll_bestfitpoi_obs, poi1=poinull, poi2=bestfitpoi, onesided=onesided, onesideddiscovery=onesideddiscovery)
return qobs
def pvalue(self, poinull: (POI | POIarray), poialt: (POI | None)=None, qtilde: bool=False, onesided: bool=True, onesideddiscovery: bool=False) -> tuple[(np.ndarray, np.ndarray)]:
self.check_pois(poinull)
if poialt:
self.check_pois(poialt)
self.check_pois_compatibility(poinull, poialt)
return self._pvalue_(poinull=poinull, poialt=poialt, qtilde=qtilde, onesided=onesided, onesideddiscovery=onesideddiscovery)
def _pvalue_(self, poinull, poialt, qtilde, onesided, onesideddiscovery):
raise NotImplementedError
def expected_pvalue(self, poinull: (POI | POIarray), poialt: (POI | POIarray), nsigma: list[int], CLs: bool=False, qtilde: bool=False, onesided: bool=True, onesideddiscovery: bool=False) -> list[np.array]:
self.check_pois(poinull)
if poialt:
self.check_pois(poialt)
self.check_pois_compatibility(poinull, poialt)
if (qtilde and (poialt.values < 0).any()):
poialt = POIarray(parameter=poialt.parameter, values=np.where((poialt.values < 0), 0, poialt.values))
return self._expected_pvalue_(poinull=poinull, poialt=poialt, nsigma=nsigma, CLs=CLs, qtilde=qtilde, onesided=onesided, onesideddiscovery=onesideddiscovery)
def _expected_pvalue_(self, poinull, poialt, nsigma, CLs, qtilde, onesided, onesideddiscovery):
raise NotImplementedError
def check_pois(pois: (POI | POIarray)):
msg = 'POI/POIarray is required.'
if (not isinstance(pois, POIarray)):
raise TypeError(msg)
if (pois.ndim > 1):
msg = 'Tests with more that one parameter of interest are not yet implemented.'
raise NotImplementedError(msg)
def check_pois_compatibility(poi1: (POI | POIarray), poi2: (POI | POIarray)):
    """Ensure two POIs share the same dimensionality and, for 1-D POIs, the same parameter name.

    NOTE(review): takes no ``self`` — presumably a ``@staticmethod`` whose
    decorator was lost in extraction; confirm upstream.
    """
    ndim1, ndim2 = poi1.ndim, poi2.ndim
    if ndim1 != ndim2:
        msg = f'POIs should have the same dimensions, poi1={ndim1}, poi2={ndim2}'
        raise ValueError(msg)
    if ndim1 == 1 and poi1.name != poi2.name:
        msg = ('The variables used in the parameters of interest should have the same names,'
               f' poi1={poi1.name}, poi2={poi2.name}')
        raise ValueError(msg)
def q(self, nll1: np.array, nll2: np.array, poi1: POIarray, poi2: POIarray, onesided: bool=True, onesideddiscovery: bool=False) -> np.ndarray:
    """Compute the profile-likelihood test statistic q = 2 * (nll1 - nll2).

    Entries are zeroed where the one-sided (or one-sided-discovery) condition
    holds or where q is negative; in the two-sided case only negative values
    are zeroed.
    """
    self.check_pois(poi1)
    self.check_pois(poi2)
    self.check_pois_compatibility(poi1, poi2)
    assert len(nll1) == len(poi1)
    assert len(nll2) == len(poi2)
    qvals = 2 * (nll1 - nll2)
    if onesideddiscovery:
        clip = (poi2.values < poi1.values) | (qvals < 0)
    elif onesided:
        clip = (poi2.values > poi1.values) | (qvals < 0)
    else:
        clip = qvals < 0
    return np.where(clip, np.zeros(qvals.shape), qvals)
class LALR_WithLexer(WithLexer):
    """WithLexer front-end driving a LALR(1) parser; subclasses supply the lexer."""

    def __init__(self, lexer_conf, parser_conf, options=None):
        if options:
            debug = options.debug
        else:
            debug = False
        self.parser = LALR_Parser(parser_conf, debug=debug)
        WithLexer.__init__(self, lexer_conf, parser_conf, options)
        self.init_lexer()

    def init_lexer(self, **kw):
        # Abstract: concrete subclasses construct their lexer here.
        raise NotImplementedError()
def trieste_keras_ensemble_model(example_data: Dataset, ensemble_size: int, independent_normal: bool=False) -> KerasEnsemble:
    """Build a KerasEnsemble of ``ensemble_size`` GaussianNetworks sized from the example data.

    Each member gets two 32-unit SELU hidden layers; ``independent_normal``
    is forwarded to every network.
    """
    input_spec, output_spec = get_tensor_spec_from_data(example_data)
    members = []
    for _ in range(ensemble_size):
        # Fresh layer-arg dicts per member, in case the network mutates them.
        hidden_layers = [{'units': 32, 'activation': 'selu'}, {'units': 32, 'activation': 'selu'}]
        members.append(GaussianNetwork(input_spec, output_spec, hidden_layer_args=hidden_layers, independent=independent_normal))
    return KerasEnsemble(members)
class VolumetricConvolution(Module):
    """Legacy (torch.legacy.nn-style) 3D convolution module.

    Applies a volumetric convolution with kernel (kT, kH, kW), stride
    (dT, dH, dW) and padding (padT, padH, padW) over nInputPlane -> nOutputPlane
    channels. CUDA inputs dispatch to VolumetricConvolution_* backend calls;
    CPU inputs use the matrix-multiply VolumetricConvolutionMM_* path, which
    requires the weight to be viewed as a 2D matrix around each call.
    """

    def __init__(self, nInputPlane, nOutputPlane, kT, kW, kH, dT=1, dW=1, dH=1, padT=0, padW=None, padH=None):
        super(VolumetricConvolution, self).__init__()
        self.nInputPlane = nInputPlane
        self.nOutputPlane = nOutputPlane
        self.kT = kT
        self.kW = kW
        self.kH = kH
        self.dT = dT
        self.dW = dW
        self.dH = dH
        self.padT = padT
        # Unspecified paddings cascade: padW defaults to padT, padH to padW.
        self.padW = (padW if (padW is not None) else self.padT)
        self.padH = (padH if (padH is not None) else self.padW)
        self.weight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
        self.bias = torch.Tensor(nOutputPlane)
        self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
        self.gradBias = torch.Tensor(nOutputPlane)
        self.reset()
        # Scratch buffers for the MM path; lazily allocated in updateOutput.
        self.finput = None
        self.fgradInput = None
        # Contiguous copies of non-contiguous inputs/gradOutputs.
        self._input = None
        self._gradOutput = None

    def reset(self, stdv=None):
        """Re-initialize weight and bias uniformly in [-stdv, stdv].

        Without an explicit stdv, uses 1/sqrt(fan_in) where
        fan_in = kT * kW * kH * nInputPlane.
        """
        if (stdv is not None):
            stdv = (stdv * math.sqrt(3))
        else:
            stdv = (1.0 / math.sqrt((((self.kT * self.kW) * self.kH) * self.nInputPlane)))
        self.weight.uniform_((- stdv), stdv)
        self.bias.uniform_((- stdv), stdv)

    def _makeContiguous(self, input, gradOutput=None):
        """Return contiguous versions of input (and gradOutput, if given), copying into cached buffers when needed."""
        if (not input.is_contiguous()):
            if (self._input is None):
                self._input = input.new()
            self._input.resize_as_(input).copy_(input)
            input = self._input
        if (gradOutput is not None):
            if (not gradOutput.is_contiguous()):
                if (self._gradOutput is None):
                    self._gradOutput = gradOutput.new()
                self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
                gradOutput = self._gradOutput
            return (input, gradOutput)
        return input

    def _viewWeight(self):
        """Flatten weight (and gradWeight) to 2D for the MM backend calls."""
        self.weight = self.weight.view(self.nOutputPlane, (((self.nInputPlane * self.kT) * self.kH) * self.kW))
        if ((self.gradWeight is not None) and (self.gradWeight.dim() > 0)):
            self.gradWeight = self.gradWeight.view(self.nOutputPlane, (((self.nInputPlane * self.kT) * self.kH) * self.kW))

    def _unviewWeight(self):
        """Restore weight (and gradWeight) to their 5D shape after an MM call."""
        self.weight = self.weight.view(self.nOutputPlane, self.nInputPlane, self.kT, self.kH, self.kW)
        if ((self.gradWeight is not None) and (self.gradWeight.dim() > 0)):
            self.gradWeight = self.gradWeight.view(self.nOutputPlane, self.nInputPlane, self.kT, self.kH, self.kW)

    def updateOutput(self, input):
        """Forward pass: writes the convolution result into self.output and returns it."""
        if (self.finput is None):
            self.finput = input.new()
        if (self.fgradInput is None):
            self.fgradInput = input.new()
        if (input.type() == 'torch.cuda.FloatTensor'):
            # CUDA backend works on the 5D weight directly.
            self._backend.VolumetricConvolution_updateOutput(self._backend.library_state, input, self.output, self.weight, self.bias, self.finput, self.fgradInput, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH)
        else:
            # CPU MM path: flatten weight, ensure contiguity, then restore shape.
            self._viewWeight()
            input = self._makeContiguous(input)
            self._backend.VolumetricConvolutionMM_updateOutput(self._backend.library_state, input, self.output, self.weight, self.bias, self.finput, self.fgradInput, self.kT, self.kW, self.kH, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH)
            self._unviewWeight()
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backward pass w.r.t. input: fills and returns self.gradInput (no-op when gradInput is None)."""
        if (self.gradInput is None):
            return
        if (input.type() == 'torch.cuda.FloatTensor'):
            self._backend.VolumetricConvolution_updateGradInput(self._backend.library_state, input, gradOutput, self.gradInput, self.weight, self.finput, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH)
        else:
            self._viewWeight()
            (input, gradOutput) = self._makeContiguous(input, gradOutput)
            self._backend.VolumetricConvolutionMM_updateGradInput(self._backend.library_state, input, gradOutput, self.gradInput, self.weight, self.finput, self.fgradInput, self.kT, self.kW, self.kH, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH)
            self._unviewWeight()
        return self.gradInput

    def accGradParameters(self, input, gradOutput, scale=1):
        """Accumulate parameter gradients (scaled by ``scale``) into gradWeight/gradBias."""
        if (input.type() == 'torch.cuda.FloatTensor'):
            self._backend.VolumetricConvolution_accGradParameters(self._backend.library_state, input, gradOutput, self.gradWeight, self.gradBias, self.finput, self.fgradInput, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH, scale)
        else:
            (input, gradOutput) = self._makeContiguous(input, gradOutput)
            self._viewWeight()
            self._backend.VolumetricConvolutionMM_accGradParameters(self._backend.library_state, input, gradOutput, self.gradWeight, self.gradBias, self.finput, self.fgradInput, self.kT, self.kW, self.kH, self.dT, self.dW, self.dH, self.padT, self.padW, self.padH, scale)
            self._unviewWeight()

    def type(self, type, tensorCache=None):
        # Scratch buffers are dropped before a type change; they are re-created lazily.
        clear(self, 'finput', 'fgradInput')
        return super(VolumetricConvolution, self).type(type, tensorCache)

    def clearState(self):
        # Free all cached scratch/contiguity buffers.
        clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')
        return super(VolumetricConvolution, self).clearState()

    def __repr__(self):
        s = super(VolumetricConvolution, self).__repr__()
        s += '({} -> {}, {}x{}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kT, self.kW, self.kH)
        # Stride/padding only shown when non-default.
        if ((self.dT != 1) or (self.dW != 1) or (self.dH != 1) or (self.padT != 0) or (self.padW != 0) or (self.padH != 0)):
            s += ', {}, {}, {}'.format(self.dT, self.dW, self.dH)
        if ((self.padT != 0) or (self.padW != 0) or (self.padH != 0)):
            s += ', {}, {}, {}'.format(self.padT, self.padW, self.padH)
        s += ')'
        return s
class TestNumericStyleTypecodes(_DeprecationTestCase):
    """Checks that the old capitalized numeric-style dtype names are deprecated."""

    def test_all_dtypes(self):
        deprecated_types = ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64', 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64', 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0']
        if sys.version_info[0] < 3:
            # These two names only existed under Python 2.
            deprecated_types += ['Unicode0', 'String0']
        for name in deprecated_types:
            self.assert_deprecated(np.dtype, exceptions=(TypeError,), args=(name,))
def shuffle_in_unison(a, b):
    """Shuffle two equal-length numpy arrays with the same random permutation.

    Returns new arrays (the inputs are not modified); corresponding rows of
    ``a`` and ``b`` stay aligned. Uses np.random's global state.
    """
    assert len(a) == len(b)
    permutation = np.random.permutation(len(a))
    shuffled_a = np.empty(a.shape, dtype=a.dtype)
    shuffled_b = np.empty(b.shape, dtype=b.dtype)
    # Element at old position i moves to new position permutation[i];
    # vectorized fancy indexing replaces the original O(n) Python loop
    # and produces the identical result for the same RNG state.
    shuffled_a[permutation] = a
    shuffled_b[permutation] = b
    return (shuffled_a, shuffled_b)
class OllamaLocal(LM):
    """LM client for a locally running Ollama server (text or chat endpoint)."""

    def __init__(self, model: str='llama2', model_type: Literal[('chat', 'text')]=None, **kwargs):
        super().__init__(model)
        self.provider = 'ollama'
        # FIX: the URL literal was truncated (unterminated string). Restored to
        # Ollama's standard local endpoint.
        self.base_url = 'http://localhost:11434'
        default_model_type = 'text'
        self.model_type = (model_type if model_type else default_model_type)
        self.model_name = model
        self.num_cores = (multiprocessing.cpu_count() - 1)  # leave one core free
        self.timeout_duration = 15.0
        self.kwargs = {'temperature': 0.0, 'max_tokens': 150, 'top_p': 1, 'top_k': 20, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'num_ctx': 1024, **kwargs}
        # Ollama names the generation cap 'num_predict'.
        self.kwargs['num_predict'] = self.kwargs['max_tokens']
        self.history: list[dict[(str, Any)]] = []
        self.version = ''
        if ('version' in kwargs):
            self.version = kwargs['version']

    def basic_request(self, prompt: str, **kwargs):
        """POST the prompt to /api/chat or /api/generate and collect n choices.

        Returns an OpenAI-style request_info dict and appends it to history.
        """
        raw_kwargs = kwargs
        kwargs = {**self.kwargs, **kwargs}
        request_info = post_request_metadata(self.model_name, prompt)
        request_info['choices'] = []
        # 'n' and 'max_tokens' are client-side only; everything else is an Ollama option.
        settings_dict = {'model': self.model_name, 'options': {k: v for (k, v) in kwargs.items() if (k not in ['n', 'max_tokens'])}, 'stream': False}
        if (self.model_type == 'chat'):
            settings_dict['messages'] = [{'role': 'user', 'content': prompt}]
        else:
            settings_dict['prompt'] = prompt
        urlstr = (f'{self.base_url}/api/chat' if (self.model_type == 'chat') else f'{self.base_url}/api/generate')
        tot_eval_tokens = 0
        # Ollama has no server-side 'n'; issue one request per requested choice.
        for i in range(kwargs['n']):
            response = requests.post(urlstr, json=settings_dict)
            if (response.status_code != 200):
                print(f'Error: CODE {response.status_code} - {response.text}')
            response_json = response.json()
            text = (response_json.get('message').get('content') if (self.model_type == 'chat') else response_json.get('response'))
            request_info['choices'].append({'index': i, 'message': {'role': 'assistant', 'content': ''.join(text)}, 'finish_reason': 'stop'})
            tot_eval_tokens += response_json.get('eval_count')
        request_info['additional_kwargs'] = {k: v for (k, v) in response_json.items() if (k not in ['response'])}
        print('RESPONSE JSON', response_json)
        request_info['usage'] = {'prompt_tokens': response_json.get('prompt_eval_count'), 'completion_tokens': tot_eval_tokens, 'total_tokens': (response_json.get('prompt_eval_count') + tot_eval_tokens)}
        history = {'prompt': prompt, 'response': request_info, 'kwargs': kwargs, 'raw_kwargs': raw_kwargs}
        self.history.append(history)
        return request_info

    def request(self, prompt: str, **kwargs):
        """Strip the client-only 'model_type' kwarg and forward to basic_request."""
        if ('model_type' in kwargs):
            del kwargs['model_type']
        return self.basic_request(prompt, **kwargs)

    def _get_choice_text(self, choice: dict[(str, Any)]) -> str:
        """Extract the completion text from an OpenAI-style choice dict."""
        return choice['message']['content']

    def __call__(self, prompt: str, only_completed: bool=True, return_sorted: bool=False, **kwargs) -> list[dict[(str, Any)]]:
        """Return the list of completion strings for the prompt.

        Choices that stopped because of the length limit are dropped when any
        fully completed choice exists.
        """
        assert only_completed, 'for now'
        assert (return_sorted is False), 'for now'
        response = self.request(prompt, **kwargs)
        choices = response['choices']
        completed_choices = [c for c in choices if (c['finish_reason'] != 'length')]
        if (only_completed and len(completed_choices)):
            choices = completed_choices
        print(choices)
        completions = [self._get_choice_text(c) for c in choices]
        return completions
def csv_sniffer_has_bug_last_field():
    """Probe (once) whether csv.Sniffer misdetects the quote char of a quoted last field.

    The result is memoized as a function attribute, so the sniff runs at most once.
    """
    cached = getattr(csv_sniffer_has_bug_last_field, 'has_bug', None)
    if cached is None:
        dialect = csv.Sniffer().sniff("3, 'a'")
        cached = dialect.quotechar != "'"
        csv_sniffer_has_bug_last_field.has_bug = cached
    return cached
class TrivialMapInitEliminationTest(unittest.TestCase):
    """Tests for the TrivialMapElimination transformation on a trivial init SDFG."""

    @staticmethod
    def _map_entries(state):
        # All MapEntry nodes in the given SDFG state.
        return [n for n in state.nodes() if isinstance(n, dace.sdfg.nodes.MapEntry)]

    def test_can_be_applied(self):
        graph = trivial_map_init_sdfg()
        applied = graph.apply_transformations(TrivialMapElimination, validate=False, validate_all=False)
        graph.validate()
        self.assertGreater(applied, 0)

    def test_removes_map(self):
        graph = trivial_map_init_sdfg()
        self.assertEqual(len(self._map_entries(graph.nodes()[0])), 2)
        graph.apply_transformations(TrivialMapElimination)
        self.assertEqual(len(self._map_entries(graph.nodes()[0])), 1)

    def test_reconnects_edges(self):
        graph = trivial_map_init_sdfg()
        graph.apply_transformations(TrivialMapElimination)
        state = graph.nodes()[0]
        entries = self._map_entries(state)
        self.assertEqual(len(entries), 1)
        # The surviving map entry keeps a single outgoing edge.
        self.assertEqual(len(state.out_edges(entries[0])), 1)
def _allgather_then_aggregate_hook(process_group: object, bucket: dist._GradBucket) -> torch.futures.Future:
    """DDP comm hook: all-gather the bucket gradient, then average locally on each rank.

    Returns a future resolving to [averaged_tensor].
    """
    if process_group is not None:
        group_to_use = process_group
        rank = process_group.rank()
        world_size = process_group.size()
    else:
        group_to_use = dist.group.WORLD
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    tensor = bucket.get_tensors()[0]
    work = dist.all_gather(_get_allgather_out_list(tensor, world_size), tensor, group=group_to_use, async_op=True)
    fut = work.get_future()

    def aggregate(fut):
        gathered = fut.value()[0]
        out = bucket.get_tensors()[0]
        # Sum the peers' tensors into our own copy, then average in place.
        for peer, peer_tensor in enumerate(gathered):
            if peer != rank:
                out += peer_tensor
        return [out.div_(world_size)]

    return fut.then(aggregate)
class Postgres(object):
    """Thin convenience wrapper around a psycopg2 connection bound to one schema.

    On construction, the schema is created if missing and put first on the
    search path. With debug=True, queries are not executed and empty cursors
    are yielded instead.
    """

    def __init__(self, db_name, schema_name, user, password=None, host=None, port=None, verbose=False, debug=False):
        self.db_name = db_name
        self.user = user
        self.verbose = verbose
        self.debug = debug
        self.cursors_opened = 0  # counter used to name server-side cursors uniquely
        self._table_columns = {}
        self.connection = psycopg2.connect(database=db_name, user=user, password=password, host=host, port=port)
        self.cursor = self.connection.cursor()
        self._create_schema(schema_name)
        self.execute('SET search_path TO {}, public'.format(schema_name))
        self.schema_name = schema_name

    def format(self, query, as_is, params):
        """Build a query: interpolate identifiers verbatim (as_is), then let psycopg2 escape params."""
        if as_is:
            query = query.format(*as_is)
        return self.cursor.mogrify(query, params)

    def __enter__(self):
        return self

    def __exit__(self, typ, value, tb):
        self.close()

    def close(self):
        """Close the shared cursor and the connection."""
        self.cursor.close()
        self.connection.close()

    def commit(self):
        self.connection.commit()

    def rollback(self):
        self.connection.rollback()

    def query_cursor(self, q, lazy_fetch=False, commit=True):
        """Yield a RealDictCursor that has executed ``q``; server-side when lazy_fetch.

        NOTE(review): this generator is consumed via ``with`` throughout the
        class, so upstream it is presumably decorated with
        ``@contextlib.contextmanager`` (decorator lost in extraction) — confirm.
        """
        self.cursors_opened += 1
        if self.verbose:
            logging.debug(q)
        if self.debug:
            # Debug mode: hand back a stub cursor and execute nothing.
            empty_cursor = Bunch()
            empty_cursor.fetchmany = (lambda size: [])
            empty_cursor.fetchall = (lambda : [])
            (yield empty_cursor)
            return
        # A named cursor makes psycopg2 use a server-side cursor (lazy fetch).
        cursor_name = ('server_side_{}'.format(self.cursors_opened) if lazy_fetch else None)
        with self.connection.cursor(cursor_name, cursor_factory=RealDictCursor) as cursor:
            cursor.execute(q)
            (yield cursor)
        if commit:
            self.commit()

    def execute(self, q, commit=True):
        """Execute ``q`` for its side effects, discarding any result rows."""
        with self.query_cursor(q, commit=commit):
            pass

    def has_results(self, q):
        """Return True when ``q`` yields at least one row."""
        with self.query_cursor(q) as cursor:
            results = cursor.fetchall()
        return (len(results) > 0)

    def query(self, q, fetch_size=10000):
        """Lazily yield result rows of ``q``, fetching ``fetch_size`` at a time."""
        if self.verbose:
            logging.debug(q)
        with self.query_cursor(q, lazy_fetch=True) as cursor:
            while True:
                results = cursor.fetchmany(fetch_size)
                for result in results:
                    (yield result)
                if (len(results) == 0):
                    break

    def iter_table(self, table_name):
        """Iterate over all rows of a table."""
        q = self.format('SELECT * from {}', (table_name,), None)
        return self.query(q)

    def match_field(self, table_name, field, value):
        """Iterate rows where ``field`` equals ``value``."""
        q = self.format('SELECT * from {} where {}=%s', (table_name, field), (value,))
        return self.query(q)

    def match_fields(self, table_name, fields):
        """Iterate rows matching every field=value pair in ``fields`` (ANDed)."""
        (keys, vals) = zip(*fields.items())
        field_query = ' AND '.join(['{}=%s'.format(k) for k in keys])
        field_vals = tuple(vals)
        q = self.format('SELECT * from {} where {}', (table_name, field_query), field_vals)
        return self.query(q)

    def match_field_any(self, table_name, field, values):
        """Iterate rows where ``field`` is any of ``values`` (SQL IN)."""
        q = self.format('SELECT * from {} where {} in %s', (table_name, field), (tuple(values),))
        return self.query(q)

    def _schema_exists(self, name):
        q = self.format('SELECT schema_name FROM information_schema.schemata WHERE schema_name = %s', None, (name,))
        return self.has_results(q)

    def table_exists(self, name):
        """Return True when the (lowercased) table exists in this schema."""
        name = name.lower()
        q = self.format('SELECT table_name FROM information_schema.tables WHERE table_schema = %s AND table_name = %s', None, (self.schema_name, name))
        return self.has_results(q)

    def _create_schema(self, name):
        if (not self._schema_exists(name)):
            q = self.format('CREATE SCHEMA {}', (name,), None)
            self.execute(q)

    def create_table(self, name, col_to_type):
        """Create the table from a {column: sql_type} mapping, if it does not exist."""
        if (not self.table_exists(name)):
            col_to_type_pairs = [' '.join(i) for i in col_to_type.items()]
            col_type_str = ', '.join(col_to_type_pairs)
            q = self.format('CREATE TABLE {} ({})', (name, col_type_str), None)
            self.execute(q)

    def drop_table(self, name):
        if self.table_exists(name):
            q = self.format('DROP TABLE {}', (name,), None)
            self.execute(q)

    def add_row(self, table_name, row):
        """Insert one row given as a {column: value} mapping."""
        (columns, vals) = zip(*row.items())
        col_str = ', '.join(columns)
        vals = tuple(vals)
        q = self.format('INSERT INTO {} ({}) VALUES %s', (table_name, col_str), (vals,))
        self.execute(q)

    def add_rows(self, table_name, table):
        """Bulk-insert a column-major table ({column: [values]}) via unnest()."""
        col_names = table.keys()
        col_str = ', '.join(col_names)
        unnest = ', '.join(['unnest(%({})s)'.format(n) for n in col_names])
        for column in table.values():
            assert isinstance(column, list)
        q = self.format('INSERT INTO {} ({}) SELECT {}', (table_name, col_str, unnest), table)
        self.execute(q)

    def add_table(self, table_name, table, col_types):
        """Create a fresh table and fill it from a column-major dict."""
        assert (not self.table_exists(table_name))
        self.create_table(table_name, col_types)
        self.add_rows(table_name, table)

    def table(self, name):
        """Return the whole table as a column-major {column: [values]} dict."""
        results = list(self.iter_table(name))
        table = defaultdict(list)
        for res in results:
            # FIX: .iteritems() is Python-2-only and crashes on Python 3;
            # .items() behaves the same on both.
            for (key, val) in res.items():
                table[key].append(val)
        return table

    def row_count(self, table_name, approx=False):
        """Return the table's row count; with approx=True use pg_class's estimate (fast)."""
        q = self.format('select count(*) from {}', (table_name,), None)
        q_approx = self.format('SELECT reltuples AS approximate_row_count FROM pg_class WHERE relname = %s', None, (table_name,))
        if approx:
            row = next(self.query(q_approx))
            count = row['approximate_row_count']
        else:
            row = next(self.query(q))
            count = row['count']
        return int(count)
def _moments_raw_to_central_fast(moments_raw):
    """Convert raw image moments to central moments using closed-form expressions.

    Supports 2D and 3D moments up to order 3 only; raises ValueError otherwise.
    Computation is done in float64 and the result is cast back to the input
    dtype. ``moments_raw`` is indexed as m[i, j] (2D) or m[i, j, k] (3D).
    """
    ndim = moments_raw.ndim
    order = (moments_raw.shape[0] - 1)
    float_dtype = moments_raw.dtype
    # Work in double precision for accuracy, restore the original dtype at the end.
    moments_raw = moments_raw.astype(np.float64, copy=False)
    moments_central = np.zeros_like(moments_raw)
    if ((order >= 4) or (ndim not in [2, 3])):
        raise ValueError('This function only supports 2D or 3D moments of order < 4.')
    m = moments_raw
    if (ndim == 2):
        # Centroid from first-order raw moments.
        cx = (m[(1, 0)] / m[(0, 0)])
        cy = (m[(0, 1)] / m[(0, 0)])
        # Order 0: central equals raw.
        moments_central[(0, 0)] = m[(0, 0)]
        if (order > 1):
            # Second-order central moments.
            moments_central[(1, 1)] = (m[(1, 1)] - (cx * m[(0, 1)]))
            moments_central[(2, 0)] = (m[(2, 0)] - (cx * m[(1, 0)]))
            moments_central[(0, 2)] = (m[(0, 2)] - (cy * m[(0, 1)]))
        if (order > 2):
            # Third-order central moments.
            moments_central[(2, 1)] = ((((m[(2, 1)] - ((2 * cx) * m[(1, 1)])) - (cy * m[(2, 0)])) + ((cx ** 2) * m[(0, 1)])) + ((cy * cx) * m[(1, 0)]))
            moments_central[(1, 2)] = (((m[(1, 2)] - ((2 * cy) * m[(1, 1)])) - (cx * m[(0, 2)])) + (((2 * cy) * cx) * m[(0, 1)]))
            moments_central[(3, 0)] = ((m[(3, 0)] - ((3 * cx) * m[(2, 0)])) + ((2 * (cx ** 2)) * m[(1, 0)]))
            moments_central[(0, 3)] = ((m[(0, 3)] - ((3 * cy) * m[(0, 2)])) + ((2 * (cy ** 2)) * m[(0, 1)]))
    else:
        # 3D centroid from first-order raw moments.
        cx = (m[(1, 0, 0)] / m[(0, 0, 0)])
        cy = (m[(0, 1, 0)] / m[(0, 0, 0)])
        cz = (m[(0, 0, 1)] / m[(0, 0, 0)])
        moments_central[(0, 0, 0)] = m[(0, 0, 0)]
        if (order > 1):
            # Second-order central moments.
            moments_central[(0, 0, 2)] = (((- cz) * m[(0, 0, 1)]) + m[(0, 0, 2)])
            moments_central[(0, 1, 1)] = (((- cy) * m[(0, 0, 1)]) + m[(0, 1, 1)])
            moments_central[(0, 2, 0)] = (((- cy) * m[(0, 1, 0)]) + m[(0, 2, 0)])
            moments_central[(1, 0, 1)] = (((- cx) * m[(0, 0, 1)]) + m[(1, 0, 1)])
            moments_central[(1, 1, 0)] = (((- cx) * m[(0, 1, 0)]) + m[(1, 1, 0)])
            moments_central[(2, 0, 0)] = (((- cx) * m[(1, 0, 0)]) + m[(2, 0, 0)])
        if (order > 2):
            # Third-order central moments.
            moments_central[(0, 0, 3)] = ((((2 * (cz ** 2)) * m[(0, 0, 1)]) - ((3 * cz) * m[(0, 0, 2)])) + m[(0, 0, 3)])
            moments_central[(0, 1, 2)] = ((((- cy) * m[(0, 0, 2)]) + ((2 * cz) * ((cy * m[(0, 0, 1)]) - m[(0, 1, 1)]))) + m[(0, 1, 2)])
            moments_central[(0, 2, 1)] = (((((cy ** 2) * m[(0, 0, 1)]) - ((2 * cy) * m[(0, 1, 1)])) + (cz * ((cy * m[(0, 1, 0)]) - m[(0, 2, 0)]))) + m[(0, 2, 1)])
            moments_central[(0, 3, 0)] = ((((2 * (cy ** 2)) * m[(0, 1, 0)]) - ((3 * cy) * m[(0, 2, 0)])) + m[(0, 3, 0)])
            moments_central[(1, 0, 2)] = ((((- cx) * m[(0, 0, 2)]) + ((2 * cz) * ((cx * m[(0, 0, 1)]) - m[(1, 0, 1)]))) + m[(1, 0, 2)])
            moments_central[(1, 1, 1)] = (((((- cx) * m[(0, 1, 1)]) + (cy * ((cx * m[(0, 0, 1)]) - m[(1, 0, 1)]))) + (cz * ((cx * m[(0, 1, 0)]) - m[(1, 1, 0)]))) + m[(1, 1, 1)])
            moments_central[(1, 2, 0)] = ((((- cx) * m[(0, 2, 0)]) - ((2 * cy) * (((- cx) * m[(0, 1, 0)]) + m[(1, 1, 0)]))) + m[(1, 2, 0)])
            moments_central[(2, 0, 1)] = (((((cx ** 2) * m[(0, 0, 1)]) - ((2 * cx) * m[(1, 0, 1)])) + (cz * ((cx * m[(1, 0, 0)]) - m[(2, 0, 0)]))) + m[(2, 0, 1)])
            moments_central[(2, 1, 0)] = (((((cx ** 2) * m[(0, 1, 0)]) - ((2 * cx) * m[(1, 1, 0)])) + (cy * ((cx * m[(1, 0, 0)]) - m[(2, 0, 0)]))) + m[(2, 1, 0)])
            moments_central[(3, 0, 0)] = ((((2 * (cx ** 2)) * m[(1, 0, 0)]) - ((3 * cx) * m[(2, 0, 0)])) + m[(3, 0, 0)])
    return moments_central.astype(float_dtype, copy=False)
def MOLS_table(start, stop=None, compare=False, width=None):
    """Print a 20-column table of the largest known number of MOLS per order.

    Rows cover orders [start, stop), rounded outward to multiples of 20.
    With compare=True, print '+'/'-'/'' showing how our values compare to the
    Handbook reference data instead of the values themselves.
    """
    from .orthogonal_arrays import largest_available_k
    if (stop is None):
        (start, stop) = (0, start)
    # Round the displayed range outward to whole rows of 20.
    start = (start - (start % 20))
    stop = (stop - 1)
    stop = (stop + (20 - (stop % 20)))
    assert (((start % 20) == 0) and ((stop % 20) == 0))
    if (stop <= start):
        return
    if compare:
        # FIX: use a context manager so the handbook file is closed even if
        # parsing raises (the original leaked the handle on error).
        with open('{}/MOLS_table.txt'.format(COMBINATORIAL_DESIGN_DATA_DIR), 'r') as handbook_file:
            hb = [int(_) for _ in handbook_file.readlines()[9].split(',')]
    if (width is None):
        width = max(3, Integer((stop - 1)).ndigits(10))
    # Header row (column offsets 0..19) and separator.
    print(((' ' * (width + 2)) + ' '.join(('{i:>{width}}'.format(i=i, width=width) for i in range(20)))))
    print(((' ' * (width + 1)) + ('_' * ((width + 1) * 20))), end='')
    for i in range(start, stop):
        if ((i % 20) == 0):
            # Start a new row, labelled with the first order it contains.
            print('\n{:>{width}}|'.format(i, width=width), end='')
        k = (largest_available_k(i) - 2)
        if compare:
            # '+' means we beat the handbook, '-' means we are behind.
            if ((i < 2) or (hb[i] == k)):
                c = ''
            elif (hb[i] < k):
                c = '+'
            else:
                c = '-'
        elif (i < 2):
            c = '+oo'
        else:
            c = k
        print(' {:>{width}}'.format(c, width=width), end='')
class RNNCell(RNNCellBase):
    """Dynamically quantized Elman RNN cell with tanh or ReLU nonlinearity."""
    __constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity']

    def __init__(self, input_size, hidden_size, bias=True, nonlinearity='tanh', dtype=torch.qint8):
        # num_chunks=1: a plain RNN cell has a single gate (vs 4 for LSTM, 3 for GRU).
        super(RNNCell, self).__init__(input_size, hidden_size, bias, num_chunks=1, dtype=dtype)
        self.nonlinearity = nonlinearity

    def _get_name(self):
        return 'DynamicQuantizedRNNCell'

    def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor:
        """Run one cell step; a missing hidden state defaults to zeros."""
        self.check_forward_input(input)
        if (hx is None):
            hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        self.check_forward_hidden(input, hx, '')
        if (self.nonlinearity == 'tanh'):
            ret = torch.ops.quantized.quantized_rnn_tanh_cell_dynamic(input, hx, self._packed_weight_ih, self._packed_weight_hh, self.bias_ih, self.bias_hh)
        elif (self.nonlinearity == 'relu'):
            ret = torch.ops.quantized.quantized_rnn_relu_cell_dynamic(input, hx, self._packed_weight_ih, self._packed_weight_hh, self.bias_ih, self.bias_hh)
        else:
            # The assignment before the raise is dead at runtime; it looks like
            # the usual TorchScript workaround so that `ret` is defined on all
            # paths — kept as-is.
            ret = input
            raise RuntimeError('Unknown nonlinearity: {}'.format(self.nonlinearity))
        return ret

    def from_float(cls, mod):
        # NOTE(review): signature takes `cls`, so upstream this is presumably a
        # @classmethod whose decorator was lost in extraction — confirm.
        return super(RNNCell, cls).from_float(mod)
def get_entity_from_task_config(task_dict: dict):
    """Merge the 'entities' sections of all tasks into one registry.

    For each entity, the fields 'type', 'methods' and 'suggest_value' are
    copied; when several tasks define the same entity, later tasks overwrite
    the fields set by earlier ones. 'methods' is deep-copied so the result
    does not alias the task configuration.

    Returns a dict of the form {'Entity': {entity_name: {field: value}}}.
    """
    registry = {}
    for task_cfg in task_dict.values():
        # Tasks without an 'entities' section contribute nothing.
        for name, spec in task_cfg.get('entities', {}).items():
            merged = registry.setdefault(name, {})
            if 'type' in spec:
                merged['type'] = spec['type']
            if 'methods' in spec:
                merged['methods'] = deepcopy(spec['methods'])
            if 'suggest_value' in spec:
                merged['suggest_value'] = spec['suggest_value']
    return {'Entity': registry}
class ConvModBlock(nn.Module):
    """Residual block: ConvMod attention then MLP, each with layer scale and drop path."""

    def __init__(self, dim, mlp_ratio=4.0, drop_path=0.0):
        super().__init__()
        self.attn = ConvMod(dim)
        self.mlp = MLP(dim, mlp_ratio)
        init_scale = 1e-06
        self.layer_scale_1 = nn.Parameter(init_scale * torch.ones(dim), requires_grad=True)
        self.layer_scale_2 = nn.Parameter(init_scale * torch.ones(dim), requires_grad=True)
        if drop_path > 0.0:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()

    def forward(self, x):
        # Broadcast the per-channel scales over the spatial dimensions.
        attn_scale = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1)
        mlp_scale = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1)
        x = x + self.drop_path(attn_scale * self.attn(x))
        x = x + self.drop_path(mlp_scale * self.mlp(x))
        return x
def DenseNet121(nclass):
    """Standard DenseNet-121: blocks of 6/12/24/16 layers, growth rate 32."""
    block_config = [6, 12, 24, 16]
    return DenseNet(Bottleneck, block_config, growth_rate=32, num_classes=nclass)
class TestFirls(object):
    """Tests for scipy.signal.firls (least-squares FIR filter design)."""

    def test_bad_args(self):
        """Invalid argument combinations must raise ValueError."""
        # even numtaps
        assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
        # odd number of band edges
        assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
        # len(bands) != len(desired)
        assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
        # non-monotonic / repeated / overlapping band edges
        assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
        assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], ([0] * 4))
        assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], ([0] * 4))
        assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], ([0] * 4))
        # negative desired value
        assert_raises(ValueError, firls, 11, [0.1, 0.2], [(- 1), 1])
        # bad weight length / negative weight
        assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2])
        assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [(- 1)])

    def test_firls(self):
        """Design a symmetric halfband lowpass and check its structural properties."""
        N = 11
        a = 0.1  # transition-band half-width
        h = firls(11, [0, a, (0.5 - a), 0.5], [1, 1, 0, 0], fs=1.0)
        assert_equal(len(h), N)
        midx = ((N - 1) // 2)
        # Linear phase: coefficients are symmetric about the middle tap.
        assert_array_almost_equal(h[:midx], h[:((- midx) - 1):(- 1)])
        # Halfband property: center tap 0.5, other odd taps ~0.
        assert_almost_equal(h[midx], 0.5)
        hodd = np.hstack((h[1:midx:2], h[((- midx) + 1)::2]))
        assert_array_almost_equal(hodd, 0)
        # Frequency response: ~1 in the passband, ~0 in the stopband.
        (w, H) = freqz(h, 1)
        f = ((w / 2) / np.pi)
        Hmag = np.abs(H)
        idx = np.logical_and((f > 0), (f < a))
        assert_array_almost_equal(Hmag[idx], 1, decimal=3)
        idx = np.logical_and((f > (0.5 - a)), (f < 0.5))
        assert_array_almost_equal(Hmag[idx], 0, decimal=3)

    def test_compare(self):
        """Compare against externally computed reference taps."""
        taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2])
        known_taps = [(- 0.), (- 0.), (- 0.), 0., 0., 0., (- 0.), (- 0.), (- 0.)]
        assert_allclose(taps, known_taps)
        taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2])
        known_taps = [0., (- 0.), (- 0.), 0., 0., 0., 0., 0., (- 0.), (- 0.), 0.]
        assert_allclose(taps, known_taps)
        # fs and the deprecated nyq spelling must agree.
        taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20)
        known_taps = [1., (- 4.), 7., (- 8.), 7., (- 4.), 1.]
        assert_allclose(taps, known_taps)
        taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10)
        assert_allclose(taps, known_taps)
        with pytest.raises(ValueError, match='between 0 and 1'):
            firls(7, [0, 1], [0, 1], nyq=0.5)

    def test_rank_deficient(self):
        """Narrow bands make the LS system rank-deficient; the solver must still work."""
        x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0])
        (w, h) = freqz(x, fs=2.0)
        assert_allclose(np.abs(h[:2]), 1.0, atol=1e-05)
        assert_allclose(np.abs(h[(- 2):]), 0.0, atol=1e-06)
        # Even more extreme edges (switches to the lstsq fallback internally).
        x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0])
        (w, h) = freqz(x, fs=2.0)
        mask = (w < 0.01)
        assert (mask.sum() > 3)
        assert_allclose(np.abs(h[mask]), 1.0, atol=0.0001)
        mask = (w > 0.99)
        assert (mask.sum() > 3)
        assert_allclose(np.abs(h[mask]), 0.0, atol=0.0001)
# NOTE(review): the leading '@pytest.mark' text of these parametrize decorators
# appears to have been stripped by extraction; as written the next five lines
# are not valid standalone Python — confirm against the original file.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('p', [None, 1.0, 1.3, 3.0])
.parametrize('shape, axis', [((2, 3, 5, 7), (0, 2)), ((13,), 0), ((7, 3, 1), None), ((2, 1, 4, 5), (0, 2))])
.parametrize('eps', [1e-12])
def test_norm_normalization_forward_backward(eps, axis, p, shape, seed, ctx, func_name):
    """Forward/backward check of F.norm_normalization against the reference implementation."""
    from nbla_test_utils import cap_ignore_region, function_tester
    from sys import platform
    if (platform == 'darwin'):
        pytest.skip('NormNormalization is not supported in macOS.')
    rng = np.random.RandomState(seed)
    # Keep inputs away from zero to avoid the norm's non-differentiable region.
    inputs = [cap_ignore_region((rng.randn(*shape).astype(np.float32) * 2), ((- 0.001), 0.001))]
    func_args = [p, axis, eps]
    function_tester(rng, F.norm_normalization, ref_norm_normalization, inputs, ctx=ctx, func_name=func_name, func_args=func_args, backward=[True], disable_half_test=False, atol_b=0.01, atol_accum=0.01)
def point_maze(maze_str):
    """Build a MuJoCo model of a 2D point-mass maze from its ASCII description.

    ``maze_str`` is parsed into a grid; each WALL cell becomes a box geom.
    The agent is a sliding sphere ('particle') driven by x/y motors, and a
    'target_site' marks the goal. Returns the MJCModel.
    """
    maze_arr = parse_maze(maze_str)
    mjcmodel = MJCModel('point_maze')
    mjcmodel.root.compiler(inertiafromgeom='true', angle='radian', coordinate='local')
    # Zero gravity: the point mass moves only under actuation.
    mjcmodel.root.option(timestep='0.01', gravity='0 0 0', iterations='20', integrator='Euler')
    # Defaults applied to all joints/geoms below.
    default = mjcmodel.root.default()
    default.joint(damping=1, limited='false')
    default.geom(friction='.5 .1 .1', density='1000', margin='0.002', condim='1', contype='2', conaffinity='1')
    # Visual assets: ground texture, skybox, wall and target materials.
    asset = mjcmodel.root.asset()
    asset.texture(type='2d', name='groundplane', builtin='checker', rgb1='0.2 0.3 0.4', rgb2='0.1 0.2 0.3', width=100, height=100)
    asset.texture(name='skybox', type='skybox', builtin='gradient', rgb1='.4 .6 .8', rgb2='0 0 0', width='800', height='800', mark='random', markrgb='1 1 1')
    asset.material(name='groundplane', texture='groundplane', texrepeat='20 20')
    asset.material(name='wall', rgba='.7 .5 .3 1')
    asset.material(name='target', rgba='.6 .3 .3 1')
    visual = mjcmodel.root.visual()
    visual.headlight(ambient='.4 .4 .4', diffuse='.8 .8 .8', specular='0.1 0.1 0.1')
    visual.map(znear=0.01)
    visual.quality(shadowsize=2048)
    worldbody = mjcmodel.root.worldbody()
    worldbody.geom(name='ground', size='40 40 0.25', pos='0 0 -0.1', type='plane', contype=1, conaffinity=0, material='groundplane')
    # The agent: a small sphere with two slide joints (x and y).
    particle = worldbody.body(name='particle', pos=[1.2, 1.2, 0])
    particle.geom(name='particle_geom', type='sphere', size=0.1, rgba='0.0 0.0 1.0 0.0', contype=1)
    particle.site(name='particle_site', pos=[0.0, 0.0, 0], size=0.2, rgba='0.3 0.6 0.3 1')
    particle.joint(name='ball_x', type='slide', pos=[0, 0, 0], axis=[1, 0, 0])
    particle.joint(name='ball_y', type='slide', pos=[0, 0, 0], axis=[0, 1, 0])
    worldbody.site(name='target_site', pos=[0.0, 0.0, 0], size=0.2, material='target')
    # One unit box per WALL cell, offset by +1 so the maze sits inside the plane.
    (width, height) = maze_arr.shape
    for w in range(width):
        for h in range(height):
            if (maze_arr[(w, h)] == WALL):
                worldbody.geom(conaffinity=1, type='box', name=('wall_%d_%d' % (w, h)), material='wall', pos=[(w + 1.0), (h + 1.0), 0], size=[0.5, 0.5, 0.2])
    # Direct force actuation on the two slide joints.
    actuator = mjcmodel.root.actuator()
    actuator.motor(joint='ball_x', ctrlrange=[(- 1.0), 1.0], ctrllimited=True, gear=100)
    actuator.motor(joint='ball_y', ctrlrange=[(- 1.0), 1.0], ctrllimited=True, gear=100)
    return mjcmodel
class BloclLocalStorage(BenchmarkItem):
    """Benchmark item toggling block-local storage on and off.

    NOTE(review): the class name looks like a typo for 'BlockLocalStorage' —
    kept unchanged for API compatibility.
    """
    name = 'bls'

    def __init__(self):
        self._items = dict(bls_on=True, bls_off=False)
def edge_accurate(pred, target):
    """Compare predicted and target graph adjacency matrices.

    Returns (total true edges, true positives, true negatives).
    """
    truth = retrieve_adjacency_matrix(target)
    # Align node ordering with the target when it is a DiGraph.
    node_order = target.nodes() if isinstance(target, nx.DiGraph) else None
    guess = retrieve_adjacency_matrix(pred, node_order)
    agree = guess == truth
    tp = ((guess == 1) & agree).sum()
    tn = ((guess == 0) & agree).sum()
    return (truth.sum(), tp, tn)
def test_case124():
    """POST subscription subdata123 to the NGSI-LD broker and expect 201 Created.

    NOTE(review): the 'Link' header value looks mangled — the URL inside
    rel="..." appears to have been stripped by extraction; confirm against
    the original test file.
    """
    url = (brokerIp + '/ngsi-ld/v1/subscriptions/')
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata123), headers=headers)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 201)
def get_concat_h(im1, im2):
    """Concatenate two PIL images side by side (canvas height = im1.height)."""
    canvas = PIL.Image.new('RGB', (im1.width + im2.width, im1.height))
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (im1.width, 0))
    return canvas
def parse(exit_code, log, output):
    """Parse a teether run's exit code and log into (findings, infos, errors, fails).

    Findings hold generated exploits; infos are benign outcome notes; errors
    and fails come from the shared parse_utils classification, normalized here.
    """
    (findings, infos) = ([], set())
    (errors, fails) = sb.parse_utils.errors_fails(exit_code, log)
    # Exit code 1 alone is not an error for teether.
    errors.discard('EXIT_CODE_1')
    for f in list(fails):
        if f.startswith('exception (teether.evm.exceptions.'):
            # EVM-level exceptions are expected during symbolic execution.
            fails.remove(f)
        elif (f.startswith('exception (z3.z3types.Z3Exception: b"Argument ') or f.startswith("exception (z3.z3types.Z3Exception: b'Argument ")):
            # Collapse the many argument-specific Z3 messages into one entry.
            fails.remove(f)
            fails.add('exception (z3.z3types.Z3Exception: Argument does not match function declaration)')
    exploit = []
    analysis_completed = False
    for line in log:
        if line.startswith('INFO:root:Could not exploit any RETURN+CALL'):
            infos.add('Could not exploit any RETURN+CALL')
            analysis_completed = True
        elif line.startswith('WARNING:root:No state-dependent critical path found, aborting'):
            infos.add('No state-dependent critical path found')
            analysis_completed = True
        elif line.startswith('eth.sendTransaction'):
            # Emitted transactions form the exploit script.
            exploit.append(line)
            analysis_completed = True
        elif line.startswith('ERROR:root:'):
            error = line[11:]
            if error.startswith('Failed path due to '):
                e = error[19:]
                # Skip failures already accounted for in `fails`.
                if (e.startswith("b'Argument ") or any(((e in f) for f in fails))):
                    continue
                # Normalize variable-suffix messages to fixed categories.
                if e.startswith('Symbolic code index'):
                    error = 'Failed path due to Symbolic code index'
                elif e.startswith('balance of symbolic address'):
                    error = 'Failed path due to balance of symbolic address'
            errors.add(error)
    if (log and (not analysis_completed)):
        infos.add('analysis incomplete')
        if ((not fails) and (not errors)):
            fails.add('execution failed')
    if exploit:
        findings = [{'name': 'Ether leak', 'exploit': exploit}]
    return (findings, infos, errors, fails)
class UNet(nn.Module):
    """U-Net with 5 down-sampling stages and instance-normalised input.

    Channel widths follow the standard 64..2048 progression (divided by
    ``NUM``, currently 1).
    """

    def l2n(self, x):
        # Channel-wise L2 normalisation helper (not used by forward()).
        return torch.nn.functional.normalize(x, p=2.0, dim=1)

    def __init__(self, n_channels, n_classes, bilinear=False):
        super(UNet, self).__init__()
        self.IN = nn.InstanceNorm2d(1)
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        NUM = 1  # width divisor; 1 keeps the standard channel counts
        self.inc = DoubleConv(n_channels, (64 // NUM))
        self.down1 = Down((64 // NUM), (128 // NUM))
        self.down2 = Down((128 // NUM), (256 // NUM))
        self.down3 = Down((256 // NUM), (512 // NUM))
        # NOTE(review): both branches yield 1 — upstream U-Nets use
        # ``2 if bilinear else 1``. Kept as-is to preserve channel counts.
        factor = (1 if bilinear else 1)
        self.down4 = Down((512 // NUM), ((1024 // factor) // NUM))
        self.down5 = Down((1024 // NUM), ((2048 // factor) // NUM))
        self.up0 = Up((2048 // NUM), ((1024 // factor) // NUM), bilinear)
        self.up1 = Up((1024 // NUM), ((512 // factor) // NUM), bilinear)
        self.up2 = Up((512 // NUM), ((256 // factor) // NUM), bilinear)
        self.up3 = Up((256 // NUM), ((128 // factor) // NUM), bilinear)
        self.up4 = Up((128 // NUM), ((64 // factor) // NUM), bilinear)
        self.outc = OutConv((64 // NUM), n_classes)

    def forward(self, x):
        x = self.IN(x)
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x6 = self.down5(x5)
        # BUGFIX: removed dead ``x = self.IN(x6)`` — its result was
        # immediately overwritten by the next assignment.
        x = self.up0(x6, x5)
        x = self.up1(x, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.outc(x)
        return logits
class BinnedDataset(Dataset):
    """Dataset pairing molecule DGL graphs with binned mass spectra."""

    def __init__(self, df, data_dir, num_bins, graph_featurizer, num_workers=0, upper_limit=1500, form_dir_name: str='subform_20', use_ray=False, **kwargs):
        """Featurize molecules and load/bin the subformula spectrum files.

        Entries whose spectrum table cannot be produced are masked out of
        every parallel list so indices stay aligned.
        """
        self.df = df
        self.num_bins = num_bins
        self.num_workers = num_workers
        self.upper_limit = upper_limit
        self.bins = np.linspace(0, self.upper_limit, self.num_bins)
        # spec name -> ionization/adduct string
        self.name_to_adduct = dict(self.df[['spec', 'ionization']].values)
        self.smiles = self.df['smiles'].values
        self.graph_featurizer = graph_featurizer
        self.num_atom_feats = self.graph_featurizer.num_atom_feats
        self.num_bond_feats = self.graph_featurizer.num_bond_feats
        # Serial path for num_workers == 0, otherwise chunked parallel maps.
        if (self.num_workers == 0):
            self.mols = [Chem.MolFromSmiles(i) for i in self.smiles]
            self.weights = [common.ExactMolWt(i) for i in self.mols]
            self.mol_graphs = [self.graph_featurizer.get_dgl_graph(i) for i in self.mols]
        else:
            mol_from_smi = (lambda x: Chem.MolFromSmiles(x))
            self.mols = common.chunked_parallel(self.smiles, mol_from_smi, chunks=100, max_cpu=self.num_workers, timeout=600, max_retries=3, use_ray=use_ray)
            self.weights = common.chunked_parallel(self.mols, (lambda x: common.ExactMolWt(x)), chunks=100, max_cpu=self.num_workers, timeout=600, max_retries=3, use_ray=use_ray)
            self.mol_graphs = common.chunked_parallel(self.mols, self.graph_featurizer.get_dgl_graph, chunks=100, max_cpu=self.num_workers, timeout=4000, max_retries=3, use_ray=use_ray)
        self.weights = np.array(self.weights)
        self.spec_names = self.df['spec'].values
        spec_files = [(((data_dir / 'subformulae') / f'{form_dir_name}') / f'{spec_name}.json') for spec_name in self.spec_names]
        process_spec_file = (lambda x: common.bin_form_file(x, num_bins=num_bins, upper_limit=upper_limit))
        if (self.num_workers == 0):
            spec_outputs = [process_spec_file(i) for i in spec_files]
        else:
            spec_outputs = common.chunked_parallel(spec_files, process_spec_file, chunks=100, max_cpu=self.num_workers, timeout=4000, max_retries=3, use_ray=use_ray)
        (self.metas, self.spec_ars) = zip(*spec_outputs)
        # Mask out entries whose binned spectrum is missing (None).
        mask = np.array([(i is not None) for i in self.spec_ars])
        logging.info(f'Could not find tables for {np.sum((~ mask))} spec')
        self.metas = np.array(self.metas)[mask].tolist()
        self.spec_ars = np.array(self.spec_ars, dtype=object)[mask].tolist()
        self.df = self.df[mask]
        self.spec_names = np.array(self.spec_names)[mask].tolist()
        self.weights = np.array(self.weights)[mask].tolist()
        self.mol_graphs = np.array(self.mol_graphs, dtype=object)[mask].tolist()
        self.adducts = [common.ion2onehot_pos[self.name_to_adduct[i]] for i in self.spec_names]

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx: int):
        """Return one example dict (name, binned spectrum, weight, adduct, graph, meta)."""
        name = self.spec_names[idx]
        meta = self.metas[idx]
        ar = self.spec_ars[idx]
        graph = self.mol_graphs[idx]
        full_weight = self.weights[idx]
        adduct = self.adducts[idx]
        outdict = {'name': name, 'binned': ar, 'full_weight': full_weight, 'adduct': adduct, 'graph': graph, '_meta': meta}
        return outdict

    @classmethod
    def get_collate_fn(cls):
        # BUGFIX(review): restored the stripped @classmethod decorator —
        # previously ``cls`` was bound to the instance when called.
        return BinnedDataset.collate_fn

    @staticmethod
    def collate_fn(input_list):
        """Collate a list of __getitem__ dicts into one batched dict."""
        names = [j['name'] for j in input_list]
        spec_ars = [j['binned'] for j in input_list]
        graphs = [j['graph'] for j in input_list]
        full_weight = [j['full_weight'] for j in input_list]
        adducts = [j['adduct'] for j in input_list]
        spectra_tensors = torch.stack([torch.tensor(spectra) for spectra in spec_ars])
        full_weight = torch.FloatTensor(full_weight)
        batched_graph = dgl.batch(graphs)
        adducts = torch.FloatTensor(adducts)
        return_dict = {'spectra': spectra_tensors, 'graphs': batched_graph, 'names': names, 'adducts': adducts, 'full_weight': full_weight}
        return return_dict
def load_data(path, flag='train'):
    """Load one split of the DoppelGANger-style dataset from ``path``.

    Returns (data_feature, data_attribute, data_gen_flag,
    data_feature_outputs, data_attribute_outputs).
    """
    npz = np.load(os.path.join(path, f'data_{flag}.npz'))
    # The two pickled output descriptors live next to the npz archive.
    pickled = []
    for pkl_name in ('data_feature_output.pkl', 'data_attribute_output.pkl'):
        with open(os.path.join(path, pkl_name), 'rb') as handle:
            pickled.append(pickle.load(handle))
    data_feature_outputs, data_attribute_outputs = pickled
    return (npz['data_feature'], npz['data_attribute'], npz['data_gen_flag'],
            data_feature_outputs, data_attribute_outputs)
class TransferModule(nn.Module):
    """Propagate node attention along edges via a matrix product."""

    def forward(self, node_attn, edge_attn):
        # Cast edge weights to float so the matmul dtype matches node_attn.
        propagated = node_attn.matmul(edge_attn.to(torch.float))
        return propagated
def _preprocess_dataset_for_language_modeling(dataset: Sequence, sep_token: Optional[int]=None, conditional: bool=True):
    """Prepare a patent dataset for language modeling.

    Maps the textual decision label to an integer, drops continuation
    (CONT-*) patents, and builds a ``text`` field from the claims —
    optionally prefixed with title/year/IPC metadata when ``conditional``.
    """
    decision_to_str = {'REJECTED': 0, 'ACCEPTED': 1, 'PENDING': 2, 'CONT-REJECTED': 3, 'CONT-ACCEPTED': 4, 'CONT-PENDING': 5}
    indices_of_cont_patents = {idx for label, idx in decision_to_str.items() if label.startswith('CONT-')}

    def map_decision_to_string(example):
        # Replace the textual decision label with its integer id.
        return {'decision': decision_to_str[example['decision']]}

    def filter_cont_patents(ex):
        return ex['decision'] not in indices_of_cont_patents

    def format_example_for_language_modeling(ex):
        # Strip the boilerplate claim preamble if present near the start.
        if 'What is claimed is:' in ex['claims'][:50]:
            ex['claims'] = ex['claims'].replace('What is claimed is:', '')
        if conditional:
            text = 'TITLE {title} {sep} YEAR {year} {sep} IPC {ipc} {sep} CLAIMS {claims}'.format(sep=sep_token, title=ex['title'], year=ex['filing_date'][:4], ipc=ex['ipc_label'][:4], claims=ex['claims'])
        else:
            text = ex['claims']
        return {'text': text}

    print('Mapping decision to integer')
    dataset = dataset.map(map_decision_to_string)
    print('Processed dataset cached to: ')
    pprint(dataset.cache_files)
    print('Filtering out CONT patents')
    print(f'[OLD] len(dataset) = {len(dataset)}')
    dataset = dataset.filter(filter_cont_patents)
    print(f'[NEW] len(dataset) = {len(dataset)}')
    print('Formatting examples for language modeling')
    dataset = dataset.map(format_example_for_language_modeling, batched=False)
    return dataset
class BeatsEncoder(BaseEncoder):
    """Frozen pretrained BEATs audio model wrapped as a feature encoder."""

    def __init__(self, checkpoint_path=ckp_path):
        """Load a BEATs checkpoint from a URL or a local file path.

        Raises:
            FileNotFoundError: if ``checkpoint_path`` is neither a URL nor
                an existing file.
        """
        super().__init__()
        if is_url(checkpoint_path):
            cached_file = download_cached_file(checkpoint_path, check_hash=False, progress=True)
            checkpoint = torch.load(cached_file)
        elif os.path.isfile(checkpoint_path):
            checkpoint = torch.load(checkpoint_path)
        else:
            # BUGFIX: previously fell through with ``checkpoint`` unbound,
            # raising an opaque UnboundLocalError below.
            raise FileNotFoundError(f'BEATs checkpoint not found: {checkpoint_path}')
        cfg = BEATsConfig(checkpoint['cfg'])
        self.num_features = cfg.encoder_embed_dim
        self.model = BEATs(cfg)
        self.model.load_state_dict(checkpoint['model'])
        self.model.eval()

    @classmethod
    def from_config(cls, cfg):
        # Alternate constructor from a config mapping (restored the
        # stripped @classmethod decorator).
        checkpoint_path = cfg.get('checkpoint_path', ckp_path)
        return cls(checkpoint_path)

    def forward(self, x):
        # Inference only: the encoder is frozen, gradients never flow.
        with torch.no_grad():
            return self.model.extract_features(x.squeeze(1))[0]
def get_sequence_mask(sequence_len):
    """Boolean mask of shape (batch, max_len): True where pos < length."""
    max_len = sequence_len.max()
    positions = torch.arange(max_len, device=sequence_len.device)
    positions = positions.expand(sequence_len.size(0), max_len)
    return positions < sequence_len.unsqueeze(1)
class GroupViTTextConfig(PretrainedConfig):
    """Configuration for the GroupViT text encoder."""

    model_type = 'groupvit_text_model'

    def __init__(self, vocab_size=49408, hidden_size=256, intermediate_size=1024, num_hidden_layers=12, num_attention_heads=4, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load the text config, unwrapping it from a composite 'groupvit' config.

        BUGFIX(review): restored the stripped @classmethod decorator —
        ``cls.get_config_dict``/``cls.from_dict`` require the class, not
        an instance.
        """
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if (config_dict.get('model_type') == 'groupvit'):
            # Composite checkpoint: pull out the nested text sub-config.
            config_dict = config_dict['text_config']
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def save_results(input_img, gt_data, density_map, output_dir, fname='results.png'):
    """Write colour-mapped ground truth, predicted density map and a
    side-by-side fusion image under ``./output_dir``.

    NOTE(review): ``input_img`` is converted but never written out — kept
    for interface compatibility.
    """
    density_map[(density_map < 0)] = 0
    input_img = input_img[0][0].astype(np.uint8)
    # BUGFIX: guard the 0..255 rescaling against all-zero maps —
    # ``255 * x / np.max(x)`` previously divided by zero, yielding NaNs.
    gt_max = np.max(gt_data)
    if gt_max > 0:
        gt_data = ((255 * gt_data) / gt_max)
    dm_max = np.max(density_map)
    if dm_max > 0:
        density_map = ((255 * density_map) / dm_max)
    gt_data = gt_data[0][0]
    density_map = density_map[0][0]
    gt_data = gt_data.astype(np.uint8)
    gt_data = cv2.applyColorMap(gt_data, 2)  # 2 == cv2.COLORMAP_JET
    cv2.imwrite(os.path.join('.', output_dir, fname).replace('.h5', 'gt.bmp').replace('.jpg', 'gt.bmp'), gt_data)
    density_map = density_map.astype(np.uint8)
    density_map = cv2.applyColorMap(density_map, 2)
    cv2.imwrite(os.path.join('.', output_dir, fname).replace('.h5', 'refine.bmp').replace('.jpg', 'refine.bmp'), density_map)
    result_img = np.hstack((gt_data, density_map))
    cv2.imwrite(os.path.join('.', output_dir, fname).replace('.h5', 'fuse.jpg').replace('.jpg', '.fuse.jpg'), result_img)
def test_same_predict() -> None:
    """Calibration must not change the predicted classes."""
    calibrator = MapieCalibrator(method='top_label')
    calibrator.fit(X=X_, y=y_, random_state=random_state)
    base_preds = calibrator.single_estimator_.predict(X=X_test)
    preds_via_predict = calibrator.predict(X=X_test)
    calibrated_proba = calibrator.predict_proba(X=X_test)
    # nanargmax: calibrated probabilities may contain NaNs for absent labels.
    calibrated_preds = np.nanargmax(calibrated_proba, axis=1)
    np.testing.assert_allclose(base_preds, calibrated_preds)
    np.testing.assert_allclose(base_preds, preds_via_predict)
class MixConv2d(nn.Module):
    """Mixed depth-wise conv: parallel convolutions with different kernel
    sizes whose outputs are concatenated, batch-normed, activated and
    added residually to the input (requires c1 == c2 for the residual).
    """

    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super().__init__()
        n_groups = len(k)
        if equal_ch:
            # Split c2 output channels as evenly as possible across groups.
            bucket = torch.linspace(0, n_groups - 1e-06, c2).floor()
            channels = [(bucket == g).sum() for g in range(n_groups)]
        else:
            # Solve a least-squares system so each group gets an equal
            # parameter budget (weights scale with k^2).
            rhs = [c2] + [0] * n_groups
            coef = np.eye(n_groups + 1, n_groups, k=-1)
            coef -= np.roll(coef, 1, axis=1)
            coef *= np.array(k) ** 2
            coef[0] = 1
            channels = np.linalg.lstsq(coef, rhs, rcond=None)[0].round()
        self.m = nn.ModuleList([
            nn.Conv2d(c1, int(channels[g]), k[g], s, k[g] // 2, bias=False)
            for g in range(n_groups)
        ])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        branches = torch.cat([conv(x) for conv in self.m], 1)
        return x + self.act(self.bn(branches))
def test_is_datetime_type_with_mixed_array():
    """A mix of datetime-like values (with NaN) should be detected as datetime."""
    values = [
        pd.to_datetime('2020-01-01'),
        '1890-03-05',
        pd.Timestamp('01-01-01'),
        datetime(2020, 1, 1),
        np.nan,
    ]
    assert is_datetime_type(values)
def arrowed_spines(fig, ax):
    """Replace the axes box with two arrowed spines (x and y axes)."""
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    # Hide the rectangular frame and every tick.
    for side in ('bottom', 'right', 'top', 'left'):
        ax.spines[side].set_visible(False)
    plt.xticks([])
    plt.yticks([])
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    # Axes size in inches, used to keep arrow heads visually symmetric.
    to_inches = fig.dpi_scale_trans.inverted()
    bbox = ax.get_window_extent().transformed(to_inches)
    width, height = bbox.width, bbox.height
    x_span = xmax - xmin
    y_span = ymax - ymin
    head_width = (1.0 / 20.0) * y_span
    head_length = (1.0 / 20.0) * x_span
    line_width = 1.0
    overhang = 0.3
    # Head dimensions for the y arrow, corrected for data/display aspect.
    y_head_width = head_width / y_span * x_span * height / width
    y_head_length = head_length / x_span * y_span * width / height
    ax.arrow(xmin, ymin, x_span, 0.0, fc='k', ec='k', lw=line_width, head_width=head_width, head_length=head_length, overhang=overhang, length_includes_head=True, clip_on=False)
    ax.arrow(xmin, ymin, 0.0, y_span, fc='k', ec='k', lw=line_width, head_width=y_head_width, head_length=y_head_length, overhang=overhang, length_includes_head=True, clip_on=False)
@_LAYERS.register_module(name='Clip')
@_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp inputs element-wise to ``[min, max]``.

    BUGFIX(review): the two ``_LAYERS.register_module(...)`` lines were
    bare calls (stripped decorators), so the layer was never registered;
    restored as decorators, registering the class as 'Clamp' and 'Clip'.
    """

    def __init__(self, min=(- 1.0), max=1.0):
        super(Clamp, self).__init__()
        self.min = min  # lower bound
        self.max = max  # upper bound

    def forward(self, x):
        return torch.clamp(x, min=self.min, max=self.max)
class DDIMSampler(object):
    """DDIM sampler wrapped around a trained DDPM-style latent-diffusion model.

    BUGFIX(review): the bare ``_grad()`` statements in the original were
    mangled ``@torch.no_grad()`` decorators (sampling is inference-only);
    restored on sample/ddim_sampling/p_sample_ddim/stochastic_encode/decode.
    """

    def __init__(self, model, schedule='linear', **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        # Store schedule tensors as plain attributes, moved onto CUDA.
        if (type(attr) == torch.Tensor):
            if (attr.device != torch.device('cuda')):
                attr = attr.to(torch.device('cuda'))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize='uniform', ddim_eta=0.0, verbose=True):
        """Precompute the DDIM timestep subset and all alpha/sigma tensors."""
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert (alphas_cumprod.shape[0] == self.ddpm_num_timesteps), 'alphas have to be defined for each timestep'
        to_torch = (lambda x: x.clone().detach().to(torch.float32).to(self.model.device))
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # Quantities derived from alphas_cumprod, used by the diffusion math.
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log((1.0 - alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt((1.0 / alphas_cumprod.cpu()))))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(((1.0 / alphas_cumprod.cpu()) - 1))))
        (ddim_sigmas, ddim_alphas, ddim_alphas_prev) = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt((1.0 - ddim_alphas)))
        sigmas_for_original_sampling_steps = (ddim_eta * torch.sqrt((((1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod)) * (1 - (self.alphas_cumprod / self.alphas_cumprod_prev)))))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0.0, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1.0, unconditional_conditioning=None, **kwargs):
        """Entry point: run S DDIM steps and return (samples, intermediates)."""
        if (conditioning is not None):
            # Sanity-check conditioning batch size against the requested one.
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list):
                    ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if (cbs != batch_size):
                    print(f'Warning: Got {cbs} conditionings but batch-size is {batch_size}')
            elif (conditioning.shape[0] != batch_size):
                print(f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}')
        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        (C, H, W) = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')
        (samples, intermediates) = self.ddim_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
        return (samples, intermediates)

    @torch.no_grad()
    def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """Iterate p_sample_ddim over the (reversed) timestep schedule."""
        device = self.model.betas.device
        b = shape[0]
        if (x_T is None):
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if (timesteps is None):
            timesteps = (self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps)
        elif ((timesteps is not None) and (not ddim_use_original_steps)):
            # Truncate the DDIM schedule to a fraction of its steps.
            subset_end = (int((min((timesteps / self.ddim_timesteps.shape[0]), 1) * self.ddim_timesteps.shape[0])) - 1)
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = (reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps))
        total_steps = (timesteps if ddim_use_original_steps else timesteps.shape[0])
        print(f'Running DDIM Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
        for (i, step) in enumerate(iterator):
            index = ((total_steps - i) - 1)
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            if (mask is not None):
                # Inpainting: keep masked region pinned to the noised x0.
                assert (x0 is not None)
                img_orig = self.model.q_sample(x0, ts)
                img = ((img_orig * mask) + ((1.0 - mask) * img))
            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
            (img, pred_x0) = outs
            if callback:
                callback(i)
            if img_callback:
                img_callback(pred_x0, i)
            if (((index % log_every_t) == 0) or (index == (total_steps - 1))):
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return (img, intermediates)

    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None):
        """One DDIM denoising step; returns (x_prev, pred_x0)."""
        (b, *_, device) = (*x.shape, x.device)
        if ((unconditional_conditioning is None) or (unconditional_guidance_scale == 1.0)):
            e_t = self.model.apply_model(x, t, c)
        else:
            # Classifier-free guidance: one batched pass for uncond + cond.
            x_in = torch.cat(([x] * 2))
            t_in = torch.cat(([t] * 2))
            if isinstance(c, dict):
                assert isinstance(unconditional_conditioning, dict)
                c_in = dict()
                for k in c:
                    if isinstance(c[k], list):
                        c_in[k] = [torch.cat([unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))]
                    else:
                        c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
            else:
                c_in = torch.cat([unconditional_conditioning, c])
            (e_t_uncond, e_t) = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            e_t = (e_t_uncond + (unconditional_guidance_scale * (e_t - e_t_uncond)))
        if (score_corrector is not None):
            assert (self.model.parameterization == 'eps')
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
        alphas = (self.model.alphas_cumprod if use_original_steps else self.ddim_alphas)
        alphas_prev = (self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev)
        sqrt_one_minus_alphas = (self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas)
        sigmas = (self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas)
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
        # DDIM update (Song et al.): predict x0, then step toward x_{t-1}.
        pred_x0 = ((x - (sqrt_one_minus_at * e_t)) / a_t.sqrt())
        if quantize_denoised:
            (pred_x0, _, *_) = self.model.first_stage_model.quantize(pred_x0)
        dir_xt = (((1.0 - a_prev) - (sigma_t ** 2)).sqrt() * e_t)
        noise = ((sigma_t * noise_like(x.shape, device, repeat_noise)) * temperature)
        if (noise_dropout > 0.0):
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = (((a_prev.sqrt() * pred_x0) + dir_xt) + noise)
        return (x_prev, pred_x0)

    @torch.no_grad()
    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
        """Forward-diffuse x0 to timestep t (q_sample with DDIM alphas)."""
        if use_original_steps:
            sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
            sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
        else:
            sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
            sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
        if (noise is None):
            noise = torch.randn_like(x0)
        return ((extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0) + (extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise))

    @torch.no_grad()
    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, use_original_steps=False):
        """Denoise an encoded latent from step t_start back to an image latent."""
        timesteps = (np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps)
        timesteps = timesteps[:t_start]
        time_range = np.flip(timesteps)
        total_steps = timesteps.shape[0]
        print(f'Running DDIM Sampling with {total_steps} timesteps')
        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
        x_dec = x_latent
        for (i, step) in enumerate(iterator):
            index = ((total_steps - i) - 1)
            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
            (x_dec, _) = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning)
        return x_dec
def update(G, B, h):
    """Gebauer-Moeller style update of a basis/pair-set with a new element.

    Given the current basis ``G``, the set of critical pairs ``B`` and a
    new polynomial ``h``, returns ``(G_new, B_new)`` with redundant pairs
    and basis elements pruned.
    """
    R = h.parent()
    LCM = R.monomial_lcm

    def lt_divides(x, y):
        # True iff LT(y) divides LT(x): both the leading monomial and the
        # leading coefficient must divide (needed over non-field rings).
        # BUGFIX: the original ignored its parameters and read the stale
        # closure variables ``h``/``g``, which corrupted the pruning test
        # ``lt_divides(lcm_12, h)`` in the B loop below.
        return (R.monomial_divides(LM(y), LM(x)) and LC(y).divides(LC(x)))

    def lt_pairwise_prime(x, y):
        # Leading terms coprime: coprime monomials and coprime coefficients.
        return (R.monomial_pairwise_prime(LM(x), LM(y)) and (gcd(LC(x), LC(y)) == 1))

    def lcm_divides(f, g1, h):
        # Does lcm(LT(h), LT(f[1])) divide lcm(LT(h), LT(g1))?
        return (R.monomial_divides(LCM(LM(h), LM(f[1])), LCM(LM(h), LM(g1))) and lcm(LC(h), LC(f[1])).divides(lcm(LC(h), LC(g1))))

    # Select new pairs (h, g1): keep those with coprime leading terms or
    # whose lcm is not dominated by another candidate pair.
    C = set(((h, g) for g in G))
    D = set()
    while C:
        (h, g1) = C.pop()
        if (lt_pairwise_prime(h, g1) or ((not any((lcm_divides(f, g1, h) for f in C))) and (not any((lcm_divides(f, g1, h) for f in D))))):
            D.add((h, g1))
    # Buchberger's first criterion: drop pairs with coprime leading terms.
    E = set()
    while D:
        (h, g) = D.pop()
        if (not lt_pairwise_prime(h, g)):
            E.add((h, g))
    # Prune old pairs made redundant by h (LT(h) divides their lcm and h is
    # not "between" g1 and g2).
    B_new = set()
    while B:
        (g1, g2) = B.pop()
        lcm_12 = (lcm(LC(g1), LC(g2)) * LCM(LM(g1), LM(g2)))
        if ((not lt_divides(lcm_12, h)) or ((lcm(LC(g1), LC(h)) * R.monomial_lcm(LM(g1), LM(h))) == lcm_12) or ((lcm(LC(h), LC(g2)) * R.monomial_lcm(LM(h), LM(g2))) == lcm_12)):
            B_new.add((g1, g2))
    B_new = B_new.union(E)
    # Remove basis elements whose leading term is divisible by LT(h).
    G_new = set()
    while G:
        g = G.pop()
        if (not lt_divides(g, h)):
            G_new.add(g)
    G_new.add(h)
    return (G_new, B_new)
def QDM_57_9_1_1_8():
    """Return a quasi-difference matrix over Z/57Z built from OA(9, 9, 2).

    Constant rows of the orthogonal array are dropped, entries are mapped
    through the base block ``B`` (index 0 -> None placeholder), and a
    final all-zero row is appended.
    """
    from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as G
    B = [None, 1, 6, 7, 9, 19, 38, 42, 49]
    OA = orthogonal_array(9, 9, 2)
    non_constant_rows = [row for row in OA if not all(x == row[0] for x in row)]
    M = [[B[x] for x in row] for row in non_constant_rows]
    M.append([0] * 9)
    return (G(57), M)
import pytest  # needed by the decorators below; harmless if already imported


@pytest.mark.parametrize('observation_shape', [(4,), ((4,), (8,))])
@pytest.mark.parametrize('length', [100])
@pytest.mark.parametrize('pad_size', [5])
def test_batch_pad_observations(observation_shape: Shape, length: int, pad_size: int) -> None:
    """batch_pad_observations must left-pad with zeros along the time axis.

    BUGFIX(review): restored the ``@pytest.mark`` prefix that had been
    mangled into bare leading-dot ``.parametrize(...)`` lines (a syntax
    error as written).
    """
    observations = create_observations(observation_shape, length)
    padded_observations = batch_pad_observations(observations, pad_size)
    if isinstance(padded_observations, list):
        # Tuple-of-shapes input: each component is padded independently.
        for (i, shape) in enumerate(observation_shape):
            assert isinstance(shape, tuple)
            assert (padded_observations[i].shape == ((pad_size + length), *shape))
            assert np.all((padded_observations[i][pad_size:] == observations[i]))
            assert np.all((padded_observations[i][:pad_size] == 0.0))
    else:
        assert isinstance(padded_observations, np.ndarray)
        assert (padded_observations.shape == ((pad_size + length), *observation_shape))
        assert np.all((padded_observations[pad_size:] == observations))
        assert np.all((padded_observations[:pad_size] == 0.0))
def preprocess_with_artifacts(net_preproc_fn, jpeg_quality_range, scale_factor_range, jitter=True):
    """Compose image preparation, artifact induction and network preprocessing."""
    stages = (
        prepare_image_fn(jitter=jitter),
        generate_induce_artifacts(jpeg_quality_range, scale_factor_range),
        transforms.Lambda(net_preproc_fn),
    )
    return transforms.Compose(stages)
class SequenceNode(ExprNode):
    """Base class for tuple/list constructor expression nodes.

    Holds the element expressions in ``args`` plus an optional
    ``mult_factor`` for ``seq * n`` style sequences; also implements
    sequence *unpacking* when the node is used as an assignment target.
    """
    subexprs = ['args', 'mult_factor']
    is_sequence_constructor = 1
    unpacked_items = None  # filled by analyse_target_types for unpacking
    mult_factor = None  # set for multiplied sequences like (a, b) * n
    slow = False  # trade speed for code size (see packing code below)
def compile_time_value_list(self, denv):
return [arg.compile_time_value(denv) for arg in self.args]
def replace_starred_target_node(self):
self.starred_assignment = False
args = []
for arg in self.args:
if arg.is_starred:
if self.starred_assignment:
error(arg.pos, 'more than 1 starred expression in assignment')
self.starred_assignment = True
arg = arg.target
arg.is_starred = True
args.append(arg)
self.args = args
def analyse_target_declaration(self, env):
self.replace_starred_target_node()
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
for (i, arg) in enumerate(self.args):
if (not skip_children):
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
if self.mult_factor:
self.mult_factor = self.mult_factor.analyse_types(env)
if (not self.mult_factor.type.is_int):
self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
self.is_temp = 1
return self
def coerce_to_ctuple(self, dst_type, env):
if (self.type == dst_type):
return self
assert (not self.mult_factor)
if (len(self.args) != dst_type.size):
error(self.pos, ('trying to coerce sequence to ctuple of wrong length, expected %d, got %d' % (dst_type.size, len(self.args))))
coerced_args = [arg.coerce_to(type, env) for (arg, type) in zip(self.args, dst_type.components)]
return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True)
def _create_merge_node_if_necessary(self, env):
self._flatten_starred_args()
if (not any((arg.is_starred for arg in self.args))):
return self
args = []
values = []
for arg in self.args:
if arg.is_starred:
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
values = []
args.append(arg.target)
else:
values.append(arg)
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
node = MergedSequenceNode(self.pos, args, self.type)
if self.mult_factor:
node = binop_node(self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env), inplace=True, type=self.type, is_temp=True)
return node
def _flatten_starred_args(self):
args = []
for arg in self.args:
if (arg.is_starred and arg.target.is_sequence_constructor and (not arg.target.mult_factor)):
args.extend(arg.target.args)
else:
args.append(arg)
self.args[:] = args
    def may_be_none(self):
        # A sequence constructor always yields a fresh object, never None.
        return False
    def analyse_target_types(self, env):
        """Analyse this node as an unpacking assignment target.

        Allocates one temp per element (``unpacked_items``) plus a coerced
        view of each (``coerced_unpacked_items``) used to assign into the
        real targets.
        """
        if self.mult_factor:
            error(self.pos, "can't assign to multiplied sequence")
        self.unpacked_items = []
        self.coerced_unpacked_items = []
        self.any_coerced_items = False
        for (i, arg) in enumerate(self.args):
            arg = self.args[i] = arg.analyse_target_types(env)
            if arg.is_starred:
                # A starred target collects the rest into a Python list.
                if (not arg.type.assignable_from(list_type)):
                    error(arg.pos, 'starred target must have Python object (list) type')
                if (arg.type is py_object_type):
                    arg.type = list_type
            unpacked_item = PyTempNode(self.pos, env)
            coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
            if (unpacked_item is not coerced_unpacked_item):
                self.any_coerced_items = True
            self.unpacked_items.append(unpacked_item)
            self.coerced_unpacked_items.append(coerced_unpacked_item)
        self.type = py_object_type
        return self
    def generate_result_code(self, code):
        # Delegate to the subclass-specific operation code generator.
        self.generate_operation_code(code)
    def generate_sequence_packing_code(self, code, target=None, plain=False):
        """Emit C code that builds the tuple/list in ``target``.

        ``plain`` suppresses the ``mult_factor`` handling. Three code
        paths: PyTuple_Pack for literal tuples, direct field assignment
        for ctuples, and PyList_New/PyTuple_New + SET_ITEM otherwise
        (with an optional C-level repetition loop for an int factor).
        """
        if (target is None):
            target = self.result()
        size_factor = c_mult = ''
        mult_factor = None
        if (self.mult_factor and (not plain)):
            mult_factor = self.mult_factor
            if mult_factor.type.is_int:
                c_mult = mult_factor.result()
                # Pick a C size expression; negative factors clamp to 0.
                if (isinstance(mult_factor.constant_result, _py_int_types) and (mult_factor.constant_result > 0)):
                    size_factor = (' * %s' % mult_factor.constant_result)
                elif mult_factor.type.signed:
                    size_factor = (' * ((%s<0) ? 0:%s)' % (c_mult, c_mult))
                else:
                    size_factor = (' * (%s)' % (c_mult,))
        if ((self.type is tuple_type) and (self.is_literal or self.slow) and (not c_mult)):
            # Simple unmultiplied tuple literal: pack in one C call.
            code.putln(('%s = PyTuple_Pack(%d, %s); %s' % (target, len(self.args), ', '.join((arg.py_result() for arg in self.args)), code.error_goto_if_null(target, self.pos))))
            code.put_gotref(target)
        elif self.type.is_ctuple:
            # C tuple: plain struct field assignments, no refcounting.
            for (i, arg) in enumerate(self.args):
                code.putln(('%s.f%s = %s;' % (target, i, arg.result())))
        else:
            if (self.type is list_type):
                (create_func, set_item_func) = ('PyList_New', 'PyList_SET_ITEM')
            elif (self.type is tuple_type):
                (create_func, set_item_func) = ('PyTuple_New', 'PyTuple_SET_ITEM')
            else:
                raise InternalError(('sequence packing for unexpected type %s' % self.type))
            arg_count = len(self.args)
            code.putln(('%s = %s(%s%s); %s' % (target, create_func, arg_count, size_factor, code.error_goto_if_null(target, self.pos))))
            code.put_gotref(target)
            if c_mult:
                # Repeat the argument block c_mult times in a C for-loop.
                counter = Naming.quick_temp_cname
                code.putln(('{ Py_ssize_t %s;' % counter))
                if (arg_count == 1):
                    offset = counter
                else:
                    offset = ('%s * %s' % (counter, arg_count))
                code.putln(('for (%s=0; %s < %s; %s++) {' % (counter, counter, c_mult, counter)))
            else:
                offset = ''
            for i in range(arg_count):
                arg = self.args[i]
                if (c_mult or (not arg.result_in_temp())):
                    # SET_ITEM steals a reference; grab one when needed.
                    code.put_incref(arg.result(), arg.ctype())
                code.put_giveref(arg.py_result())
                code.putln(('%s(%s, %s, %s);' % (set_item_func, target, (((offset and i) and ('%s + %s' % (offset, i))) or (offset or i)), arg.py_result())))
            if c_mult:
                code.putln('}')
                code.putln('}')
        if ((mult_factor is not None) and mult_factor.type.is_pyobject):
            # Python-object factor: fall back to PyNumber_InPlaceMultiply.
            code.putln(('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (Naming.quick_temp_cname, target, mult_factor.py_result(), code.error_goto_if_null(Naming.quick_temp_cname, self.pos))))
            code.put_gotref(Naming.quick_temp_cname)
            code.put_decref(target, py_object_type)
            code.putln(('%s = %s;' % (target, Naming.quick_temp_cname)))
            code.putln('}')
    def generate_subexpr_disposal_code(self, code):
        # Disposal must mirror generate_sequence_packing_code: in the
        # int-factor and PyTuple_Pack paths argument refs were *not* stolen,
        # so the default disposal applies; otherwise refs were given to the
        # sequence and only post-assignment handling is needed.
        if (self.mult_factor and self.mult_factor.type.is_int):
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        elif ((self.type is tuple_type) and (self.is_literal or self.slow)):
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        else:
            for arg in self.args:
                arg.generate_post_assignment_code(code)
            if self.mult_factor:
                self.mult_factor.generate_disposal_code(code)
    def generate_assignment_code(self, rhs, code, overloaded_assignment=False, exception_check=None, exception_value=None):
        """Emit unpacking-assignment code, then release all temps."""
        if self.starred_assignment:
            self.generate_starred_assignment_code(rhs, code)
        else:
            self.generate_parallel_assignment_code(rhs, code)
        for item in self.unpacked_items:
            item.release(code)
        rhs.free_temps(code)
    # C type of the tp_iternext slot function, used by the unpacking helpers.
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(PyrexTypes.py_object_type, [PyrexTypes.CFuncTypeArg('it', PyrexTypes.py_object_type, None)]))
    def generate_parallel_assignment_code(self, rhs, code):
        """Generate C code that unpacks ``rhs`` into all targets of this
        sequence assignment in parallel (``a, b = rhs``).

        Chooses between a fast path for plain-object / exact tuple / exact
        list RHS values and a generic iterator-protocol unpacking loop.
        """
        # Allocate a temp for every unpacked value before emitting code.
        for item in self.unpacked_items:
            item.allocate(code)
        # Fast unpacking applies when the RHS is a plain object, an exact
        # tuple/list, or a non-builtin type (which may still turn out to
        # be a tuple/list at runtime).
        special_unpack = ((rhs.type is py_object_type) or (rhs.type in (tuple_type, list_type)) or (not rhs.type.is_builtin_type))
        # Only worth emitting a C loop (instead of straight-line item
        # reads) for more than three targets.
        long_enough_for_a_loop = (len(self.unpacked_items) > 3)
        if special_unpack:
            self.generate_special_parallel_unpacking_code(code, rhs, use_loop=long_enough_for_a_loop)
        else:
            code.putln('{')
            self.generate_generic_parallel_unpacking_code(code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
            code.putln('}')
        # Coerce each unpacked value to its target type, then assign.
        for value_node in self.coerced_unpacked_items:
            value_node.generate_evaluation_code(code)
        for i in range(len(self.args)):
            self.args[i].generate_assignment_code(self.coerced_unpacked_items[i], code)
    def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
        """Emit the fast unpacking path for an RHS that is (or may be) an
        exact tuple or list: check the length once, then read the items
        directly with the PyTuple/PyList item macros, falling back to
        PySequence_ITEM or generic iteration where that is not safe.

        ``use_loop``: emit a C for-loop over the item reads instead of
        straight-line code (used when there are many targets).
        """
        # '1' == the runtime sequence-type test is statically known true.
        sequence_type_test = '1'
        none_check = ('likely(%s != Py_None)' % rhs.py_result())
        if (rhs.type is list_type):
            sequence_types = ['List']
            if rhs.may_be_none():
                sequence_type_test = none_check
        elif (rhs.type is tuple_type):
            sequence_types = ['Tuple']
            if rhs.may_be_none():
                sequence_type_test = none_check
        else:
            # Unknown object: test for exact tuple first, then exact list.
            sequence_types = ['Tuple', 'List']
            tuple_check = ('likely(PyTuple_CheckExact(%s))' % rhs.py_result())
            list_check = ('PyList_CheckExact(%s)' % rhs.py_result())
            sequence_type_test = ('(%s) || (%s)' % (tuple_check, list_check))
        code.putln(('if (%s) {' % sequence_type_test))
        code.putln(('PyObject* sequence = %s;' % rhs.py_result()))
        # Exact size check: raise TooManyValues/NeedMoreValues on mismatch.
        code.putln('Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);')
        code.putln(('if (unlikely(size != %d)) {' % len(self.args)))
        code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
        code.putln(('if (size > %d) __Pyx_RaiseTooManyValuesError(%d);' % (len(self.args), len(self.args))))
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        code.putln('else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);')
        code.putln(code.error_goto(self.pos))
        code.putln('}')
        # Fast path: borrowed-reference reads with the GET_ITEM macros.
        code.putln('#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS')
        if (len(sequence_types) == 2):
            # Runtime dispatch between the tuple and list macro variants.
            code.putln(('if (likely(Py%s_CheckExact(sequence))) {' % sequence_types[0]))
        for (i, item) in enumerate(self.unpacked_items):
            code.putln(('%s = Py%s_GET_ITEM(sequence, %d); ' % (item.result(), sequence_types[0], i)))
        if (len(sequence_types) == 2):
            code.putln('} else {')
            for (i, item) in enumerate(self.unpacked_items):
                code.putln(('%s = Py%s_GET_ITEM(sequence, %d); ' % (item.result(), sequence_types[1], i)))
            code.putln('}')
        # GET_ITEM returned borrowed references; take ownership now.
        for item in self.unpacked_items:
            code.put_incref(item.result(), item.ctype())
        # Portable path: PySequence_ITEM returns new references.
        code.putln('#else')
        if (not use_loop):
            for (i, item) in enumerate(self.unpacked_items):
                code.putln(('%s = PySequence_ITEM(sequence, %d); %s' % (item.result(), i, code.error_goto_if_null(item.result(), self.pos))))
                code.put_gotref(item.result())
        else:
            # Loop over an array of pointers to the target temps.
            code.putln('{')
            code.putln('Py_ssize_t i;')
            code.putln(('PyObject** temps[%s] = {%s};' % (len(self.unpacked_items), ','.join([('&%s' % item.result()) for item in self.unpacked_items]))))
            code.putln(('for (i=0; i < %s; i++) {' % len(self.unpacked_items)))
            code.putln(('PyObject* item = PySequence_ITEM(sequence, i); %s' % code.error_goto_if_null('item', self.pos)))
            code.put_gotref('item')
            code.putln('*(temps[i]) = item;')
            code.putln('}')
            code.putln('}')
        code.putln('#endif')
        rhs.generate_disposal_code(code)
        if (sequence_type_test == '1'):
            code.putln('}')
        elif (sequence_type_test == none_check):
            # Only None could fail the test: raise the dedicated error.
            code.putln('} else {')
            code.globalstate.use_utility_code(UtilityCode.load_cached('RaiseNoneIterError', 'ObjectHandling.c'))
            code.putln(('__Pyx_RaiseNoneNotIterableError(); %s' % code.error_goto(self.pos)))
            code.putln('}')
        else:
            # Not an exact tuple/list: fall back to generic iteration.
            code.putln('} else {')
            self.generate_generic_parallel_unpacking_code(code, rhs, self.unpacked_items, use_loop=use_loop)
            code.putln('}')
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.globalstate.use_utility_code(UtilityCode.load_cached('IterFinish', 'ObjectHandling.c'))
code.putln('Py_ssize_t index = -1;')
if use_loop:
code.putln(('PyObject** temps[%s] = {%s};' % (len(self.unpacked_items), ','.join([('&%s' % item.result()) for item in unpacked_items]))))
iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(('%s = PyObject_GetIter(%s); %s' % (iterator_temp, rhs.py_result(), code.error_goto_if_null(iterator_temp, self.pos))))
code.put_gotref(iterator_temp)
rhs.generate_disposal_code(code)
iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
code.putln(('%s = Py_TYPE(%s)->tp_iternext;' % (iternext_func, iterator_temp)))
unpacking_error_label = code.new_label('unpacking_failed')
unpack_code = ('%s(%s)' % (iternext_func, iterator_temp))
if use_loop:
code.putln(('for (index=0; index < %s; index++) {' % len(unpacked_items)))
code.put(('PyObject* item = %s; if (unlikely(!item)) ' % unpack_code))
code.put_goto(unpacking_error_label)
code.put_gotref('item')
code.putln('*(temps[index]) = item;')
code.putln('}')
else:
for (i, item) in enumerate(unpacked_items):
code.put(('index = %d; %s = %s; if (unlikely(!%s)) ' % (i, item.result(), unpack_code, item.result())))
code.put_goto(unpacking_error_label)
code.put_gotref(item.py_result())
if terminate:
code.globalstate.use_utility_code(UtilityCode.load_cached('UnpackItemEndCheck', 'ObjectHandling.c'))
code.put_error_if_neg(self.pos, ('__Pyx_IternextUnpackEndCheck(%s, %d)' % (unpack_code, len(unpacked_items))))
code.putln(('%s = NULL;' % iternext_func))
code.put_decref_clear(iterator_temp, py_object_type)
unpacking_done_label = code.new_label('unpacking_done')
code.put_goto(unpacking_done_label)
code.put_label(unpacking_error_label)
code.put_decref_clear(iterator_temp, py_object_type)
code.putln(('%s = NULL;' % iternext_func))
code.putln('if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);')
code.putln(code.error_goto(self.pos))
code.put_label(unpacking_done_label)
code.funcstate.release_temp(iternext_func)
if terminate:
code.funcstate.release_temp(iterator_temp)
iterator_temp = None
return iterator_temp
    def generate_starred_assignment_code(self, rhs, code):
        """Emit code for an assignment with a starred target, e.g.
        ``a, *b, c = rhs``: unpack the fixed leading targets from an
        iterator, collect the remainder into a list, then pop the fixed
        trailing targets off the end of that list.
        """
        # Locate the starred target and split the temps around it.
        for (i, arg) in enumerate(self.args):
            if arg.is_starred:
                starred_target = self.unpacked_items[i]
                unpacked_fixed_items_left = self.unpacked_items[:i]
                unpacked_fixed_items_right = self.unpacked_items[(i + 1):]
                break
        else:
            assert False
        iterator_temp = None
        if unpacked_fixed_items_left:
            for item in unpacked_fixed_items_left:
                item.allocate(code)
            code.putln('{')
            # Unpack the leading fixed targets; keep the iterator alive
            # (terminate=False) so the remainder can be consumed below.
            iterator_temp = self.generate_generic_parallel_unpacking_code(code, rhs, unpacked_fixed_items_left, use_loop=True, terminate=False)
            for (i, item) in enumerate(unpacked_fixed_items_left):
                value_node = self.coerced_unpacked_items[i]
                value_node.generate_evaluation_code(code)
            code.putln('}')
        # Collect everything that is left into the starred list target.
        starred_target.allocate(code)
        target_list = starred_target.result()
        code.putln(('%s = PySequence_List(%s); %s' % (target_list, (iterator_temp or rhs.py_result()), code.error_goto_if_null(target_list, self.pos))))
        code.put_gotref(target_list)
        if iterator_temp:
            code.put_decref_clear(iterator_temp, py_object_type)
            code.funcstate.release_temp(iterator_temp)
        else:
            rhs.generate_disposal_code(code)
        if unpacked_fixed_items_right:
            code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
            length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln(('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list)))
            # The list must hold at least the trailing fixed targets.
            code.putln(('if (unlikely(%s < %d)) {' % (length_temp, len(unpacked_fixed_items_right))))
            code.putln(('__Pyx_RaiseNeedMoreValuesError(%d+%s); %s' % (len(unpacked_fixed_items_left), length_temp, code.error_goto(self.pos))))
            code.putln('}')
            for item in unpacked_fixed_items_right[::(- 1)]:
                item.allocate(code)
            # Pop trailing items off the list back-to-front.  In CPython
            # we steal the reference and shrink the list in place; on
            # other implementations we copy items and slice afterwards.
            for (i, (item, coerced_arg)) in enumerate(zip(unpacked_fixed_items_right[::(- 1)], self.coerced_unpacked_items[::(- 1)])):
                code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
                code.putln(('%s = PyList_GET_ITEM(%s, %s-%d); ' % (item.py_result(), target_list, length_temp, (i + 1))))
                # Steal the reference: shrink the list without a DECREF.
                code.putln(('((PyVarObject*)%s)->ob_size--;' % target_list))
                code.putln('#else')
                code.putln(('%s = PySequence_ITEM(%s, %s-%d); ' % (item.py_result(), target_list, length_temp, (i + 1))))
                code.putln('#endif')
                code.put_gotref(item.py_result())
                coerced_arg.generate_evaluation_code(code)
            # Non-CPython: the items were copied, so trim the list here.
            code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
            sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            code.putln(('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right), code.error_goto_if_null(sublist_temp, self.pos))))
            code.put_gotref(sublist_temp)
            code.funcstate.release_temp(length_temp)
            code.put_decref(target_list, py_object_type)
            code.putln(('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp)))
            code.putln('#else')
            code.putln(('(void)%s;' % sublist_temp))
            code.funcstate.release_temp(sublist_temp)
            code.putln('#endif')
        # Finally coerce/assign each target from its unpacked value.
        for (i, arg) in enumerate(self.args):
            arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code) |
def seed(seed=None):
    """Seed the backend's random number generator.

    Accepted values:
      - the string 'default': restore the backend's built-in seed state;
      - a length-55 sequence of floats in [0, 1]: used as the state directly;
      - None: draw a fresh random state;
      - anything else: treated as a NumPy RandomState seed for a
        reproducible state.

    Raises ValueError if a sequence input has the wrong size or values
    outside [0, 1].
    """
    if isinstance(seed, str) and seed == 'default':
        backend.id_srando()
        return
    if hasattr(seed, '__len__'):
        state = np.asfortranarray(seed, dtype=float)
        if state.shape != (55,):
            raise ValueError('invalid input size')
        if state.min() < 0 or state.max() > 1:
            raise ValueError('values not in range [0,1]')
        backend.id_srandi(state)
        return
    if seed is None:
        backend.id_srandi(np.random.rand(55))
        return
    backend.id_srandi(np.random.RandomState(seed).rand(55))
@pytest.fixture(scope='module')
def simulation_one_loop(atomic_data_fname, config, tardis_ref_data, generate_reference):
    """Module-scoped fixture: run a short two-iteration simulation.

    Restores the decorator that was truncated to a bare ``(scope='module')``
    (a syntax error).  ``tardis_ref_data`` and ``generate_reference`` are
    requested only to establish fixture dependencies.
    """
    config.atom_data = atomic_data_fname
    config.montecarlo.iterations = 2
    config.montecarlo.no_of_packets = 40000
    config.montecarlo.last_no_of_packets = 40000
    simulation = Simulation.from_config(config)
    simulation.run_convergence()
    simulation.run_final()
    return simulation
def is_FunctionField(x):
    """Return True if ``x`` is a function field, either by class
    (``FunctionField``) or by membership in the ``FunctionFields()``
    category."""
    return isinstance(x, FunctionField) or (x in FunctionFields())
class DataPrefetcher():
    """Wraps a data loader and prefetches the next batch to the GPU on a
    dedicated CUDA stream, overlapping host-to-device copies with compute.

    NOTE(review): assumes each batch exposes a ``.text`` tensor attribute
    (see preload) — confirm against the loaders used by callers.
    """

    def __init__(self, loader, device, init=False):
        # ``device`` is accepted for interface compatibility but unused here.
        self.loader = loader
        self.iter = None
        self.stream = torch.cuda.Stream()
        if init:
            # Bug fix: the original assigned ``self.loader`` directly, which
            # breaks preload() when the loader is an iterable rather than an
            # iterator; wrap in iter() as __iter__ already does.
            self.iter = iter(self.loader)
            self.preload()

    def __len__(self):
        return len(self.loader)

    def preload(self):
        """Fetch the next batch and start its async copy to the GPU.

        Sets ``self.next_batch`` to None when the iterator is exhausted.
        """
        try:
            self.next_batch = next(self.iter)
        except StopIteration:
            self.next_batch = None
            return
        with torch.cuda.stream(self.stream):
            self.next_batch = self.next_batch.text.cuda(non_blocking=True)

    def next(self):
        """Return the prefetched batch and kick off the next prefetch."""
        # Ensure the copy issued on the side stream has completed.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.next_batch
        self.preload()
        return batch

    def __iter__(self):
        self.iter = iter(self.loader)
        self.preload()
        while True:
            batch = self.next()
            if batch is None:
                break
            yield batch
def WloopIN_Y(X1, X2, S, E):
    """Assemble the data matrix with columns ordered [X1, S, X2, X1, E, X2].

    X1, X2 are column vectors of equal length; S and E are scalars that
    are broadcast into constant columns.
    """
    data = np.append(X1, X2, axis=1)
    n_rows = data.shape[0]
    row_of_ones = np.ones((1, n_rows))
    data = np.insert(data, 1, S * row_of_ones, axis=1)
    data = np.insert(data, 3, X1.T, axis=1)
    data = np.insert(data, 4, E * row_of_ones, axis=1)
    data = np.insert(data, 5, X2.T, axis=1)
    return data
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
    """YAML loader assembled from the full processing pipeline:
    reading, scanning, parsing, composing, basic construction and
    basic (non-implicit) resolution.
    """

    def __init__(self, stream):
        # Reader is the only base that consumes the input stream; the
        # remaining bases take no arguments and are initialised in
        # pipeline order.
        Reader.__init__(self, stream)
        for base in (Scanner, Parser, Composer, BaseConstructor, BaseResolver):
            base.__init__(self)
# NOTE(review): the registration decorator was truncated to a bare
# '_module()' call in the original; reconstructed below following the
# dataset-registry convention — verify the registry name.
@DATASETS.register_module()
class DRIVEDataset(CustomDataset):
    """DRIVE retinal vessel segmentation dataset.

    Images use the '.png' suffix and annotation maps the '_manual1.png'
    suffix; label 0 is background, so ``reduce_zero_label`` stays False.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        super(DRIVEDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='_manual1.png',
            reduce_zero_label=False,
            **kwargs)
        # Fail fast if the image directory is missing.
        assert self.file_client.exists(self.img_dir)
def test_augment_ratio():
    """Check get_augment_ratio: zero ratio when already satisfied, an
    assertion error when the predicates are swapped inconsistently, and
    the exact fraction otherwise."""
    data = list(range(1, 11))

    def should_augment(x):
        return x >= 3

    def can_augment(x):
        return x >= 4

    assert get_augment_ratio(data, should_augment, can_augment, desired_ratio=0.1) == 0.0
    with pytest.raises(AssertionError):
        get_augment_ratio(data, can_augment, should_augment)
    assert get_augment_ratio(data, should_augment, can_augment, desired_ratio=0.4) == pytest.approx(2 / 7)
class FreeAlgebraQuotientElement(AlgebraElement):
    """Element of a free algebra quotient, stored as a coefficient
    vector over the quotient's monomial basis.

    All arithmetic is delegated to the underlying free module; products
    are computed via the quotient's precomputed matrix action of the
    generators (see ``_mul_``).
    """

    def __init__(self, A, x):
        """Construct an element of the quotient algebra ``A`` from ``x``.

        ``x`` may be another element of the same quotient, an integer, a
        module element, a base-ring element, a monomial of the basis, a
        coefficient list, a free-algebra element, a monomial->coefficient
        dict, or an element of an algebra with ambient algebra ``A``.

        Raises TypeError (or AttributeError for a non-basis monomial) on
        unsupported input.
        """
        AlgebraElement.__init__(self, A)
        Q = self.parent()
        # Same quotient: copy the coefficient vector.
        if (isinstance(x, FreeAlgebraQuotientElement) and (x.parent() == Q)):
            self.__vector = Q.module()(x.vector())
            return
        # Integer: multiple of the first basis vector (the identity monomial).
        if isinstance(x, (Integer, int)):
            self.__vector = (Q.module().gen(0) * x)
            return
        elif (isinstance(x, FreeModuleElement) and (x.parent() is Q.module())):
            self.__vector = x
            return
        elif (isinstance(x, FreeModuleElement) and (x.parent() == A.module())):
            self.__vector = x
            return
        R = A.base_ring()
        M = A.module()
        F = A.monoid()
        B = A.monomial_basis()
        # NOTE: a duplicate isinstance(x, (Integer, int)) branch sat here
        # in the original; it was unreachable (those types return above)
        # and has been removed.
        if (isinstance(x, RingElement) and (not isinstance(x, AlgebraElement)) and (x in R)):
            # Base-ring scalar: multiple of the identity monomial.
            self.__vector = (x * M.gen(0))
        elif (isinstance(x, FreeMonoidElement) and (x.parent() is F)):
            if (x in B):
                self.__vector = M.gen(B.index(x))
            else:
                raise AttributeError(('argument x (= %s) is not in monomial basis' % x))
        elif (isinstance(x, list) and (len(x) == A.dimension())):
            # Coefficient list over the monomial basis.
            try:
                self.__vector = M(x)
            except TypeError:
                raise TypeError(('argument x (= %s) is of the wrong type' % x))
        elif (isinstance(x, FreeAlgebraElement) and (x.parent() is A.free_algebra())):
            # Expand a free-algebra element over the basis monomials.
            self.__vector = M(0)
            for (m, c) in x._FreeAlgebraElement__monomial_coefficients.items():
                self.__vector += (c * M.gen(B.index(m)))
        elif isinstance(x, dict):
            # Mapping from basis monomials to coefficients.
            self.__vector = M(0)
            for (m, c) in x.items():
                self.__vector += (c * M.gen(B.index(m)))
        elif (isinstance(x, AlgebraElement) and (x.parent().ambient_algebra() is A)):
            self.__vector = x.ambient_algebra_element().vector()
        else:
            raise TypeError(('argument x (= %s) is of the wrong type' % x))

    def _repr_(self):
        """String form as a linear combination over the basis monomials,
        printed with the quotient's variable names."""
        Q = self.parent()
        M = Q.monoid()
        with localvars(M, Q.variable_names()):
            cffs = list(self.__vector)
            mons = Q.monomial_basis()
            return repr_lincomb(zip(mons, cffs), strip_one=True)

    def _latex_(self):
        """LaTeX form of the same linear combination as ``_repr_``."""
        Q = self.parent()
        M = Q.monoid()
        with localvars(M, Q.variable_names()):
            cffs = tuple(self.__vector)
            mons = Q.monomial_basis()
            return repr_lincomb(zip(mons, cffs), is_latex=True, strip_one=True)

    def vector(self):
        """Return the underlying coefficient vector."""
        return self.__vector

    def _richcmp_(self, right, op):
        # Compare via the coefficient vectors.
        return richcmp(self.vector(), right.vector(), op)

    def __neg__(self):
        y = self.parent()(0)
        y.__vector = (- self.__vector)
        return y

    def _add_(self, y):
        A = self.parent()
        z = A(0)
        z.__vector = (self.__vector + y.__vector)
        return z

    def _sub_(self, y):
        A = self.parent()
        z = A(0)
        z.__vector = (self.__vector - y.__vector)
        return z

    def _mul_(self, y):
        """Multiply via the quotient's matrix action of the generators."""
        A = self.parent()

        def monomial_product(X, w, m):
            # Apply, for each generator power in monomial m, the
            # corresponding right-action matrix to the vector w.
            mats = X._FreeAlgebraQuotient__matrix_action
            for (j, k) in m._element_list:
                M = mats[int(j)]
                for _ in range(k):
                    w *= M
                return w

        u = self.__vector.__copy__()
        v = y.__vector
        z = A(0)
        B = A.monomial_basis()
        # Accumulate u * (c_i * basis_monomial_i) over y's nonzero terms.
        for i in range(A.dimension()):
            c = v[i]
            if (c != 0):
                z.__vector += monomial_product(A, (c * u), B[i])
        return z

    def _rmul_(self, c):
        """Left scalar multiplication c * self."""
        return self.parent([(c * a) for a in self.__vector])

    def _lmul_(self, c):
        """Right scalar multiplication self * c."""
        return self.parent([(a * c) for a in self.__vector])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.