code stringlengths 101 5.91M |
|---|
def make_prediction_list(datasets: dict[(DevTest, list[RawData])], predictions: dict[(DevTest, dict[(str, dict)])]) -> tuple[(dict[(DevTest, list[int])], dict[(DevTest, list[float])])]:
    """Collect aligned label/score lists per split, keeping only examples that have a prediction.

    Returns (labels_by_split, scores_by_split); entries stay index-aligned.
    """
    labels_by_split: dict[(DevTest, list[int])] = {}
    scores_by_split: dict[(DevTest, list[float])] = {}
    for split in ['dev', 'test']:
        labels: list[int] = []
        scores: list[float] = []
        for example in datasets[split]:
            example_id = example['meta']['id']
            # Skip examples the model produced no prediction for.
            if example_id in predictions[split]:
                scores.append(predictions[split][example_id])
                labels.append(label_str_to_int[example['label']])
        labels_by_split[split] = labels
        scores_by_split[split] = scores
    return (labels_by_split, scores_by_split)
def read_data(prefix, labels_dic, mixing, files_from_cl):
    """Build one shuffled (image, label) training pair using the TF1 queue API.

    JPEG file names are expected to encode their class key before the first '_'.
    """
    jpeg_names = [f for f in files_from_cl if f.endswith('JPEG')]
    image_list = sorted(os.path.join(prefix, f) for f in jpeg_names)
    # Class key = token before the first '_' in the path relative to `prefix`.
    prefix2 = np.array([p.split(prefix + '/')[1].split('_')[0] for p in image_list])
    labels_list = np.array([mixing[labels_dic[key]] for key in prefix2])
    assert (len(image_list) == len(labels_list))
    images = tf.convert_to_tensor(image_list, dtype=tf.string)
    labels = tf.convert_to_tensor(labels_list, dtype=tf.int32)
    input_queue = tf.train.slice_input_producer([images, labels], shuffle=True, capacity=2000)
    image_file_content = tf.read_file(input_queue[0])
    label = input_queue[1]
    # Decode, resize to 256x256, random-crop to 224x224, then random horizontal flip.
    decoded = tf.image.decode_jpeg(image_file_content, channels=3)
    image = tf.image.resize_images(decoded, [256, 256])
    image = tf.random_crop(image, [224, 224, 3])
    image = tf.image.random_flip_left_right(image)
    return (image, label)
def test_init_negative_approach_level():
    """Constructing a ControlFlowDistance with a negative approach level must fail."""
    invalid_level = -1
    with pytest.raises(AssertionError):
        ControlFlowDistance(approach_level=invalid_level)
def main():
    """Parse `num_pairs N Np` from argv and print `num_pairs` random node pairs.

    Each line is "i j" with i drawn from [0, Np-1] and j from [0, N-Np-1].
    Exits with an error if Np > N; calls usage() on any argument problem.
    """
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], '')
    except getopt.GetoptError:
        # Was a bare `except:` — catch only option-parsing errors.
        usage(sys.argv[0])
    for (opt, arg) in opts:
        # No options are defined, so any supplied option is a usage error.
        usage(sys.argv[0])
    if len(args) != 3:
        usage(sys.argv[0])
    num_pairs = int(args[0])
    N = int(args[1])
    Np = int(args[2])
    if Np > N:
        sys.stderr.write('ERROR: Must have Np <= N\n')
        sys.exit(1)
    # `range` replaces py2-only `xrange`; iteration behavior is identical.
    for i in range(num_pairs):
        nodepair_i = random.randint(0, Np - 1)
        nodepair_j = random.randint(0, N - Np - 1)
        sys.stdout.write('%d %d\n' % (nodepair_i, nodepair_j))
class AcquisitionFunctionBuilder(Generic[ProbabilisticModelType], ABC):
    """Builds and updates an acquisition function from models and (optional) datasets."""

    @abstractmethod
    def prepare_acquisition_function(self, models: Mapping[(Tag, ProbabilisticModelType)], datasets: Optional[Mapping[(Tag, Dataset)]]=None) -> AcquisitionFunction:
        """Return a new acquisition function for the given models/datasets.

        NOTE(review): the original source had no body for this method (almost
        certainly a stripped docstring/@abstractmethod on an ABC) — restored as
        an abstract method; confirm against the upstream project.
        """

    def update_acquisition_function(self, function: AcquisitionFunction, models: Mapping[(Tag, ProbabilisticModelType)], datasets: Optional[Mapping[(Tag, Dataset)]]=None) -> AcquisitionFunction:
        """Default update: rebuild the acquisition function from scratch."""
        return self.prepare_acquisition_function(models, datasets=datasets)
class TestSensitivityMetricInterestPoints(unittest.TestCase):
    """Tests for interest-point selection used by the mixed-precision sensitivity metric."""

    def test_filtered_interest_points_set(self):
        # With factor 0.5 the filtered list must hold at most half the candidate nodes.
        in_model = DenseNet121()
        (ips, graph, fw_info) = build_ip_list_for_test(in_model, num_interest_points_factor=0.5)
        sorted_nodes = graph.get_topo_sorted_nodes()
        ip_nodes = list(filter((lambda n: KerasImplementation().count_node_for_mixed_precision_interest_points(n)), sorted_nodes))
        self.assertTrue((len(ips) <= (0.5 * len(ip_nodes))), f'Filtered interest points list should include not more than {(0.5 * len(ip_nodes))}, but it includes {len(ips)}')

    def test_nonfiltered_interest_points_set(self):
        # With factor 1.0 all candidate nodes except one are expected to remain.
        in_model = MobileNetV2()
        (ips, graph, fw_info) = build_ip_list_for_test(in_model, num_interest_points_factor=1.0)
        sorted_nodes = graph.get_topo_sorted_nodes()
        ip_nodes = list(filter((lambda n: KerasImplementation().count_node_for_mixed_precision_interest_points(n)), sorted_nodes))
        # Message fixed: the assertion checks len(ip_nodes) - 1 (the original text
        # said "exactly {len(ip_nodes)}" and contained the typo "itincludes").
        self.assertTrue((len(ips) == (len(ip_nodes) - 1)), f'Filtered interest points list should include exactly {(len(ip_nodes) - 1)}, but it includes {len(ips)}')

    def test_invalid_interest_points_factor(self):
        # The factor must lie in (0, 1]; 1.1 and 0 must both be rejected.
        in_model = MobileNetV2()
        with self.assertRaises(Exception):
            (ips, graph, fw_info) = build_ip_list_for_test(in_model, num_interest_points_factor=1.1)
        with self.assertRaises(Exception):
            (ips, graph, fw_info) = build_ip_list_for_test(in_model, num_interest_points_factor=0)

    def test_softmax_interest_point(self):
        # Softmax nodes must be interest points and use KL divergence on the softmax axis.
        in_model = softmax_model((16, 16, 3))
        (ips, graph, fw_info) = build_ip_list_for_test(in_model, num_interest_points_factor=1.0)
        softmax_nodes = [n for n in graph.get_topo_sorted_nodes() if ((n.layer_class == tf.keras.layers.Softmax) or ((n.layer_class == TFOpLambda) and (n.framework_attr['function'] == 'nn.softmax')))]
        softmax_node2layer = {n: [l for l in in_model.layers if isinstance(l, n.layer_class)][0] for n in softmax_nodes}
        self.assertTrue((len(softmax_nodes) == 2))
        for sn in softmax_nodes:
            self.assertIn(sn, ips, f'Expecting a softmax layer to be considered as interest point for mixed precision distance metric but node {sn.name} is missing.')
            # Two random activations through the same layer, used to compare distances.
            t1 = softmax_node2layer[sn](np.random.rand(*[8, *softmax_node2layer[sn].input_shape[1:]])).numpy()
            t2 = softmax_node2layer[sn](np.random.rand(*[8, *softmax_node2layer[sn].input_shape[1:]])).numpy()
            axis = sn.framework_attr.get(AXIS)
            if axis is None:
                axis = sn.op_call_kwargs.get(AXIS)
            distance_fn = KerasImplementation().get_node_distance_fn(layer_class=sn.layer_class, framework_attrs=sn.framework_attr, axis=axis)
            self.assertEqual(distance_fn, compute_kl_divergence, f'Softmax node should use KL Divergence for distance computation.')
            distance_per_softmax_axis = distance_fn(t1, t2, batch=True, axis=axis)
            distance_global = distance_fn(t1, t2, batch=True, axis=None)
            # Message grammar fixed (was "different than on than computing").
            self.assertFalse(np.isclose(np.mean(distance_per_softmax_axis), distance_global), f'Computing distance for softmax node on softmax activation axis should be different than computing on the entire tensor.')
# BUG FIX: the decorator prefix was stripped in the source (`.parametrize(...)`
# on its own line) — restored as the standard pytest parametrization marker.
@pytest.mark.parametrize('vec', [[1, 1], [1, 0.01], [0.01, 1], [0, 0, 0]])
def test_axisvec2axis_no_primary_coordinate_raises_value_error(vec):
    """Vectors without a single dominant coordinate axis must be rejected."""
    with pytest.raises(ValueError, match='no valid primary coordinate axis'):
        axisvec2axis(vec)
def register_Ns3EventId_methods(root_module, cls):
    """Register pybindgen wrappers for the ns-3 EventId class (operators,
    constructors, and const accessor/cancel methods)."""
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # Copy constructor, default constructor, and the full (impl, ts, context, uid) form.
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    cls.add_method('Cancel', 'void', [])
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return
def train(category):
    """Grab 10 frames from the camera and register their embeddings under `category`.

    Relies on module globals: `counter`, `cameraURL`, and `engine`.
    """
    print('counter = %d, train for category %s' % (counter, category))
    print(cameraURL)
    for _ in range(10):
        response = requests.get(cameraURL)
        frame = Image.open(BytesIO(response.content))
        embedding = engine.DetectWithImage(frame)
        engine.addEmbedding(embedding, category)
class Module(object):
    """Read-only attribute view over a dict of members.

    Attribute access falls through to the `members` mapping; unknown names
    raise RuntimeError with the module name in the message.
    """

    def __init__(self, name, members):
        self.name = name
        self.members = members

    def __getattr__(self, name):
        # Only invoked for names not found through normal attribute lookup.
        if name in self.members:
            return self.members[name]
        raise RuntimeError(f'Module {self.name} has no member called {name}') from None
def batch_iter(X, batch_size=args.batch_size, shuffle=False):
    """Yield row batches of X of size `batch_size`, optionally in random order.

    The index tensor is moved to the GPU when X lives there, so fancy indexing
    stays on-device.
    """
    if shuffle:
        order = torch.randperm(X.shape[0])
    else:
        order = torch.arange(X.shape[0])
    if X.is_cuda:
        order = order.cuda()
    yield from (X[chunk] for chunk in order.split(batch_size))
def load_data(path, test_strat_id=None, cuda=None):
    """Load a joblib dataset, densify atom-type ids, convert features/targets to
    torch tensors, and split strata into train/test index arrays.

    test_strat_id: index of the stratum held out for testing (random if None).
    cuda: CUDA device index to move tensors to, or None to stay on CPU.
    Returns (data, train_indices, test_indices).
    """
    data = joblib.load(path)
    # Remap sparse atom-type ids onto 0..(num_unique-1); unused ids stay -1.
    type_remap = -np.ones(int(data['features']['atom_types'].max()) + 1)
    unique_types = np.unique(data['features']['atom_types']).astype(int)
    type_remap[unique_types] = np.arange(len(unique_types))
    data['features']['atom_types'] = type_remap[data['features']['atom_types'].astype(int)]
    data['features']['geometry'] = torch.FloatTensor(data['features']['geometry'].astype(np.float32))
    data['features']['atom_types'] = torch.LongTensor(data['features']['atom_types'].astype(np.int64))
    data['targets'] = torch.from_numpy(data['targets'])
    if cuda is not None:
        # BUG FIX: Tensor.cuda() returns a copy — the original discarded the
        # results, silently leaving everything on the CPU.
        data['features']['geometry'] = data['features']['geometry'].cuda(cuda)
        data['features']['atom_types'] = data['features']['atom_types'].cuda(cuda)
        data['targets'] = data['targets'].cuda(cuda)
    train = np.ndarray(0)
    test = np.ndarray(0)
    if test_strat_id is None:
        # BUG FIX: `if not test_strat_id` also replaced a legitimate id of 0.
        test_strat_id = np.random.randint(len(data['strats']))
    for i in range(len(data['strats'])):
        if i != test_strat_id:
            train = np.concatenate((train, data['strats'][i]))
        else:
            test = np.concatenate((test, data['strats'][i]))
    return (data, train, test)
def build_hoi_test_loader(cfg, dataset_name, mapper=None):
    """Build a batch-size-1 inference DataLoader over one HOI dataset.

    A default HOIDatasetMapper (test mode) is created when no mapper is given.
    """
    dataset_dicts = get_hoi_dataset_dicts([dataset_name], filter_empty=False)
    if mapper is None:
        mapper = HOIDatasetMapper(cfg, False)
    dataset = MapDataset(DatasetFromList(dataset_dicts), mapper)
    sampler = samplers.InferenceSampler(len(dataset))
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
    return torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
def flatten_model(m):
    """Return the leaf modules of `m` (modules with no children), depth-first."""
    children = list(m.children())
    if not children:
        return [m]
    leaves = []
    for child in children:
        leaves.extend(flatten_model(child))
    return leaves
class DifferentiableArray(ak.Array):
    """An awkward Array whose data buffers are JAX tracers, usable inside JAX
    transforms (jit/grad); structural metadata lives in `aux_data`."""

    def __init__(self, aux_data, tracers):
        self.aux_data = aux_data
        self.tracers = tracers

    # BUG FIX: the source contained two bare `def layout` definitions, so the
    # second silently shadowed the first — clearly a stripped @property getter
    # plus @layout.setter pair (the setter only raises); restored here.
    @property
    def layout(self):
        """Rebuild the awkward layout, substituting tracer primals for data buffers."""
        buffers = dict(self.aux_data.indexes)
        for (key, tracer) in zip(self.aux_data.datakeys, self.tracers):
            if hasattr(tracer, 'primal'):
                buffers[key] = tracer.primal
        return ak.from_buffers(self.aux_data.form, self.aux_data.length, buffers, highlevel=False)

    @layout.setter
    def layout(self, layout):
        raise ValueError('this operation cannot be performed in a JAX-compiled or JAX-differentiated function')

    def __getitem__(self, where):
        out = self.layout[where]
        if isinstance(out, ak.layout.Content):
            # Non-scalar result: repackage as a DifferentiableArray sharing tracers.
            (form, length, indexes) = ak.to_buffers(out, form_key='getitem_node{id}', virtual='pass')
            aux_data = AuxData(form, length, indexes, self.aux_data.datakeys)
            return DifferentiableArray(aux_data, self.tracers)
        else:
            return out

    def __setitem__(self, where, what):
        raise ValueError('this operation cannot be performed in a JAX-compiled or JAX-differentiated function')

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # All DifferentiableArray operands must share structure.
        for x in inputs:
            if isinstance(x, DifferentiableArray):
                assert x.aux_data == self.aux_data
                assert len(x.tracers) == len(self.tracers)
        # Swap the numpy ufunc for its jax.numpy counterpart so tracing works.
        for (name, np_ufunc) in np.core.umath.__dict__.items():
            if ufunc is np_ufunc:
                ufunc = getattr(jax.numpy, name)
        nexttracers = []
        for i in range(len(self.tracers)):
            nextinputs = [(x.tracers[i] if isinstance(x, DifferentiableArray) else x) for x in inputs]
            nexttracers.append(getattr(ufunc, method)(*nextinputs, **kwargs))
        return DifferentiableArray(self.aux_data, nexttracers)
def draw_keypoints(img, corners, color, radius=3, s=3):
    """Upscale `img` by factor `s`, expand to 3 channels, and draw a filled circle
    at each corner (coordinates are flipped (row, col) -> (x, y) and scaled)."""
    upscaled = cv2.resize(img, None, fx=s, fy=s)
    img = np.repeat(upscaled[..., np.newaxis], 3, -1)
    for c in np.stack(corners).T:
        cv2.circle(img, tuple(s * np.flip(c, 0)), radius, color, thickness=-1)
    return img
def test_categorical_option():
    """from_type on a categorical option type must round-trip to the array's form."""
    pytest.importorskip('pyarrow')
    array = ak.str.to_categorical(['do', 're', 'mi', 'fa', 'so', None])
    reconstructed_form = ak.forms.from_type(array.type.content)
    assert reconstructed_form == array.layout.form
def get_vocab(dataset, vocab_size):
    """Load the pickled vocabulary for `dataset`/`vocab_size`.

    Returns None when vocab_size is the literal string 'null' (no vocab configured).
    """
    if vocab_size == 'null':
        return None
    # BUG FIX: the original used pickle.load(open(...)) and leaked the file handle.
    with open(f'data/{dataset}/vocab_{vocab_size}.pickle', 'rb') as f:
        return pickle.load(f)
def prepare_tag(split, src, datadir, eval=False, max_len=512, stride=300, data=None, suffix='', offset=0, jsonl=True):
    """Convert oracle word-level extraction labels into sequence-labeling examples.

    Documents longer than `max_len` are windowed with the given `stride`; each
    token is tagged with its label and a max-context flag. Output is written as
    JSONL records or whitespace-separated lines, and the output path is returned.
    """

    def _check_is_max_context(doc_spans, cur_span_index, position):
        # A token can appear in several overlapping spans; its "max context" span
        # is the one that gives it the most surrounding context (SQuAD-style).
        best_score = None
        best_span_index = None
        for (span_index, doc_span) in enumerate(doc_spans):
            end = doc_span.start + doc_span.length - 1
            if position < doc_span.start:
                continue
            if position > end:
                continue
            num_left_context = position - doc_span.start
            num_right_context = end - position
            score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
            if (best_score is None) or (score > best_score):
                best_score = score
                best_span_index = span_index
        return cur_span_index == best_span_index

    nlp = English()
    prefix = f'{datadir}/{split}'
    from_file = False
    if data is None:
        with open(f'{prefix}.oracle_word.pickle', 'rb') as fin:
            data = pickle.load(fin)
        from_file = True
    json_suffix = '.jsonl' if jsonl else ''
    if (split == 'val') and eval:
        outpath = f'{datadir}/valeval.seqlabel{json_suffix}{suffix}'
    else:
        outpath = f'{prefix}.seqlabel{json_suffix}{suffix}'
    with open(outpath, 'w') as fout:
        for example_index in range(len(data)):
            (k, v) = (example_index, data[example_index])
            if (example_index % 1000) == 0:
                print('processed {} examples'.format(example_index))
            # Docs loaded from disk are serialized spaCy Doc bytes.
            src_doc = Doc(nlp.vocab).from_bytes(v['src_doc']) if from_file else v['src_doc']
            _DocSpan = namedtuple('DocSpan', ['start', 'length'])
            doc_spans = []
            start_offset = 0
            label_list = [0] * len(src_doc)
            for s in v['oracle_tok']:
                label_list[s[0]] = 1
            # Window the document into (possibly overlapping) spans of <= max_len.
            while start_offset < len(src_doc):
                length = len(src_doc) - start_offset
                if length > max_len:
                    length = max_len
                doc_spans.append(_DocSpan(start=start_offset, length=length))
                if (start_offset + length) == len(src_doc):
                    break
                start_offset += min(length, stride)
            for (doc_span_index, doc_span) in enumerate(doc_spans):
                # BUG FIX: the original sliced with the loop-leftover `length`
                # instead of this span's own doc_span.length.
                if (split != 'test') and (not eval) and (sum(label_list[doc_span.start:(doc_span.start + doc_span.length)]) == 0):
                    continue
                if jsonl:
                    json_record = {'id': (example_index + offset), 'tokens': [], 'labels': [], 'max_context': []}
                for i in range(doc_span.length):
                    pos = doc_span.start + i
                    is_max_context = _check_is_max_context(doc_spans, doc_span_index, pos)
                    if jsonl:
                        json_record['tokens'].append(src_doc[pos].text)
                        json_record['labels'].append(label_list[pos])
                        json_record['max_context'].append(int(is_max_context))
                    else:
                        fout.write('{} {} {:d} {:d}\n'.format(src_doc[pos].text, label_list[pos], is_max_context, (example_index + offset)))
                if jsonl:
                    fout.write(json.dumps(json_record, ensure_ascii=False))
                    fout.write('\n')
    return outpath
# NOTE(review): this bare call looks like a mangled decorator for the class
# below (e.g. `@register_toolkit()`) — confirm against the original project.
_toolkit()
class GitHub(FunctionToolkit):
    """Toolkit definition exposing GitHub repository, issue, and user tools."""
    # Human/model-facing identity strings consumed by the toolkit framework.
    name_for_human = 'GitHub'
    description_for_human = 'Toolkit for managing GitHub repositories and user details.'
    name_for_model = 'GitHub'
    description_for_model = 'A toolkit for managing GitHub repositories, including searching for repositories, retrieving repository details, searching and reading issues, managing collaborators, listing and reading repository files, as well as searching for users and retrieving user details.'
    # Concrete tool classes bundled by this toolkit.
    tool_classes = [GitHubSearchRepositories, GitHubGetRepositoryDetails, GitHubCreateRepository, GitHubDeleteRepository, GitHubUpdateRespository, GitHubPostIssue, GitHubSearchIssues, GitHubReadIssue, GitHubManageCollaborator, GitHubSearchUsers, GitHubGetUserDetails]
class ColorJitter(object):
    """Randomly jitter brightness/contrast/saturation of a PIL image.

    For each factor f the enhancement strength is sampled uniformly from
    [max(1 - f, 0), 1 + f]. Passing None or 0 disables that jitter.
    """

    def __init__(self, brightness=0.5, contrast=0.5, saturation=0.5):
        # BUG FIX: the attributes were only created when the factor was truthy,
        # so __call__ crashed with AttributeError when a factor was None or 0.
        self.brightness = None
        self.contrast = None
        self.saturation = None
        if (brightness is not None) and (brightness > 0):
            self.brightness = [max(1 - brightness, 0), 1 + brightness]
        if (contrast is not None) and (contrast > 0):
            self.contrast = [max(1 - contrast, 0), 1 + contrast]
        if (saturation is not None) and (saturation > 0):
            self.saturation = [max(1 - saturation, 0), 1 + saturation]

    def __call__(self, img, mask=None):
        # Disabled jitters are skipped instead of raising.
        if self.brightness is not None:
            r_brightness = random.uniform(self.brightness[0], self.brightness[1])
            img = ImageEnhance.Brightness(img).enhance(r_brightness)
        if self.contrast is not None:
            r_contrast = random.uniform(self.contrast[0], self.contrast[1])
            img = ImageEnhance.Contrast(img).enhance(r_contrast)
        if self.saturation is not None:
            r_saturation = random.uniform(self.saturation[0], self.saturation[1])
            img = ImageEnhance.Color(img).enhance(r_saturation)
        if mask is None:
            return img
        else:
            return (img, mask)
class DataPreprocessor():
    """Applies the configured data augmentation to train and/or eval instances."""

    def __init__(self, data_augmenter_spec: DataAugmenterSpec):
        # The original body contained a stray bare `(None)` expression here —
        # almost certainly a stripped docstring; removed as a no-op.
        self.data_augmenter_spec: DataAugmenterSpec = data_augmenter_spec

    def preprocess(self, instances: List[Instance], parallelism: int=1) -> List[Instance]:
        """Return train instances followed by eval instances, each augmented when
        the spec enables it. Instances from other splits are dropped."""
        data_augmenter: DataAugmenter = create_data_augmenter(self.data_augmenter_spec)
        train_instances: List[Instance] = [instance for instance in instances if (instance.split == TRAIN_SPLIT)]
        if self.data_augmenter_spec.should_augment_train_instances:
            train_instances = data_augmenter.generate(train_instances, include_original=self.data_augmenter_spec.should_include_original_train, skip_unchanged=self.data_augmenter_spec.should_skip_unchanged_train, seeds_per_instance=self.data_augmenter_spec.seeds_per_instance, parallelism=parallelism)
        eval_instances: List[Instance] = [instance for instance in instances if (instance.split in EVAL_SPLITS)]
        if self.data_augmenter_spec.should_augment_eval_instances:
            eval_instances = data_augmenter.generate(eval_instances, include_original=self.data_augmenter_spec.should_include_original_eval, skip_unchanged=self.data_augmenter_spec.should_skip_unchanged_eval, seeds_per_instance=self.data_augmenter_spec.seeds_per_instance, parallelism=parallelism)
        return (train_instances + eval_instances)
def custom_augment(image):
    """SimCLR-style augmentation: resize to 224x224 then apply a fixed sequence of
    stochastic ops (flip, translate, blur, color jitter, color drop)."""
    image = tf.image.convert_image_dtype(image['image'], tf.float32)
    image = tf.image.resize(image, (224, 224))
    # Each op fires independently with its own probability, in this order.
    for op, p in ((tf.image.flip_left_right, 0.5),
                  (translate, 0.5),
                  (gaussian_blur, 0.5),
                  (color_jitter, 0.8),
                  (color_drop, 0.2)):
        image = random_apply(op, image, p=p)
    return image
def register_Ns3ThreeGppHttpHeader_methods(root_module, cls):
    """Register pybindgen wrappers for ns-3's ThreeGppHttpHeader: constructors,
    Header virtual overrides, and timestamp/content accessors."""
    cls.add_constructor([param('ns3::ThreeGppHttpHeader const &', 'arg0')])
    cls.add_constructor([])
    # Virtuals inherited from ns3::Header.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetClientTs', 'ns3::Time', [], is_const=True)
    cls.add_method('GetContentLength', 'uint32_t', [], is_const=True)
    cls.add_method('GetContentType', 'ns3::ThreeGppHttpHeader::ContentType_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetServerTs', 'ns3::Time', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetClientTs', 'void', [param('ns3::Time', 'clientTs')])
    cls.add_method('SetContentLength', 'void', [param('uint32_t', 'contentLength')])
    cls.add_method('SetContentType', 'void', [param('ns3::ThreeGppHttpHeader::ContentType_t', 'contentType')])
    cls.add_method('SetServerTs', 'void', [param('ns3::Time', 'serverTs')])
    cls.add_method('ToString', 'std::string', [], is_const=True)
    return
class NoRepeatNGramLogitsProcessor():
    """Dependency-guard stub for the real processor.

    Construction delegates to requires_pytorch — presumably raising a helpful
    error when torch is not installed (confirm against the framework's
    dummy-object convention).
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
# NOTE(review): this bare call looks like a stripped decorator — likely
# `@test_utils.test(exclude=[ti.amdgpu])` in the original; confirm upstream.
_utils.test(exclude=[ti.amdgpu])
def test_arg_4k():
    """A 1024-lane vector argument should round-trip and sum correctly."""
    vec1024 = ti.types.vector(1024, ti.i32)
    # NOTE(review): `bar` was presumably decorated (@ti.kernel or @ti.func)
    # before the decorator was stripped from this dump.
    def bar(a: vec1024) -> ti.i32:
        ret = 0
        for i in range(1024):
            ret += a[i]
        return ret
    a = vec1024([i for i in range(1024)])
    assert (bar(a) == 523776)  # sum(range(1024)) == 1023 * 1024 / 2
def show():
    """Print every public module-level dict as 'name:' followed by 'k = v' lines.

    Empty dicts print 'NOT AVAILABLE'; overly long 'sources' values are elided
    to their first and last 60 characters.
    """
    for name, info_dict in globals().items():
        # Skip private names and anything that is not a plain dict.
        # (isinstance replaces the original `type(...) is not type({})` check.)
        if name.startswith('_') or not isinstance(info_dict, dict):
            continue
        print(name + ':')
        if not info_dict:
            print(' NOT AVAILABLE')
        for k, v in info_dict.items():
            v = str(v)
            if k == 'sources' and len(v) > 200:
                v = v[:60] + ' ...\n... ' + v[-60:]
            print(' %s = %s' % (k, v))
def SetAdd(s, e):
    """Return the Z3 set `s` with element `e` added (sets are Z3 arrays)."""
    ctx = _ctx_from_ast_arg_list([s, e])
    # Coerce a plain Python value into a Z3 expression in the shared context.
    e = _py2expr(e, ctx)
    return ArrayRef(Z3_mk_set_add(ctx.ref(), s.as_ast(), e.as_ast()), ctx)
def get_cached_models(cache_dir: Union[(str, Path)]=None) -> List[Tuple]:
    """Scan a transformers cache dir and return (url, etag, size_MB) for cached
    '*.bin' model weights.

    Each cached blob has a '<name>.json' metadata sidecar; the blob itself is
    the sidecar path minus the '.json' suffix.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    elif isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.isdir(cache_dir):
        return []
    cached_models = []
    for file in os.listdir(cache_dir):
        if file.endswith('.json'):
            meta_path = os.path.join(cache_dir, file)
            with open(meta_path, encoding='utf-8') as meta_file:
                metadata = json.load(meta_file)
            url = metadata['url']
            etag = metadata['etag']
            if url.endswith('.bin'):
                # BUG FIX: str.strip('.json') strips *characters* from the set
                # {'.','j','s','o','n'}, corrupting paths whose stem ends in any
                # of them; slice off the exact suffix instead.
                blob_path = meta_path[:-len('.json')]
                size_MB = os.path.getsize(blob_path) / 1000000.0
                cached_models.append((url, etag, size_MB))
    return cached_models
def general_stats_data_public(path):
    """Print basic stats for a public query dataset stored as JSON: the columns,
    total count, and distribution over a fixed set of query types."""
    df = pd.read_json(path)
    query_type_label = {'LOCATION': 0, 'DESCRIPTION': 0, 'NUMERIC': 0, 'ENTITY': 0, 'PERSON': 0}
    total_size = len(df)
    for _, record in df.iterrows():
        category = record['query_type']
        if category in query_type_label:
            query_type_label[category] += 1
    print('Columns:{}'.format(df.columns.values))
    print('{} queries'.format(total_size))
    print('----query distribution by dataset type----')
    for key, count in query_type_label.items():
        print(key + ',' + str(count) + ',' + str(count / total_size))
def _get_initial_states(self, client_id, observation, policy: Policy, identifier):
    """Return this client's most recent cached RNN state, or a fresh initial state
    sized to the observation's leading (batch) dimensions."""
    if (client_id is not None) and len(self.clients[client_id].rnn_states[identifier]) > 0:
        return self.clients[client_id].rnn_states[identifier][-1]
    # No cached state: batch size is the product of any dims beyond the
    # preprocessor's expected observation shape.
    offset = len(policy.preprocessor.shape)
    if offset < len(observation.shape):
        batch_size = reduce(mul, observation.shape[:-offset])
    else:
        batch_size = 1
    return policy.get_initial_state(batch_size=batch_size)
def cleaning(x):
    """Normalize noisy (Persian) text: strip HTML, clean unicode/URLs/emails,
    remove emoji-like codepoints, and collapse whitespace."""
    # Drop HTML tags, then whole <style>/<script> elements with their contents.
    x = re.sub(re.compile('<.*?>'), '', x)
    x = re.compile('<\\s*style[^>]*>.*?<\\s*/\\s*style\\s*>', (re.S | re.I)).sub('', x)
    x = re.compile('<\\s*script[^>]*>.*?<\\s*/\\s*script\\s*>', (re.S | re.I)).sub('', x)
    x = clean(x, fix_unicode=True, to_ascii=False, lower=True, no_line_breaks=True, no_urls=True, no_emails=True, no_phone_numbers=True, no_numbers=False, no_digits=False, no_currency_symbols=True, no_punct=False, replace_with_url=' ', replace_with_email=' ', replace_with_phone_number=' ', replace_with_number=' ', replace_with_digit='', replace_with_currency_symbol=' ')
    x = normalizer.normalize(x)
    # NOTE(review): this character class appears garbled by re-encoding (likely
    # an emoji/symbol range originally) — confirm against the original source.
    wierd_pattern = re.compile('[---\U0001f6ff\U0001f1e0--M---\U0010ffff\u200d--\u2069\u2066\u2068\u2067]+', flags=re.UNICODE)
    x = wierd_pattern.sub('', x)
    # Space-pad numbers so they tokenize separately.
    x = re.sub('(\\d+(\\.\\d+)?)', ' \\1 ', x)
    x = re.sub('#', ' ', x)
    # NOTE(review): an empty pattern inserts a space at *every* position; the
    # original pattern was probably a special character lost in this dump.
    x = re.sub('', ' ', x)
    # Collapse runs (4+) of dashes/dots to a single character, then squeeze spaces.
    x = re.sub('([-])\\1{3,}', '\\1', x)
    x = re.sub('([.])\\1{3,}', '\\1', x)
    x = re.sub('\\s+', ' ', x)
    return x.strip()
def save_secondary_output(model, out_file, ranked_results, secondary_output, max_sec_i):
    """Persist the model's secondary parameters plus per-query/doc secondary
    outputs (truncated to the top `max_sec_i` ranked docs) in one .npz file."""
    filtered_secondary_output = {}
    for q_id, ranked_doc_ids in ranked_results.items():
        # Keep only the secondary outputs of the top-ranked documents.
        top_docs = {}
        for rank, doc_id in enumerate(ranked_doc_ids):
            if rank == max_sec_i:
                break
            top_docs[doc_id] = secondary_output[q_id][doc_id]
        filtered_secondary_output[q_id] = top_docs
    # Detach secondary parameter tensors to CPU numpy arrays for serialization.
    model_data_secondary_cpu = {
        tens_name: tens.data.cpu().numpy()
        for tens_name, tens in model.get_param_secondary().items()
    }
    numpy.savez_compressed(out_file, model_data=model_data_secondary_cpu, qd_data=filtered_secondary_output, per_batch_info=secondary_output['per_batch_info'])
def get_rng_state_all():
    """Collect the RNG state of every available device, in device-index order."""
    states = []
    for device_index in range(device_count()):
        # Switch the active device so get_rng_state reads that device's state.
        with device_ctx_manager(device_index):
            states.append(get_rng_state())
    return states
def get_loss(factorexprs, gold_fes, valid_fes, sentlen):
    """Compute the training loss for frame-element span factors.

    Dispatches on options.loss: hinge loss, log loss, or softmax-margin.
    Returns None (with a warning) when partition < numerator (likely
    overfitting), and raises when the loss goes negative (overcounted spans).
    """
    if (options.loss == 'hinge'):
        return get_hinge_loss(factorexprs, gold_fes, valid_fes, sentlen)
    # Numerator: sum of scores of the gold (start, end, frame-element) factors.
    goldfactors = [Factor(span[0], span[1], feid) for feid in gold_fes for span in gold_fes[feid]]
    numeratorexprs = [factorexprs[gf] for gf in goldfactors]
    numerator = dy.esum(numeratorexprs)
    if (options.loss == 'log'):
        partition = get_logloss_partition(factorexprs, valid_fes, sentlen)
    elif (options.loss == 'softmaxm'):
        partition = get_softmax_margin_partition(factorexprs, goldfactors, valid_fes, sentlen)
    else:
        raise Exception('undefined loss function', options.loss)
    lossexp = (partition - numerator)
    if (partition.scalar_value() < numerator.scalar_value()):
        sys.stderr.write(('WARNING: partition ~~ numerator! possibly overfitting difference = %f\n' % lossexp.scalar_value()))
        return None
    if (lossexp.scalar_value() < 0.0):
        # Diagnostic dump: gold factors, their scores, and the allowed span
        # start positions per end index j (respecting span-length clipping).
        sys.stderr.write((str(gold_fes) + '\ngolds\n'))
        gsum = 0
        for fac in goldfactors:
            gsum += factorexprs[fac].scalar_value()
            sys.stderr.write((((fac.to_str(FEDICT) + ' ') + str(factorexprs[fac].scalar_value())) + '\n'))
        sys.stderr.write((((('my calculation = ' + str(gsum)) + ' vs ') + str(numerator.scalar_value())) + '\n'))
        for j in range(sentlen):
            sys.stderr.write(((':' + str(j)) + '\t'))
            if ((not USE_SPAN_CLIP) or (j <= ALLOWED_SPANLEN)):
                sys.stderr.write('0 ')
            istart = 0
            if (USE_SPAN_CLIP and (j > ALLOWED_SPANLEN)):
                istart = max(0, ((j - ALLOWED_SPANLEN) - 1))
            for i in range(istart, j):
                sys.stderr.write((str((i + 1)) + ' '))
            sys.stderr.write('\n')
        raise Exception('negative probability! probably overcounting spans?', numerator.scalar_value(), partition.scalar_value(), lossexp.scalar_value())
    return lossexp
class MiT(nn.Module):
    """MixTransformer backbone (SegFormer-style): four stages of patch embedding +
    transformer blocks, returning one feature map per stage."""

    def __init__(self, model_name: str='B0'):
        super().__init__()
        assert (model_name in mit_settings.keys()), f'MiT model name should be in {list(mit_settings.keys())}'
        (embed_dims, depths) = mit_settings[model_name]
        drop_path_rate = 0.1
        self.channels = embed_dims
        # Overlapping patch embeddings: stage 1 downsamples by 4, the rest by 2.
        self.patch_embed1 = PatchEmbed(3, embed_dims[0], 7, 4)
        self.patch_embed2 = PatchEmbed(embed_dims[0], embed_dims[1], 3, 2)
        self.patch_embed3 = PatchEmbed(embed_dims[1], embed_dims[2], 3, 2)
        self.patch_embed4 = PatchEmbed(embed_dims[2], embed_dims[3], 3, 2)
        # Stochastic depth rates increase linearly across all blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        self.block1 = nn.ModuleList([Block(embed_dims[0], 1, 8, dpr[(cur + i)]) for i in range(depths[0])])
        self.norm1 = nn.LayerNorm(embed_dims[0])
        cur += depths[0]
        self.block2 = nn.ModuleList([Block(embed_dims[1], 2, 4, dpr[(cur + i)]) for i in range(depths[1])])
        self.norm2 = nn.LayerNorm(embed_dims[1])
        cur += depths[1]
        self.block3 = nn.ModuleList([Block(embed_dims[2], 5, 2, dpr[(cur + i)]) for i in range(depths[2])])
        self.norm3 = nn.LayerNorm(embed_dims[2])
        cur += depths[2]
        self.block4 = nn.ModuleList([Block(embed_dims[3], 8, 1, dpr[(cur + i)]) for i in range(depths[3])])
        self.norm4 = nn.LayerNorm(embed_dims[3])

    def forward(self, x: Tensor) -> Tensor:
        """Run all four stages; returns (x1, x2, x3, x4) multi-scale NCHW features."""
        B = x.shape[0]
        # Each stage: patch-embed -> blocks -> norm -> reshape tokens back to NCHW.
        (x, H, W) = self.patch_embed1(x)
        for blk in self.block1:
            x = blk(x, H, W)
        x1 = self.norm1(x).reshape(B, H, W, (- 1)).permute(0, 3, 1, 2)
        (x, H, W) = self.patch_embed2(x1)
        for blk in self.block2:
            x = blk(x, H, W)
        x2 = self.norm2(x).reshape(B, H, W, (- 1)).permute(0, 3, 1, 2)
        (x, H, W) = self.patch_embed3(x2)
        for blk in self.block3:
            x = blk(x, H, W)
        x3 = self.norm3(x).reshape(B, H, W, (- 1)).permute(0, 3, 1, 2)
        (x, H, W) = self.patch_embed4(x3)
        for blk in self.block4:
            x = blk(x, H, W)
        x4 = self.norm4(x).reshape(B, H, W, (- 1)).permute(0, 3, 1, 2)
        return (x1, x2, x3, x4)
class set_scriptable():
    """Context manager that temporarily overrides the module-global _SCRIPTABLE flag.

    Note: the flag is flipped at construction time (in __init__, not __enter__)
    and restored in __exit__; exceptions are never suppressed.
    """

    def __init__(self, mode: bool) -> None:
        global _SCRIPTABLE
        self.prev = _SCRIPTABLE
        _SCRIPTABLE = mode

    def __enter__(self) -> None:
        pass

    def __exit__(self, *args: Any) -> bool:
        global _SCRIPTABLE
        _SCRIPTABLE = self.prev
        return False
def validate_jp_cn(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Japanese corporate numbers in a scalar, Series, or DataFrame.

    For a DataFrame, only `column` is checked when given; otherwise every cell.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(cn.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column:
            return df[column].apply(cn.is_valid)
        return df.applymap(cn.is_valid)
    # Scalar input (e.g. a single string).
    return cn.is_valid(df)
def projected_memory_usage(node: V1Node, pod: Optional[V1Pod], usage: Dict[(str, Union[(int, float)])]) -> Union[(int, float)]:
    """Memory usage reported for `node`, plus `pod`'s requests if it were scheduled
    there (SGX EPC requests when the pod asks for SGX, plain memory otherwise)."""
    # Nodes missing from the usage map count as zero.
    projected = usage.get(node.metadata.name, 0)
    if pod is not None:
        resource = 'intel.com/sgx' if pod_requests_sgx(pod) else 'memory'
        projected += pod_sum_resources_requests(pod, resource)
    return projected
class SympyOverridesTest(TestCase):
    """Tests for symforce's overrides of sympy behavior."""

    def test_solve(self) -> None:
        """solve must always return a list, even when there are no solutions."""
        (x, y) = sf.symbols('x y')
        solution = sf.solve(((x - 2) * (x + y)), x)
        self.assertIsInstance(solution, T.List)
        self.assertEqual(set(solution), {2, (- y)})
        # Solving a nonzero constant for x has no solutions -> empty list.
        solution = sf.solve(2, x)
        self.assertIsInstance(solution, T.List)
        self.assertEqual(set(solution), set())

    def test_derivatives(self) -> None:
        """floor/sign derivatives are overridden to 0; Mod derivatives must match
        a central finite-difference approximation."""
        (x, y) = sf.symbols('x y')
        self.assertEqual(sf.floor(x).diff(x), 0)
        self.assertEqual(sf.floor((x ** 2)).diff(x), 0)
        self.assertEqual(sf.sign(x).diff(x), 0)
        self.assertEqual(sf.sign((x ** 2)).diff(x), 0)

        def numerical_derivative(f: T.Callable[([sf.Scalar], sf.Scalar)], x: sf.Scalar, delta: float=1e-08) -> float:
            # Central difference: (f(x+d) - f(x-d)) / 2d.
            return float(((f((x + delta)) - f((x - delta))) / (2 * delta)))
        # Check all four sign combinations of the Mod arguments.
        for (nx, ny) in ((5, 2), ((- 5), 2), (5, (- 2)), ((- 5), (- 2))):
            self.assertAlmostEqual(float(sf.Mod(x, y).diff(x).subs({x: nx, y: ny})), numerical_derivative((lambda _x: sf.Mod(x, y).subs({x: _x, y: ny})), nx))
            self.assertAlmostEqual(float(sf.Mod(x, y).diff(y).subs({x: nx, y: ny})), numerical_derivative((lambda _y: sf.Mod(x, y).subs({x: nx, y: _y})), ny))
def substruct2smi(molecule, partitioning, cg_bead, cgbeads, ringatoms):
    """Extract the substructure mapped to coarse-grained bead `cg_bead` and return
    (SMILES, Wildman-Crippen logP, total formal charge).

    Atoms in the editable fragment are matched to parent atoms by comparing
    conformer x-coordinates (NOTE(review): assumes x-coordinates are unique per
    atom — confirm; RemoveAtom also shifts later indices, so the matching relies
    on re-fetching `submol` each pass — confirm behavior upstream).
    """
    frag = rdchem.EditableMol(molecule)
    num_atoms = molecule.GetConformer().GetNumAtoms()
    # Drop every hydrogen from the fragment.
    for i in range(num_atoms):
        if (molecule.GetAtomWithIdx(i).GetSymbol() == 'H'):
            submol = frag.GetMol()
            for j in range(submol.GetConformer().GetNumAtoms()):
                if (molecule.GetConformer().GetAtomPosition(i)[0] == submol.GetConformer().GetAtomPosition(j)[0]):
                    frag.RemoveAtom(j)
    # If this bead sits on a ring, keep the whole ring intact.
    atoms_in_ring = []
    for ring in ringatoms:
        if (cgbeads[cg_bead] in ring):
            atoms_in_ring = ring[:]
            break
    # Remove atoms assigned to other beads (unless part of the kept ring).
    for i in partitioning.keys():
        if ((partitioning[i] != cg_bead) and (i not in atoms_in_ring)):
            submol = frag.GetMol()
            for j in range(submol.GetConformer().GetNumAtoms()):
                if (molecule.GetConformer().GetAtomPosition(i)[0] == submol.GetConformer().GetAtomPosition(j)[0]):
                    frag.RemoveAtom(j)
    # Wildman-Crippen logP of the remaining fragment.
    wc_log_p = rdMolDescriptors.CalcCrippenDescriptors(frag.GetMol())[0]
    # Sum the formal charges of the parent atoms assigned to this bead.
    chg = 0
    for i in partitioning.keys():
        if (partitioning[i] == cg_bead):
            chg += molecule.GetAtomWithIdx(i).GetFormalCharge()
    smi = Chem.MolToSmiles(Chem.rdmolops.AddHs(frag.GetMol(), addCoords=True))
    return (smi, wc_log_p, chg)
class LayerDecayValueAssigner(object):
    """Maps parameter names to layer ids and per-layer learning-rate scales."""

    def __init__(self, values, is_swin=False, depths=None):
        self.values = values      # per-layer scale factors, indexed by layer id
        self.is_swin = is_swin    # use the Swin layer-numbering scheme when True
        self.depths = depths      # per-stage block depths (Swin only)

    def get_scale(self, layer_id):
        """Scale factor for the given layer id."""
        return self.values[layer_id]

    def get_layer_id(self, var_name):
        """Layer id for a parameter name, via the Swin or ViT numbering scheme."""
        if self.is_swin:
            return get_num_layer_for_swin(var_name, len(self.values), self.depths)
        return get_num_layer_for_vit(var_name, len(self.values))
def sharp_ifeq(lvalue, rvalue, valueIfTrue, valueIfFalse=None, *args):
    """Makefile-ifeq-style helper: valueIfTrue when the stripped operands match,
    valueIfFalse otherwise, '' when nothing applies or rvalue strips to empty.

    NOTE(review): source indentation was lost in this dump; the elif is attached
    to the equality test here (matching ifeq semantics) — confirm upstream.
    """
    rvalue = rvalue.strip()
    if rvalue:
        if (lvalue.strip() == rvalue):
            if valueIfTrue:
                return valueIfTrue.strip()
        elif valueIfFalse:
            return valueIfFalse.strip()
    return ''
class CircleMaze():
    """A ring-shaped maze: positions live in an annulus of half-width ring_r
    around the unit circle, with an angular gap of fraction stop_t near angle 0."""

    def __init__(self):
        self.ring_r = 0.15   # half-width of the ring corridor
        self.stop_t = 0.05   # blocked angular fraction around t = 0
        self.s_angle = 30    # start angle in degrees
        self.mean_s0 = (float(np.cos(((np.pi * self.s_angle) / 180))), float(np.sin(((np.pi * self.s_angle) / 180))))
        self.mean_g = (float(np.cos(((np.pi * (360 - self.s_angle)) / 180))), float(np.sin(((np.pi * (360 - self.s_angle)) / 180))))

    def plot(self, ax=None):
        """Draw the maze walls on `ax` (a fresh figure when ax is None)."""
        # (A duplicated `if ax is None` guard in the original was removed.)
        if ax is None:
            (_, ax) = plt.subplots(1, 1, figsize=(5, 4))
        rads = np.linspace(((self.stop_t * 2) * np.pi), (((1 - self.stop_t) * 2) * np.pi))
        xs_i = ((1 - self.ring_r) * np.cos(rads))
        ys_i = ((1 - self.ring_r) * np.sin(rads))
        xs_o = ((1 + self.ring_r) * np.cos(rads))
        ys_o = ((1 + self.ring_r) * np.sin(rads))
        ax.plot(xs_i, ys_i, 'k', linewidth=3)
        ax.plot(xs_o, ys_o, 'k', linewidth=3)
        # Close the corridor ends across the blocked sector.
        ax.plot([xs_i[0], xs_o[0]], [ys_i[0], ys_o[0]], 'k', linewidth=3)
        ax.plot([xs_i[-1], xs_o[-1]], [ys_i[-1], ys_o[-1]], 'k', linewidth=3)
        lim = (1.1 + self.ring_r)
        ax.set_xlim([(- lim), lim])
        ax.set_ylim([(- lim), lim])

    def sample_start(self):
        """Random start position near the mean start point."""
        STD = 0.1
        return self.move(self.mean_s0, ((STD * np.random.randn()), (STD * np.random.randn())))

    def sample_goal(self):
        """Random goal position near the mean goal point."""
        STD = 0.1
        return self.move(self.mean_g, ((STD * np.random.randn()), (STD * np.random.randn())))

    def xy_to_rt(self, xy):
        """Cartesian -> polar (radius, angle in [0, 2*pi)).

        BUG FIX: the original signature was `def xy_to_rt(xy)` — missing `self` —
        so every `self.xy_to_rt(...)` call raised TypeError.
        """
        x = xy[0]
        y = xy[1]
        r = np.sqrt(((x ** 2) + (y ** 2)))
        t = (np.arctan2(y, x) % (2 * np.pi))
        return (r, t)

    def move(self, coords, action):
        """Apply `action` to `coords`, clipping the result to stay inside the ring
        corridor and outside the blocked sector; returns the new (x, y)."""
        (xp, yp) = coords
        (rp, tp) = self.xy_to_rt(coords)
        xy = ((coords[0] + action[0]), (coords[1] + action[1]))
        (r, t) = self.xy_to_rt(xy)
        t = np.clip((t % (2 * np.pi)), ((0.001 + self.stop_t) * (2 * np.pi)), ((1 - (0.001 + self.stop_t)) * (2 * np.pi)))
        x = (np.cos(t) * r)
        y = (np.sin(t) * r)
        if coords is not None:
            # Prevent tunneling through the blocked sector when crossing y = 0
            # on the positive-x side: snap to the nearer sector boundary.
            if xp > 0:
                if (y < 0) and (yp > 0):
                    t = ((self.stop_t * 2) * np.pi)
                elif (y > 0) and (yp < 0):
                    t = (((1 - self.stop_t) * 2) * np.pi)
                x = (np.cos(t) * r)
                y = (np.sin(t) * r)
        # Step toward the target in n increments, stopping at the first wall hit.
        n = 8
        xyi = np.array([xp, yp]).astype(np.float32)
        dxy = ((np.array([x, y]).astype(np.float32) - xyi) / n)
        new_r = float(rp)
        new_t = float(tp)
        count = 0

        def r_ok(r_):
            return ((1 - self.ring_r) <= r_ <= (1 + self.ring_r))

        def t_ok(t_):
            return ((self.stop_t * (2 * np.pi)) <= (t_ % (2 * np.pi)) <= ((1 - self.stop_t) * (2 * np.pi)))

        while (r_ok(new_r) and t_ok(new_t) and (count < n)):
            xyi += dxy
            (new_r, new_t) = self.xy_to_rt(xyi)
            count += 1
        # Clip the final position strictly inside the corridor and open sector.
        r = np.clip(new_r, ((1 - self.ring_r) + 0.01), ((1 + self.ring_r) - 0.01))
        t = np.clip((new_t % (2 * np.pi)), ((0.001 + self.stop_t) * (2 * np.pi)), ((1 - (0.001 + self.stop_t)) * (2 * np.pi)))
        x = (np.cos(t) * r)
        y = (np.sin(t) * r)
        return (float(x), float(y))
class AverageMeter(object):
    """Tracks the latest value and running average of a metric.

    Fix: ``avg`` is now a property — ``__str__`` formats ``self.avg`` with a
    float format spec, which previously received the bound method object
    (a stripped ``@property`` decorator).
    """

    def __init__(self, name=None, fmt='.6f'):
        # Pre-build the display template, e.g. "loss 0.123456 (0.120000)".
        fmtstr = f'{{val:{fmt}}} ({{avg:{fmt}}})'
        if name is not None:
            fmtstr = name + ' ' + fmtstr
        self.fmtstr = fmtstr
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times."""
        self.val = val
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        """Running average; tensor results are converted to Python floats."""
        avg = self.sum / self.count
        if isinstance(avg, torch.Tensor):
            avg = avg.item()
        return avg

    def __str__(self):
        val = self.val
        if isinstance(val, torch.Tensor):
            val = val.item()
        return self.fmtstr.format(val=val, avg=self.avg)
def sliding_windows(item=None, rank_start=0, rank_end=100, window_size=20, step=10, model_name='gpt-3.5-turbo', api_key=None):
    """Re-rank *item* with overlapping windows sliding from rank_end down to
    rank_start, applying permutation_pipeline to each window in turn."""
    item = copy.deepcopy(item)
    end_pos = rank_end
    start_pos = rank_end - window_size
    while start_pos >= rank_start:
        window_start = max(start_pos, rank_start)
        item = permutation_pipeline(item, window_start, end_pos,
                                    model_name=model_name, api_key=api_key)
        end_pos -= step
        start_pos -= step
    return item
def find_missing_pose_files(directory: str):
    """Return sorted full paths of .mp4 files in *directory* that have no
    same-named .pose companion file."""
    entries = os.listdir(directory)
    pose_stems = {name.removesuffix('.pose') for name in entries if name.endswith('.pose')}
    missing = [
        os.path.join(directory, name)
        for name in entries
        if name.endswith('.mp4') and name.removesuffix('.mp4') not in pose_stems
    ]
    return sorted(missing)
def count_single_mulpies(toks, ratio=RATIO):
    """Count chords (simultaneous pitch sets) in a token stream.

    *toks* is a whitespace-separated string or a token list arranged as
    (event, duration) pairs sampled every *ratio* tokens.  Pitch events
    sharing a duration form a "mulpi" (multi-pitch set); a non-pitch event
    flushes all open sets.  Returns ``(chord_counter, n_positions)`` where
    chord_counter maps sorted pitch tuples (size >= 2) to counts.

    The duplicated flush logic of the original is factored into ``_flush``.
    """
    if isinstance(toks, str):
        toks = toks.split()
    chord_dict = Counter()

    def _flush(groups):
        # Record every group holding two or more distinct pitches as a chord.
        for mulpi in groups.values():
            if len(mulpi) > 1:
                chord_dict[tuple(sorted(list(mulpi), key=str2pit))] += 1

    mulpies = dict()
    l_toks = len(toks)
    for idx in range(0, l_toks, ratio):
        e, d = toks[idx:idx + 2]
        if not ispitch(e):
            # Non-pitch token ends all open groups.
            if len(mulpies) > 0:
                _flush(mulpies)
                mulpies = dict()
        else:
            # Group concurrent pitches by their duration token.
            mulpies.setdefault(d, set()).add(e)
    # Flush whatever is still open at the end of the stream.
    if len(mulpies) > 0:
        _flush(mulpies)
    return (chord_dict, l_toks // ratio)
def _distance_to_closest_point(point, points):
    """Return the smallest distance from *point* to any point in *points*."""
    distances = (distance_between_points(point, other) for other in points)
    return min(distances)
def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1):
    """Convert a list of SQuAD examples into model-ready features.

    Tokenization runs in a multiprocessing pool; each resulting feature is
    then assigned an ``example_index`` and a globally unique ``unique_id``.
    return_dataset: False for plain features, 'pt' for a TensorDataset,
    'tf' for a tf.data.Dataset.

    Fix: ``unique_id`` had lost its initial value (a syntax error); restored
    the canonical starting id 1000000000 used by the SQuAD pipeline.
    """
    features = []
    threads = min(threads, cpu_count())
    # Tokenize examples in parallel; the initializer shares the tokenizer.
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(squad_convert_example_to_features, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=is_training)
        features = list(tqdm(p.imap(annotate_, examples, chunksize=32), total=len(examples), desc='convert squad examples to features'))
    # Flatten per-example feature lists, assigning sequential ids.
    new_features = []
    unique_id = 1000000000
    example_index = 0
    for example_features in tqdm(features, total=len(features), desc='add example index and unique id'):
        if not example_features:
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    if return_dataset == 'pt':
        if not is_torch_available():
            raise RuntimeError('PyTorch must be installed to return a PyTorch dataset.')
        # Stack each per-feature field into one tensor.
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
        all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
        all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
        if not is_training:
            # Evaluation: index tensor instead of answer positions.
            all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_example_index, all_cls_index, all_p_mask)
        else:
            all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
            all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
            dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask, all_is_impossible)
        return (features, dataset)
    elif return_dataset == 'tf':
        if not is_tf_available():
            raise RuntimeError('TensorFlow must be installed to return a TensorFlow dataset.')

        def gen():
            # Stream (inputs, targets) dicts for tf.data.
            for ex in features:
                yield ({'input_ids': ex.input_ids, 'attention_mask': ex.attention_mask, 'token_type_ids': ex.token_type_ids}, {'start_position': ex.start_position, 'end_position': ex.end_position, 'cls_index': ex.cls_index, 'p_mask': ex.p_mask, 'is_impossible': ex.is_impossible})
        return tf.data.Dataset.from_generator(gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, {'start_position': tf.int64, 'end_position': tf.int64, 'cls_index': tf.int64, 'p_mask': tf.int32, 'is_impossible': tf.int32}), ({'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None]), 'token_type_ids': tf.TensorShape([None])}, {'start_position': tf.TensorShape([]), 'end_position': tf.TensorShape([]), 'cls_index': tf.TensorShape([]), 'p_mask': tf.TensorShape([None]), 'is_impossible': tf.TensorShape([])}))
    return features
class AdamClonedWeightPredictionForAggregationWithWD(WeightPredictor):
    """Weight predictor that simulates ``n_steps`` of Adam (with weight
    decay) on cloned weights — used to compensate for staleness in
    pipelined training."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Ensure Adam state (exp_avg, exp_avg_sq, step) exists for every param.
        adam_init(self.optimizer)

    def forward(self):
        """Predict the weights ``n_steps`` ahead and write them into the params.

        The current gradient is used only for the first simulated step
        (staleness 1); later steps only decay the first moment.  The second
        moment is held fixed throughout the simulation.
        """
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        # Per-parameter-group learning rates for each of the future steps.
        if (self.scheduler is not None):
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (pg, step_lrs) in zip(pgs, pg_step_lrs):
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for p in pg['params']:
                    state = self.optimizer.state[p]
                    exp_avg = state['exp_avg']
                    exp_avg_sq = state['exp_avg_sq']
                    step = state['step']
                    weight_decay = pg['weight_decay']
                    exp_avg_hat = exp_avg
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        if (lr == 0):
                            continue
                        # Gradient contributes only at staleness 1.
                        d_p = (0 if ((p.grad is None) or (staleness > 1)) else p.grad)
                        if (weight_decay != 0):
                            # NOTE(review): when d_p is p.grad, this in-place
                            # += mutates the gradient tensor itself — confirm
                            # that is intended.
                            d_p += (weight_decay * p.data)
                        # Simulated first-moment update (second moment frozen).
                        exp_avg_hat = ((exp_avg_hat * beta1) + ((1 - beta1) * d_p))
                        bias_correction1 = (1 - (beta1 ** (step + staleness)))
                        bias_correction2 = (1 - (beta2 ** (step + staleness)))
                        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                        step_size = (lr / bias_correction1)
                        # Standard Adam parameter update with the predicted moment.
                        p.data.addcdiv_(exp_avg_hat, denom, value=(- step_size))

    def revert(self):
        """Restore the true (unpredicted) weights."""
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
class PDFParser_1911_08265(PDFParser):
    """Parser for the MuZero paper's Atari result tables (arXiv:1911.08265).

    Fix: the ``link=`` keyword in ``_add_paper_metadata`` had its URL string
    mangled into the following ``arxiv_id`` argument (a syntax error);
    restored the arXiv abstract URL.
    """

    def _format_df(self):
        """Extract the no-op-starts and human-starts score tables from
        pages 17-18 and assemble them, plus paper/agent metadata, into
        ``self.df``."""
        tables = camelot.read_pdf('../pdfs/1911.08265.pdf', pages='17,18', flavor='stream')
        # Table 0: no-op starts. Drop the trailing row and spurious column 7.
        df_noop = tables[0].df
        df_noop = df_noop.iloc[:-1].drop(columns=[7])
        df_noop = df_noop.T
        df_noop = self._remove_index_and_header(df_noop)
        df_noop = self._standardize_env_names(df_noop)
        df_noop = self._standardize_scores(df_noop)
        df_noop = df_noop.add_suffix('_noop')
        # camelot drops the 'fi' ligature; repair the env name.
        df_noop.columns = df_noop.columns.str.replace('shingderby', 'fishingderby')
        # Table 1: human starts. Drop the trailing row and spurious column 5.
        df_human = tables[1].df
        df_human = df_human.iloc[:-1].drop(columns=[5])
        df_human = df_human.T
        df_human = self._remove_index_and_header(df_human)
        df_human = self._standardize_env_names(df_human)
        df_human = self._standardize_scores(df_human)
        df_human = df_human.add_suffix('_human')
        df_human.columns = df_human.columns.str.replace('shingderby', 'fishingderby')
        self.df = pd.concat([df_noop, df_human], axis=1)
        # Strip citation markers from agent row labels.
        self.df.rename(index={'SimPLe [20]': 'SimPLe', 'Ape-X [18]': 'Ape-X', 'R2D2 [21]': 'R2D2'}, inplace=True)
        self._add_paper_metadata(title='Mastering Atari, Go, Chess and Shogi by Planning with a Learned Model', authors='Julian Schrittwieser, Ioannis Antonoglou, Thomas Hubert, Karen Simonyan, Laurent Sifre, Simon Schmitt, Arthur Guez, Edward Lockhart, Demis Hassabis, Thore Graepel, Timothy Lillicrap, David Silver', link='https://arxiv.org/abs/1911.08265', arxiv_id='1911.08265', arxiv_version=1, bibtex='\n {1911.08265,\n Author = {Julian Schrittwieser and Ioannis Antonoglou and Thomas Hubert and Karen Simonyan and Laurent Sifre and Simon Schmitt and Arthur Guez and Edward Lockhart and Demis Hassabis and Thore Graepel and Timothy Lillicrap and David Silver},\n Title = {Mastering Atari, Go, Chess and Shogi by Planning with a Learned Model},\n Year = {2019},\n Eprint = {arXiv:1911.08265},\n }')
        self._pre_add_agent_metadata()
        self._add_agent_metadata('Random', fullname='Random', nickname='Random')
        self._add_agent_metadata('Human', fullname='Human', nickname='Human')
        self._add_agent_metadata('SimPLe', fullname='Simulated Policy Learning', nickname='SimPLe')
        self._add_agent_metadata('Ape-X', fullname='Ape-X', nickname='Ape-X')
        self._add_agent_metadata('R2D2', fullname='Recurrent Replay Distributed Deep Q-Network', nickname='R2D2')
        self._add_agent_metadata('MuZero', fullname='MuZero', nickname='MuZero')
@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone: ResNet with grouped convolutions.

    Fix: the registration decorator had been mangled into a bare
    ``_module()`` call; restored the mm-style
    ``@BACKBONES.register_module()`` form.
    NOTE(review): confirm the registry name matches this project's imports.
    """

    # Supported depths mapped to (block type, stage block counts).
    arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, groups=1, base_width=4, **kwargs):
        # groups/base_width must be set before ResNet.__init__ builds layers.
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build one residual stage with the ResNeXt group settings."""
        return ResLayer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
@require('matplotlib', '>=3.3')
def try_all_threshold(image, figsize=(8, 5), verbose=True):
    """Apply all global thresholding algorithms to *image* and plot the
    results side by side.

    Fix: the ``@require('matplotlib', '>=3.3')`` decorator had been mangled
    into a bare tuple expression; restored the decorator form.
    """
    def thresh(func):
        # Wrap a threshold function so it returns the binarized image.
        def wrapper(im):
            return (im > func(im))
        try:
            wrapper.__orifunc__ = func.__orifunc__
        except AttributeError:
            # Preserve the original function's identity for plot labels.
            wrapper.__orifunc__ = func.__module__ + '.' + func.__name__
        return wrapper

    methods = OrderedDict({'Isodata': thresh(threshold_isodata), 'Li': thresh(threshold_li), 'Mean': thresh(threshold_mean), 'Minimum': thresh(threshold_minimum), 'Otsu': thresh(threshold_otsu), 'Triangle': thresh(threshold_triangle), 'Yen': thresh(threshold_yen)})
    return _try_all(image, figsize=figsize, methods=methods, verbose=verbose)
def renameDatasetColumn(dataset):
    """Replace every '-' with '_' in the column names of *dataset* and
    return the (possibly rebuilt) dataset."""
    for column in dataset.column_names:
        if '-' in column:
            dataset = dataset.rename_column(column, column.replace('-', '_'))
    return dataset
class TestShapTabular(unittest.TestCase):
    """SHAP tabular explainer sanity test on the Boston regression task.

    Fix: ``task.test_data.iloc(i)`` called the ``iloc`` indexer instead of
    subscripting it, passing the indexer object (not a row) to the
    explainer; changed to ``iloc[i]``.
    """

    def test_explain(self):
        task = TabularRegression().train_boston()
        # Pipe raw tabular input through the task's transform before predicting.
        predict_function = (lambda z: task.model.predict(task.transform.transform(z)))
        set_random_seed()
        explainer = ShapTabular(training_data=task.train_data, predict_function=predict_function, mode='regression', ignored_features=None, nsamples=100)
        i = 25
        test_x = task.test_data.iloc[i]
        explanations = explainer.explain(test_x, nsamples=100)
        for e in explanations.get_explanations():
            print(e['instance'])
            pprint.pprint(list(zip(e['features'], e['values'], e['scores'])))
            # Top-3 most important features expected for this instance.
            self.assertEqual(e['features'][0], 'RM')
            self.assertEqual(e['features'][1], 'LSTAT')
            self.assertEqual(e['features'][2], 'B')
def is_gcov_enabled(cargs):
    """Validate that the configured command or binary is gcov-instrumented.

    Returns True when the coverage command (or the --gcov-check-bin target)
    resolves to an executable built with gcov support; otherwise prints a
    diagnostic and returns False.
    """
    if not is_exe(cargs.readelf_path):
        print('[*] Need a valid path to readelf, use --readelf-path')
        return False
    if cargs.coverage_cmd:
        if 'AFL_FILE' not in cargs.coverage_cmd:
            print('[*] --coverage-cmd must contain AFL_FILE')
            return False
        found_exec = False
        found_code_cov_binary = False
        for token in cargs.coverage_cmd.split(' '):
            # Skip empty tokens and option-looking arguments.
            if (not token) or token[0] == ' ' or token[0] == '-':
                continue
            if which(token):
                found_exec = True
                if (not cargs.disable_gcov_check) and is_bin_gcov_enabled(token, cargs):
                    found_code_cov_binary = True
                    break
        if not found_exec:
            print("[*] Could not find an executable binary --coverage-cmd '%s'" % cargs.coverage_cmd)
            return False
        if (not cargs.disable_gcov_check) and (not found_code_cov_binary):
            print("[*] Could not find an executable binary with code coverage support ('-fprofile-arcs -ftest-coverage') in --coverage-cmd '%s'" % cargs.coverage_cmd)
            return False
    elif cargs.gcov_check_bin:
        if not is_bin_gcov_enabled(cargs.gcov_check_bin, cargs):
            return False
    elif cargs.gcov_check:
        print('[*] Either --coverage-cmd or --gcov-check-bin required in --gcov-check mode')
        return False
    return True
class EFDTActiveLeaf(ActiveLeafClass):
    """Active-leaf helpers for Extremely Fast Decision Trees (EFDT)."""

    def get_null_split(self, criterion):
        """Build the "no split" suggestion; a merit of -inf is floored at 0."""
        pre_split_dist = self.stats
        merit = criterion.get_merit_of_split(pre_split_dist, [pre_split_dist])
        null_split = AttributeSplitSuggestion(None, [{}], merit)
        if null_split.merit == -np.inf:
            null_split.merit = 0.0
        return null_split

    def get_best_split_suggestions(self, criterion, tree):
        """Collect the best split suggestion from every attribute observer."""
        pre_split_dist = self.stats
        suggestions = []
        for attr_idx, observer in self.attribute_observers.items():
            candidate = observer.get_best_evaluated_split_suggestion(
                criterion, pre_split_dist, attr_idx, tree.binary_split)
            if candidate is not None:
                suggestions.append(candidate)
        return suggestions

    def count_nodes(self):
        """Return [decision_nodes, leaves]: a leaf contributes (0, 1)."""
        return np.array([0, 1])
def conv_init(m):
    """Module weight initializer.

    Conv layers get Xavier-uniform weights (gain sqrt(2)) and zero bias;
    BatchNorm layers get unit weight and zero bias; others are untouched.
    """
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif 'BatchNorm' in layer_name:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
def expected_calibration_error(confs, preds, labels, num_bins=10):
    """Compute the Expected Calibration Error (ECE).

    Partitions predictions into *num_bins* equal-width confidence bins and
    returns the count-weighted average |accuracy - confidence| gap.

    Fix: empty bins are skipped; previously indexing the defaultdict for an
    unused bin created a zero-count entry and raised ZeroDivisionError.
    """
    def _populate_bins(confs, preds, labels, num_bins):
        # Per-bin accumulators for correctness, confidence mass, and count.
        bin_dict = defaultdict(lambda: {'bin_accuracy': 0, 'bin_confidence': 0, 'count': 0})
        bins = np.linspace(0, 1, num_bins + 1)
        for conf, pred, label in zip(confs, preds, labels):
            bin_idx = np.searchsorted(bins, conf) - 1
            bin_dict[bin_idx]['bin_accuracy'] += int(pred == label)
            bin_dict[bin_idx]['bin_confidence'] += conf
            bin_dict[bin_idx]['count'] += 1
        return bin_dict

    bin_dict = _populate_bins(confs, preds, labels, num_bins)
    num_samples = len(labels)
    ece = 0
    for i in range(num_bins):
        bin_count = bin_dict[i]['count']
        if bin_count == 0:
            continue  # empty bin contributes nothing (would divide by zero)
        bin_accuracy = bin_dict[i]['bin_accuracy']
        bin_confidence = bin_dict[i]['bin_confidence']
        ece += (float(bin_count) / num_samples) * abs((bin_accuracy / bin_count) - (bin_confidence / bin_count))
    return ece
class DenseController(Controller):
    """Feed-forward (dense) controller for a Neural Turing Machine.

    Combines the external input and the memory read vectors through two
    dense projections plus a nonlinearity to produce the hidden state.
    """

    def __init__(self, incoming, memory_shape, num_units, num_reads, W_in_to_hid=lasagne.init.GlorotUniform(), b_in_to_hid=lasagne.init.Constant(0.0), W_reads_to_hid=lasagne.init.GlorotUniform(), b_reads_to_hid=lasagne.init.Constant(0.0), nonlinearity=lasagne.nonlinearities.rectify, hid_init=lasagne.init.GlorotUniform(), learn_init=False, **kwargs):
        super(DenseController, self).__init__(incoming, memory_shape, num_units, num_reads, hid_init, learn_init, **kwargs)
        # A None nonlinearity means a purely linear controller.
        self.nonlinearity = (lasagne.nonlinearities.identity if (nonlinearity is None) else nonlinearity)
        def add_weight_and_bias_params(input_dim, W, b, name):
            # Helper: register a (W, b) parameter pair; b may be None (no bias).
            return (self.add_param(W, (input_dim, self.num_units), name='W_{}'.format(name)), (self.add_param(b, (self.num_units,), name='b_{}'.format(name)) if (b is not None) else None))
        # Flatten all axes after (batch, time) of the incoming layer's shape.
        num_inputs = int(np.prod(self.input_shape[2:]))
        (self.W_in_to_hid, self.b_in_to_hid) = add_weight_and_bias_params(num_inputs, W_in_to_hid, b_in_to_hid, name='in_to_hid')
        (self.W_reads_to_hid, self.b_reads_to_hid) = add_weight_and_bias_params((self.num_reads * self.memory_shape[1]), W_reads_to_hid, b_reads_to_hid, name='reads_to_hid')

    def step(self, input, reads, *args):
        """One controller step: hidden = nonlinearity(input.W1 + reads.W2 + biases).

        Returns (output, hidden_state); for a dense controller they coincide.
        """
        if (input.ndim > 2):
            input = input.flatten(2)
        if (reads.ndim > 2):
            reads = reads.flatten(2)
        activation = (T.dot(input, self.W_in_to_hid) + T.dot(reads, self.W_reads_to_hid))
        if (self.b_in_to_hid is not None):
            # dimshuffle('x', 0) broadcasts the bias across the batch axis.
            activation += self.b_in_to_hid.dimshuffle('x', 0)
        if (self.b_reads_to_hid is not None):
            activation += self.b_reads_to_hid.dimshuffle('x', 0)
        state = self.nonlinearity(activation)
        return (state, state)

    def outputs_info(self, batch_size):
        """Initial hidden state tiled across the batch (for theano.scan)."""
        ones_vector = T.ones((batch_size, 1))
        hid_init = T.dot(ones_vector, self.hid_init)
        hid_init = T.unbroadcast(hid_init, 0)
        return [hid_init, hid_init]
class Exponential(Benchmark):
    """Exponential benchmark function: f(x) = -exp(-0.5 * sum(x_i^2)).

    Global minimum f = -1 at the origin; domain [-1, 1]^N; supports
    changing the dimensionality.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = -1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        """Evaluate the benchmark at *x*, counting the function evaluation."""
        self.nfev += 1
        return -exp(-0.5 * sum(x ** 2.0))
def estimate_hoeffding_lower_bound(x: np.ndarray, x_max: Optional[float]=None, delta: float=0.05) -> float:
    """One-sided Hoeffding lower confidence bound on the mean of *x*.

    x_max bounds the support from above (defaults to the sample maximum);
    delta is the allowed failure probability in (0, 1).
    """
    if x_max is None:
        x_max = x.max()
    else:
        check_scalar(x_max, 'x_max', (int, float), min_val=x.max())
    check_scalar(delta, 'delta', (int, float), min_val=0.0, max_val=1.0)
    n = x.shape[0]
    # Hoeffding radius: x_max * sqrt(log(1/delta) / (2n)).
    half_width = x_max * sqrt(log(1.0 / delta) / (2 * n))
    return x.mean() - half_width
class DownsampleA(nn.Module):
    """Option-A ResNet shortcut: stride-2 spatial subsampling followed by
    doubling the channel count via zero-padding (concatenating zeros)."""

    def __init__(self, nIn, nOut, stride):
        super(DownsampleA, self).__init__()
        # This shortcut only supports stride-2 downsampling.
        assert stride == 2
        self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)

    def forward(self, x):
        pooled = self.avg(x)
        # Append an all-zero copy along the channel axis.
        return torch.cat((pooled, pooled.mul(0)), 1)
class DeepConfig(Config):
    """Configuration for deep-learning forecasting models.

    Fix: the ``@property`` / ``@<name>.setter`` decorators had been stripped
    (a stray ``_fn.setter`` line remained), so ``optimizer``/``loss_fn``
    definitions overwrote each other and the validating setters were
    unreachable.  The constructor now stores its arguments;
    ``optimizer`` and ``loss_fn`` route through the validating setters.
    """

    def __init__(self, batch_size: int=32, num_epochs: int=10, optimizer: Union[(str, Optimizer)]=Optimizer.Adam, loss_fn: Union[(str, LossFunction)]=LossFunction.mse, clip_gradient: Optional[float]=None, use_gpu: bool=True, ts_encoding: Union[(None, str)]='h', lr: float=0.0001, weight_decay: float=0.0, valid_fraction: float=0.2, early_stop_patience: Union[(None, int)]=None, **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        self.optimizer = optimizer  # validated by the setter below
        self.loss_fn = loss_fn      # validated by the setter below
        self.clip_gradient = clip_gradient
        self.use_gpu = use_gpu
        self.ts_encoding = ts_encoding
        self.lr = lr
        self.weight_decay = weight_decay
        self.valid_fraction = valid_fraction
        self.early_stop_patience = early_stop_patience

    @property
    def optimizer(self) -> Optimizer:
        return self._optimizer

    @optimizer.setter
    def optimizer(self, optimizer: Union[(str, Optimizer)]):
        # Accept either an Optimizer member or its name as a string.
        if isinstance(optimizer, str):
            valid = set(Optimizer.__members__.keys())
            if optimizer not in valid:
                raise KeyError(f'{optimizer} is not a valid optimizer that supported. Valid optimizers are: {valid}')
            optimizer = Optimizer[optimizer]
        self._optimizer = optimizer

    @property
    def loss_fn(self) -> LossFunction:
        return self._loss_fn

    @loss_fn.setter
    def loss_fn(self, loss_fn: Union[(str, LossFunction)]):
        # Accept either a LossFunction member or its name as a string.
        if isinstance(loss_fn, str):
            valid = set(LossFunction.__members__.keys())
            if loss_fn not in valid:
                # NOTE(review): message says "optimizers" — looks like a
                # copy-paste slip; kept byte-identical to preserve behavior.
                raise KeyError(f'{loss_fn} is not a valid loss that supported. Valid optimizers are: {valid}')
            loss_fn = LossFunction[loss_fn]
        self._loss_fn = loss_fn
def braid_in_segment(glist, x0, x1, precision={}):
    """Compute the braid traced by the roots of the curves in *glist* as x
    moves along the segment from *x0* to *x1* (SageMath).

    Interval precision is doubled — and the computation restarted via
    recursion — whenever root intervals overlap or a strand endpoint cannot
    be matched to exactly one root interval.
    NOTE(review): the mutable default ``precision={}`` is shared across
    calls; it is copied immediately below so it stays read-only here.
    """
    # Work on a copy so the caller's precision dict is never mutated.
    precision1 = {_: precision[_] for _ in precision.keys()}
    g = prod(glist)
    F1 = g.base_ring()
    (x, y) = g.parent().gens()
    X0 = F1(x0)
    X1 = F1(x1)
    intervals = {}
    if (not precision1):
        # Default to 53 bits (double precision) for every curve.
        precision1 = {f: 53 for f in glist}
    y0s = []
    for f in glist:
        # Specialize f at x = X0 to get its fiber polynomial in y.
        if (f.variables() == (y,)):
            f0 = F1[y](f)
        else:
            f0 = F1[y](f.subs({x: X0}))
        y0sf = f0.roots(QQbar, multiplicities=False)
        y0s += list(y0sf)
        # Raise precision until the root intervals of f are pairwise disjoint.
        while True:
            CIFp = ComplexIntervalField(precision1[f])
            intervals[f] = [r.interval(CIFp) for r in y0sf]
            if (not any((a.overlaps(b) for (a, b) in itertools.combinations(intervals[f], 2)))):
                break
            precision1[f] *= 2
    strands = []
    for f in glist:
        for i in intervals[f]:
            # Numerically continue each root from x0 to x1, avoiding the
            # roots of the other curves.
            aux = followstrand(f, [p for p in glist if (p != f)], x0, x1, i.center(), precision1[f])
            strands.append(aux)
    complexstrands = [[(QQ(a[0]), QQ(a[1]), QQ(a[2])) for a in b] for b in strands]
    centralbraid = braid_from_piecewise(complexstrands)
    initialstrands = []
    finalstrands = []
    initialintervals = roots_interval_cached(g, X0)
    finalintervals = roots_interval_cached(g, X1)
    I1 = QQbar.gen()
    for cs in complexstrands:
        # First and last strand points as complex algebraic numbers.
        ip = (cs[0][1] + (I1 * cs[0][2]))
        fp = (cs[(- 1)][1] + (I1 * cs[(- 1)][2]))
        matched = 0
        for (center, interval) in initialintervals.items():
            if (ip in interval):
                initialstrands.append([(0, center.real(), center.imag()), (1, cs[0][1], cs[0][2])])
                matched += 1
        if (matched != 1):
            # Ambiguous or missing match: double all precisions and retry.
            precision1 = {f: (precision1[f] * 2) for f in glist}
            return braid_in_segment(glist, x0, x1, precision=precision1)
        matched = 0
        for (center, interval) in finalintervals.items():
            if (fp in interval):
                finalstrands.append([(0, cs[(- 1)][1], cs[(- 1)][2]), (1, center.real(), center.imag())])
                matched += 1
        if (matched != 1):
            precision1 = {f: (precision1[f] * 2) for f in glist}
            return braid_in_segment(glist, x0, x1, precision=precision1)
    # Compose: snap-in at x0, the tracked braid, then snap-out at x1.
    initialbraid = braid_from_piecewise(initialstrands)
    finalbraid = braid_from_piecewise(finalstrands)
    return ((initialbraid * centralbraid) * finalbraid)
class ResGRU(ResRNNBase):
    """Residual GRU: thin wrapper selecting the 'GRU' cell in ResRNNBase."""

    def __init__(self, ninp, nhid, nlayers, dropout, direction):
        super(ResGRU, self).__init__(
            'GRU', ninp, nhid, nlayers, dropout=dropout, direction=direction)
def args2powersetdict(args: Any, powerset_args: List[Any], args_unique: List[Any], dict_args_cfg_empty: Dict[(str, Any)]) -> Tuple[(Any, Any)]:
    """Expand the cross-product of the *powerset_args* attributes of *args*
    into per-combination config dicts plus matching run-id strings.

    Returns (dicts_sets, names_sets): one filled copy of
    *dict_args_cfg_empty* and one sanitized id string per combination.
    NOTE(review): relies on a module-level ``args_jloads`` collection naming
    the JSON-encoded arguments — confirm it is defined at import time.
    """
    dicts_sets = []
    names_sets = []
    # One axis per powerset argument; sweep the full cartesian product.
    powerset = [getattr(args, arg) for arg in powerset_args]
    combinations = list(itertools.product(*powerset))
    for pset in combinations:
        cfg_dict = deepcopy(dict_args_cfg_empty)
        trainid_combi = ''
        for (idx, arg) in enumerate(powerset_args):
            # Place the value under whichever top-level section declares it.
            for superarg in list(cfg_dict.keys()):
                if (arg in list(cfg_dict[superarg].keys())):
                    cfg_dict[superarg][arg] = pset[idx]
                    if (arg in args_jloads):
                        # JSON-ish values: strip stray single quotes.
                        cfg_dict[superarg][arg] = pset[idx].replace("'", '')
            # Append a compact token for this argument to the run id.
            if (arg in args_jloads):
                key = pset[idx].split('"')[1]
                trainid_combi += '_'
                trainid_combi += str(key)
            else:
                trainid_combi += '_'
                trainid_combi += arg.replace('_', '')
                trainid_combi += str(pset[idx])
        # Non-swept arguments keep their single configured value.
        for (idx, arg) in enumerate(args_unique):
            for superarg in list(cfg_dict.keys()):
                if (arg in list(cfg_dict[superarg].keys())):
                    cfg_dict[superarg][arg] = getattr(args, arg)
        # Sanitize list-like values for use in a run-id string.
        trainid_combi = trainid_combi.replace('[', '').replace(']', '').replace(' ', '').replace(',', '-')
        dicts_sets.append(deepcopy(cfg_dict))
        names_sets.append(trainid_combi)
    return (dicts_sets, names_sets)
def mp_hyp2f1(a, b, c, z):
    """Gauss hypergeometric 2F1 via mpmath, matching scipy's branch-cut
    convention on the real axis for z > 1."""
    on_branch_cut = (z.real > 1.0) and (abs(z.imag) < 1e-15)
    # Degenerate cases: (c - a) or (c - b) a non-positive integer means the
    # series terminates, so no branch-cut nudge is needed.
    cond1 = (abs((c - a) - round(c - a)) < 1e-15) and (round(c - a) <= 0)
    cond2 = (abs((c - b) - round(c - b)) < 1e-15) and (round(c - b) <= 0)
    if on_branch_cut:
        z = z.real + 0j
    if on_branch_cut and not (cond1 or cond2):
        # Nudge z just above the cut so mpmath picks the compatible branch.
        z_mpmath = z.real + 1e-15j
    else:
        z_mpmath = z
    return complex(mpmath.hyp2f1(a, b, c, z_mpmath))
def test__rollback_changes_nothing_to_rollback(default_test_case):
    """Rolling back past the end of the clone must leave it equal to the original."""
    for value in (5, 10, 15):
        default_test_case.add_statement(stmt.IntPrimitiveStatement(default_test_case, value))
    cloned = default_test_case.clone()
    tf.TestFactory._rollback_changes(default_test_case, cloned.size(), 3)
    assert cloned == default_test_case
def get_optimizer(args, net):
    """Build the SGD optimizer and LR scheduler described by *args*.

    Supports the 'poly' schedule and 'scl-poly' (polynomial decay rescaled
    after a border-iteration threshold).  Raises ValueError for an unknown
    optimizer or schedule.
    """
    params = [param for _, param in net.named_parameters()]
    if args.sgd:
        optimizer = optim.SGD(params, lr=args.lr, weight_decay=0.0005,
                              momentum=args.momentum, nesterov=False)
    else:
        raise ValueError('Not a valid optimizer')

    if args.lr_schedule == 'scl-poly':
        if cfg.REDUCE_BORDER_ITER == -1:
            raise ValueError('ERROR Cannot Do Scale Poly')
        rescale_thresh = cfg.REDUCE_BORDER_ITER
        scale_value = args.rescale

        def poly_with_rescale(iteration):
            # Standard poly decay up to the threshold, rescaled poly after.
            if iteration < rescale_thresh:
                return math.pow(1 - iteration / args.max_iter, args.poly_exp)
            return scale_value * math.pow(
                1 - (iteration - rescale_thresh) / (args.max_iter - rescale_thresh),
                args.repoly)

        scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=poly_with_rescale)
    elif args.lr_schedule == 'poly':
        scheduler = optim.lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=lambda iteration: math.pow(1 - iteration / args.max_iter, args.poly_exp))
    else:
        raise ValueError('unknown lr schedule {}'.format(args.lr_schedule))
    return (optimizer, scheduler)
class Embedder(metaclass=abc.ABCMeta):
    """Interface for token embedders: tokenization, vocabulary lookup and
    device placement.  All hooks are no-ops (return None) by default."""

    def tokenize(self, sentence):
        """Split *sentence* into tokens."""

    def untokenize(self, tokens):
        """Join *tokens* back into a sentence."""

    def lookup(self, token):
        """Return the embedding for *token*."""

    def contains(self, token):
        """Report whether *token* is in the vocabulary."""

    def to(self, device):
        """Move underlying resources to *device*."""
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
    """Dispatcher for ``np.copyto``: yields the array-like arguments that
    participate in ``__array_function__`` dispatch.

    Fix: the decorator line had been mangled into a bare
    ``_function_from_c_func_and_dispatcher(...)`` call; restored the
    ``@array_function_from_c_func_and_dispatcher`` form used by numpy's
    overrides machinery.
    """
    return (dst, src, where)
@pytest.mark.parametrize('attr', simulation_state_nparray_attrs)
def test_hdf_simulation_state_nparray(hdf_file_path, simulation_verysimple, attr):
    """Each ndarray attribute of the simulation state must round-trip
    through the reference HDF file (compared in cgs units).

    Fix: the ``@pytest.mark`` prefix of the parametrize decorator had been
    stripped, leaving a bare ``.parametrize(...)`` (a syntax error).
    """
    path = f'simulation_state/{attr}'
    expected = pd.read_hdf(hdf_file_path, path)
    actual = getattr(simulation_verysimple.simulation_state, attr)
    if hasattr(actual, 'cgs'):
        # Quantities are compared as plain cgs values.
        actual = actual.cgs.value
    assert_almost_equal(actual, expected.values)
def get_variants_sparse(domain, task, policy, seed, gamma):
    """Build a VariantGenerator for sparse-reward experiments on
    (domain, task) with the given policy, seed and discount.

    NOTE(review): mutates the module-level RUN_PARAMS_BASE and
    ALGORITHM_PARAMS_BASE dicts in place — later calls observe these
    seed/discount values.
    """
    RUN_PARAMS_BASE['seed'] = seed
    ALGORITHM_PARAMS_BASE['discount'] = gamma
    params = {'prefix': '{}/{}'.format(domain, task), 'domain': domain, 'task': task, 'git_sha': get_git_rev(), 'env_params': ENV_PARAMS[domain].get(task, {}), 'policy_params': POLICY_PARAMS[policy][domain], 'value_fn_params': VALUE_FUNCTION_PARAMS, 'algorithm_params': deep_update(ALGORITHM_PARAMS_BASE, ALGORITHM_PARAMS_SPARSE[domain]), 'replay_buffer_params': REPLAY_BUFFER_PARAMS, 'sampler_params': SAMPLER_PARAMS, 'run_params': deep_update(RUN_PARAMS_BASE, RUN_PARAMS[domain])}
    # Flatten nested dicts to dotted keys so each leaf becomes one variant axis.
    params = flatten(params, separator='.')
    vg = VariantGenerator()
    for (key, val) in params.items():
        # Lists/callables are swept over; scalars become one-element axes.
        if (isinstance(val, list) or callable(val)):
            vg.add(key, val)
        else:
            vg.add(key, [val])
    return vg
class IPERProtocol(Protocol):
    """Evaluation protocol for the iPER human-motion-imitation dataset.

    Loads the protocol JSON shipped with the repository ('val' split) and
    serves, per video, the source frames plus self- and cross-imitation
    target clips, optionally with SMPL parameters and 2D keypoints.
    """

    def __init__(self, data_dir='/p300/iPER'):
        super().__init__()
        self.data_dir = data_dir
        self.train_ids_file = 'train.txt'
        self.test_ids_file = 'val.txt'
        self.eval_path = 'iPER_protocol.json'
        self.images_folder = 'images_HD'
        self.smpls_folder = 'smpls'
        # The protocol file lives under <repo>/data, three directory levels
        # above this module.
        full_eval_path = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))), 'data', 'iPER_protocol.json')
        self.eval_info = load_json_file(full_eval_path)['val']
        self.vid_names = list(self.eval_info.keys())
        # Lazy per-video caches for SMPL parameters and keypoints.
        self._all_vid_smpls = {}
        self._all_vid_kps = {}
        self._num_source = 1
        self._load_smpls = False
        self._load_kps = False

    def __len__(self):
        """Number of evaluation videos in the protocol."""
        return len(self.vid_names)

    def take_images_paths(self, vid_name, start, end):
        """Sorted frame paths of *vid_name* in the inclusive range [start, end]."""
        vid_path = os.path.join(self.data_dir, self.images_folder, vid_name)
        vid_images_paths = glob.glob(os.path.join(vid_path, '*'))
        vid_images_paths.sort()
        images_paths = vid_images_paths[start:(end + 1)]
        return images_paths

    def setup(self, num_sources=1, load_smpls=False, load_kps=False):
        """Configure what __getitem__ returns (source count, smpls, kps)."""
        self._num_source = num_sources
        self._load_smpls = load_smpls
        self._load_kps = load_kps

    def __getitem__(self, item):
        """Assemble the evaluation record for video *item*.

        Returns a dict with 'source' frames, a 'self_imitation' clip, a
        'cross_imitation' clip from another video, and the 'flag' frames
        (same length as the cross-imitation range).
        """
        num_sources = self._num_source
        load_smpls = self._load_smpls
        load_kps = self._load_kps
        vid_name = self.vid_names[item]
        vid_info = self.eval_info[vid_name]
        eval_info = dict()
        src_vid_smpls = self.get_smpls(vid_name)
        src_vid_kps = self.get_kps(vid_name)
        src_vid_path = os.path.join(self.data_dir, self.images_folder, vid_name)
        src_img_paths = glob.glob(os.path.join(src_vid_path, '*'))
        src_img_paths.sort()
        # Source frame names are stored per source-count in the protocol.
        src_img_names = vid_info['s_n'][str(num_sources)]
        src_img_ids = [int(t.split('.')[0]) for t in src_img_names]
        eval_info['source'] = {'s_n': num_sources, 'name': vid_name, 'formated_name': self.format_name(vid_name), 'vid_path': os.path.join(self.data_dir, self.images_folder, vid_name), 'images': [src_img_paths[t] for t in src_img_ids], 'smpls': (src_vid_smpls[src_img_ids] if load_smpls else None), 'kps': (src_vid_kps[src_img_ids] if load_kps else None)}
        # Self-imitation: a target clip taken from the same video.
        self_imitation = vid_info['self_imitation']
        eval_info['self_imitation'] = {'name': self_imitation['target'], 'formated_name': self.format_name(self_imitation['target']), 'images': src_img_paths[self_imitation['range'][0]:(self_imitation['range'][1] + 1)], 'smpls': (src_vid_smpls[self_imitation['range'][0]:(self_imitation['range'][1] + 1)] if load_smpls else None), 'kps': (src_vid_kps[self_imitation['range'][0]:(self_imitation['range'][1] + 1)] if load_kps else None), 'self_imitation': True}
        # Cross-imitation: a target clip taken from a different video.
        cross_imitation = vid_info['cross_imitation']
        target_vid_name = cross_imitation['target']
        target_vid_smpls = self.get_smpls(target_vid_name)
        target_vid_kps = self.get_kps(target_vid_name)
        cross_images_paths = self.take_images_paths(vid_name=target_vid_name, start=cross_imitation['range'][0], end=cross_imitation['range'][1])
        eval_info['cross_imitation'] = {'name': target_vid_name, 'formated_name': self.format_name(target_vid_name), 'images': cross_images_paths, 'smpls': (target_vid_smpls[cross_imitation['range'][0]:(cross_imitation['range'][1] + 1)] if load_smpls else None), 'kps': (target_vid_kps[cross_imitation['range'][0]:(cross_imitation['range'][1] + 1)] if load_kps else None), 'self_imitation': False}
        eval_info['flag'] = self.take_images_paths(vid_name=vid_name, start=vid_info['flag'][0], end=vid_info['flag'][1])
        # Flag and cross-imitation ranges must cover the same number of frames.
        assert ((cross_imitation['range'][1] - cross_imitation['range'][0]) == (vid_info['flag'][1] - vid_info['flag'][0]))
        return eval_info

    def format_name(self, name):
        """'a/b/c' video path -> 'a_b_c' flat name."""
        formated_name = '_'.join(name.split('/'))
        return formated_name

    def original_name(self, formated_name):
        """Inverse of format_name: 'a_b_c' -> 'a/b/c'."""
        original_name = '/'.join(formated_name.split('_'))
        return original_name

    def get_smpl_path(self, name):
        """Path of the pickled SMPL parameters for video *name*."""
        smpl_path = os.path.join(self.data_dir, self.smpls_folder, name, 'pose_shape.pkl')
        return smpl_path

    def get_kps_path(self, name):
        """Path of the pickled keypoints for video *name*."""
        smpl_path = os.path.join(self.data_dir, self.smpls_folder, name, 'kps.pkl')
        return smpl_path

    def get_smpls(self, name):
        """Load (and cache) per-frame SMPL params for *name*.

        Returns the concatenated [cams | pose | shape] array, or None when
        *name* is not part of the protocol.
        """
        smpls = None
        if (name in self.eval_info):
            if (name not in self._all_vid_smpls):
                smpl_path = self.get_smpl_path(name)
                smpl_data = load_pickle_file(smpl_path)
                cams = smpl_data['cams']
                thetas = smpl_data['pose']
                betas = smpl_data['shape']
                smpls = np.concatenate([cams, thetas, betas], axis=1)
                self._all_vid_smpls[name] = smpls
            else:
                smpls = self._all_vid_smpls[name]
        return smpls

    def get_kps(self, name):
        """Load (and cache) per-frame 2D keypoints for *name*, or None."""
        kps = None
        if (name in self.eval_info):
            if (name not in self._all_vid_kps):
                kps_path = self.get_kps_path(name)
                kps = load_pickle_file(kps_path)['kps']
                self._all_vid_kps[name] = kps
            else:
                kps = self._all_vid_kps[name]
        return kps

    def total_frames(self):
        """Total number of image frames on disk across all protocol videos."""
        total = 0
        for (vid_name, vid_info) in self.eval_info.items():
            src_vid = os.path.join(self.data_dir, self.images_folder, vid_name)
            length = len(os.listdir(src_vid))
            total += length
        return total
def typeset_solvers_table(fd, solver_table):
    """Write a reST bullet list of solvers to *fd*.

    Each entry of *solver_table* is (solvers_dict, tag); every section is
    wrapped in '.. <tag>' / '.. </tag>' comment markers, with one bullet per
    solver linking its class and quoting the first docstring line.
    """
    open_tag = '.. <%s>\n'
    close_tag = '.. </%s>\n'
    for solvers, tag in solver_table:
        fd.write(open_tag % tag)
        for name, cls in sorted(solvers.items()):
            fd.write('- :class:`%s <%s.%s>`: ' % (name, cls.__module__, cls.__name__))
            fd.write('%s\n' % trim(cls.__doc__)[0])
        fd.write(close_tag % tag)
        fd.write('\n')
class EMA(object):
    """Exponential moving average of a module's trainable parameters.

    Maintains shadow copies updated as
    ``shadow = (1 - mu) * param + mu * shadow``.
    """

    def __init__(self, mu=0.999):
        self.mu = mu
        self.shadow = {}

    def register(self, module):
        """Snapshot every trainable parameter of *module*."""
        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue
            self.shadow[name] = param.data.clone()

    def update(self, module):
        """Blend the current parameter values into the shadow copies."""
        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue
            self.shadow[name].data = ((1.0 - self.mu) * param.data) + (self.mu * self.shadow[name].data)

    def ema(self, module):
        """Overwrite *module*'s parameters with the shadow averages."""
        for name, param in module.named_parameters():
            if not param.requires_grad:
                continue
            param.data.copy_(self.shadow[name].data)

    def ema_copy(self, module):
        """Return a fresh copy of *module* carrying the averaged weights."""
        clone = type(module)(module.config).to(module.config.device)
        clone.load_state_dict(module.state_dict())
        self.ema(clone)
        return clone

    def state_dict(self):
        """Expose the shadow dict for checkpointing."""
        return self.shadow

    def load_state_dict(self, state_dict):
        """Restore the shadow dict from a checkpoint."""
        self.shadow = state_dict
class TFMT5Model(metaclass=DummyObject):
    # Auto-generated placeholder: stands in for the real TF MT5 model class
    # when TensorFlow is not installed; any instantiation raises via
    # requires_backends instead of failing with an opaque ImportError.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises immediately unless the 'tf' backend is available.
        requires_backends(self, ['tf'])
class TestWeightSetting(unittest.TestCase):
    """Unit tests for WeightSetting.obtain_weights."""

    def test_obtain_weights(self):
        """A small matrix containing an outlier row yields all-zero weights."""
        power_signals_d = np.array([[0., 0.0, 0.0, 2.],
                                    [0., 0.0, 0.0, 2.],
                                    [0.8125, 0.0, 0.0, 2.],
                                    [0., 0.0, 0.0, 2.]])
        expected_weights = np.array([0.0, 0.0, 0.0, 0.0])
        weight_setting = WeightSetting()
        actual_weights = weight_setting.obtain_weights(power_signals_d)
        np.testing.assert_array_equal(actual_weights, expected_weights)

    def test_obtain_weights_with_large_data(self):
        """Compare computed weights against fixtures for a full year of signals."""
        input_power_signals_file_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__),
            '../../fixtures/initialization/one_year_power_signals_1.csv'))
        with open(input_power_signals_file_path) as file:
            power_signals_d = np.loadtxt(file, delimiter=',')
        weights_file_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__),
            '../../fixtures/initialization/one_year_weights_1.csv'))
        with open(weights_file_path) as file:
            expected_weights = np.loadtxt(file, delimiter=',')
        weight_setting = WeightSetting(solver_type='MOSEK')
        try:
            actual_weights = weight_setting.obtain_weights(power_signals_d)
        except cvx.SolverError:
            # Bug fix: the original string concatenation was missing a space
            # and rendered as "...MOSEK solverbecause default ECOS...".
            self.skipTest('This test uses MOSEK solver '
                          'because default ECOS solver fails with large data. '
                          'Unless MOSEK is installed, this test fails.')
        else:
            np.testing.assert_allclose(actual_weights, expected_weights, rtol=1e-05)
def load_fasttext(language):
    """Load the 300-dim fasttext model for `language`, downloading on first use.

    The downloaded model (and its .gz archive) are moved into a local
    'data/fasttext' cache so later calls load directly from disk.
    """
    lang = constants.LANGUAGE_CODES[language]
    ft_path = 'data/fasttext'
    ft_fname = os.path.join(ft_path, 'cc.%s.300.bin' % lang)
    if not os.path.exists(ft_fname):
        logging.info('Downloading fasttext model')
        temp_fname = fasttext.util.download_model(lang, if_exists='ignore')
        util.mkdir(ft_path)
        # Relocate both the unpacked model and its archive into the cache dir.
        for suffix in ('', '.gz'):
            os.rename(temp_fname + suffix, ft_fname + suffix)
    logging.info('Loading fasttext model')
    return fasttext.load_model(ft_fname)
def register_Ns3Dot11sIePeeringProtocol_methods(root_module, cls):
    # Auto-generated PyBindGen registration for ns3::dot11s::IePeeringProtocol:
    # copy constructor, default constructor, and the WifiInformationElement
    # virtual interface (deserialize/serialize, element id, field size, print).
    # Do not hand-edit beyond regeneration.
    cls.add_constructor([param('ns3::dot11s::IePeeringProtocol const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'i'), param('uint8_t', 'length')], is_virtual=True)
    cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
    cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'i')], is_const=True, is_virtual=True)
    return
class stacked_DMSHN(nn.Module):
    """Two DMSHN sub-networks applied in sequence.

    The first network's output is refined by a second, identical network.
    """

    def __init__(self):
        super(stacked_DMSHN, self).__init__()
        self.net1 = DMSHN()
        self.net2 = DMSHN()

    def forward(self, x):
        # Cascade: stage-2 refines stage-1's output.
        return self.net2(self.net1(x))
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop for a vectorized-env worker.

    Drives a single environment instance, receiving (cmd, data) messages over
    `remote` until a 'close' command arrives. The parent's pipe end is closed
    immediately since only the child end is used here.
    """
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Auto-reset so the consumer always gets a valid observation.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            # A dict payload carries keyword arguments for reset().
            ob = env.reset(**data) if isinstance(data, dict) else env.reset()
            remote.send(ob)
        elif cmd == 'render':
            env.render()
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
def linear_layer(x, is_training, num_classes, use_bias=True, use_bn=False, name='linear_layer'):
    """Dense projection to `num_classes`, optionally followed by batch norm (no relu).

    When batch norm is enabled the dense layer drops its own bias and BN's
    `center` term takes over that role (gated on `use_bias`).
    """
    assert x.shape.ndims == 2, x.shape
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        dense_kwargs = dict(
            inputs=x,
            units=num_classes,
            use_bias=use_bias and not use_bn,
            kernel_initializer=tf.random_normal_initializer(stddev=0.01),
        )
        x = tf.layers.dense(**dense_kwargs)
        if use_bn:
            x = resnet.batch_norm_relu(x, is_training, relu=False, center=use_bias)
        x = tf.identity(x, '%s_out' % name)
    return x
class BaseTracker():
    """Object-mask video tracker built on a CUTIE segmentation network.

    NOTE(review): the bare `_grad()` calls between methods look like mangled
    decorators (e.g. `@torch.no_grad()`) -- confirm against the upstream file.
    """

    def __init__(self, cutie_checkpoint, device) -> None:
        """Load CUTIE weights from `cutie_checkpoint` and prepare inference state."""
        config = OmegaConf.create(CONFIG)
        network = CUTIE(config).to(device).eval()
        model_weights = torch.load(cutie_checkpoint, map_location=device)
        network.load_weights(model_weights)
        self.tracker = InferenceCore(network, config)
        self.device = device
        # Maps arbitrary caller label values <-> contiguous internal ids.
        self.mapper = MaskMapper()
        self.initialised = False
    _grad()
    def resize_mask(self, mask):
        """Resize `mask` so its shorter spatial side equals `self.size` (nearest).

        NOTE(review): `self.size` is never assigned in __init__; as written
        this would raise AttributeError -- verify where `size` is set.
        """
        (h, w) = mask.shape[(- 2):]
        min_hw = min(h, w)
        return F.interpolate(mask, (int(((h / min_hw) * self.size)), int(((w / min_hw) * self.size))), mode='nearest')
    _grad()
    def image_to_torch(self, frame: np.ndarray, device: str='cuda'):
        """Convert an HWC uint8 frame to a CHW float tensor in [0, 1] on `device`."""
        frame = frame.transpose(2, 0, 1)
        frame = (torch.from_numpy(frame).float().to(device, non_blocking=True) / 255)
        return frame
    _grad()
    def track(self, frame, first_frame_annotation=None):
        """Propagate object masks onto `frame`.

        If `first_frame_annotation` is given it (re)initialises tracking.
        Returns (final_mask, final_mask, painted_image); the mask is returned
        twice -- presumably for API compatibility with other trackers, TODO confirm.
        """
        if (first_frame_annotation is not None):
            (mask, labels) = self.mapper.convert_mask(first_frame_annotation)
            mask = torch.Tensor(mask).to(self.device)
        else:
            mask = None
            labels = None
        frame_tensor = self.image_to_torch(frame, self.device)
        probs = self.tracker.step(frame_tensor, mask, labels)
        # Per-pixel argmax over object probabilities -> internal label ids.
        out_mask = torch.argmax(probs, dim=0)
        out_mask = out_mask.detach().cpu().numpy().astype(np.uint8)
        # Translate internal ids back to the caller's original label values.
        final_mask = np.zeros_like(out_mask)
        for (k, v) in self.mapper.remappings.items():
            final_mask[(out_mask == v)] = k
        num_objs = final_mask.max()
        painted_image = frame
        for obj in range(1, (num_objs + 1)):
            # Skip label values absent from this frame.
            if (np.max((final_mask == obj)) == 0):
                continue
            painted_image = mask_painter(painted_image, (final_mask == obj).astype('uint8'), mask_color=(obj + 1))
        return (final_mask, final_mask, painted_image)
    _grad()
    def clear_memory(self):
        """Reset tracker memory and label mapping, then free cached GPU memory."""
        self.tracker.clear_memory()
        self.mapper.clear_labels()
        torch.cuda.empty_cache()
class ElementWiseArrayOperation(pm.SingleStateTransformation):
    """Distribute a purely element-wise Map across MPI ranks.

    Array inputs are scattered (scalars broadcast), the map range is shrunk
    by the communicator size symbol `commsize`, and array outputs are
    gathered back to the root rank.
    """
    map_entry = pm.PatternNode(nodes.MapEntry)

    # NOTE(review): in dace, `expressions` is normally a @classmethod; the
    # decorator appears to have been stripped in this copy -- confirm upstream.
    def expressions(cls):
        return [sdutil.node_path_graph(cls.map_entry)]

    def can_be_applied(self, graph: dace.SDFGState, expr_index: int, sdfg: dace.SDFG, permissive: bool=False):
        """Return True iff the matched map is element-wise and safe to distribute."""
        map_entry = self.map_entry
        map_exit = graph.exit_node(map_entry)
        params = [dace.symbol(p) for p in map_entry.map.params]
        # A range already parameterized by the communicator / process grid
        # symbols must not be distributed a second time.
        if ('commsize' in map_entry.map.range.free_symbols):
            return False
        if ('Px' in map_entry.map.range.free_symbols):
            return False
        if ('Py' in map_entry.map.range.free_symbols):
            return False
        # Code nodes must not reference the map parameters symbolically
        # (only through memlets), otherwise re-indexing would change results.
        for node in subgraph_from_maps(sdfg, graph, [map_entry]):
            if isinstance(node, dace.nodes.CodeNode):
                for p in params:
                    if (str(p) in node.free_symbols):
                        return False
        # Collect input accesses per data descriptor.
        inputs = dict()
        for (_, _, _, _, m) in graph.out_edges(map_entry):
            if (not m.data):
                continue
            desc = sdfg.arrays[m.data]
            if (desc not in inputs.keys()):
                inputs[desc] = []
            inputs[desc].append(m.subset)
        # Every non-scalar input must be read one element at a time, indexed
        # exactly by the map parameters (i.e. truly element-wise access).
        for (desc, accesses) in inputs.items():
            if isinstance(desc, dace.data.Scalar):
                continue
            elif isinstance(desc, (dace.data.Array, dace.data.View)):
                if (list(desc.shape) == [1]):
                    continue
                for a in accesses:
                    if (a.num_elements() != 1):
                        return False
                    indices = a.min_element()
                    unmatched_indices = set(params)
                    for idx in indices:
                        if (idx in unmatched_indices):
                            unmatched_indices.remove(idx)
                    if (len(unmatched_indices) > 0):
                        return False
            else:
                return False
        # Outputs: same element-wise requirement; write-conflict resolution
        # (wcr) is not supported by the scatter/gather rewrite.
        outputs = dict()
        for (_, _, _, _, m) in graph.in_edges(map_exit):
            if m.wcr:
                return False
            desc = sdfg.arrays[m.data]
            if (desc not in outputs.keys()):
                outputs[desc] = []
            outputs[desc].append(m.subset)
        for (desc, accesses) in outputs.items():
            if isinstance(desc, (dace.data.Array, dace.data.View)):
                for a in accesses:
                    if (a.num_elements() != 1):
                        return False
                    indices = a.min_element()
                    unmatched_indices = set(params)
                    for idx in indices:
                        if (idx in unmatched_indices):
                            unmatched_indices.remove(idx)
                    if (len(unmatched_indices) > 0):
                        return False
            else:
                return False
        return True

    def apply(self, graph: dace.SDFGState, sdfg: dace.SDFG):
        """Rewrite: scatter inputs, shrink the map range by `commsize`, gather outputs."""
        map_entry = self.map_entry
        map_exit = graph.exit_node(map_entry)
        sz = dace.symbol('commsize', dtype=dace.int32)

        def _prod(sequence):
            # Product of a sequence (1 for empty).
            return reduce((lambda a, b: (a * b)), sequence, 1)
        # 1-D maps keep their parameter; higher-dimensional maps are flattened
        # into a single '__iflat' parameter over the linearized index space.
        if (len(map_entry.map.params) == 1):
            params = map_entry.map.params
            ranges = [(0, ((((e - b) + 1) / sz) - 1), 1) for (b, e, _) in map_entry.map.range]
            strides = [1]
        else:
            params = ['__iflat']
            sizes = map_entry.map.range.size_exact()
            total_size = _prod(sizes)
            ranges = [(0, ((total_size / sz) - 1), 1)]
            strides = [_prod(sizes[(i + 1):]) for i in range(len(sizes))]
        # Transient scalar holding the MPI root rank (always 0).
        root_name = sdfg.temp_data_name()
        sdfg.add_scalar(root_name, dace.int32, transient=True)
        root_node = graph.add_access(root_name)
        root_tasklet = graph.add_tasklet('_set_root_', {}, {'__out'}, '__out = 0')
        graph.add_edge(root_tasklet, '__out', root_node, None, dace.Memlet.simple(root_name, '0'))
        from dace.libraries.mpi import Bcast, Scatter, Gather
        # Gather access nodes feeding the map; only full-array reads of
        # scalars/arrays are supported.
        inputs = set()
        for (src, _, _, _, m) in graph.in_edges(map_entry):
            if (not isinstance(src, nodes.AccessNode)):
                raise NotImplementedError
            desc = src.desc(sdfg)
            if (not isinstance(desc, (data.Scalar, data.Array))):
                raise NotImplementedError
            if (list(desc.shape) != m.src_subset.size_exact()):
                # Fall back to a string comparison for symbolic shapes.
                if (str(list(desc.shape)) != str(m.src_subset.size_exact())):
                    raise NotImplementedError
            inputs.add(src)
        for inp in inputs:
            desc = inp.desc(sdfg)
            if isinstance(desc, data.Scalar):
                # Scalars are broadcast to all ranks in place.
                local_access = graph.add_access(inp.data)
                bcast_node = Bcast('_Bcast_')
                graph.add_edge(inp, None, bcast_node, '_inbuffer', dace.Memlet.from_array(inp.data, desc))
                graph.add_edge(root_node, None, bcast_node, '_root', dace.Memlet.simple(root_name, '0'))
                graph.add_edge(bcast_node, '_outbuffer', local_access, None, dace.Memlet.from_array(inp.data, desc))
                for e in graph.edges_between(inp, map_entry):
                    graph.add_edge(local_access, None, map_entry, e.dst_conn, dace.Memlet.from_array(inp.data, desc))
                    graph.remove_edge(e)
            elif isinstance(desc, data.Array):
                # Arrays are scattered: each rank receives total_size/commsize elements.
                (local_name, local_arr) = sdfg.add_temp_transient([sympy.floor((desc.total_size / sz))], dtype=desc.dtype, storage=desc.storage)
                local_access = graph.add_access(local_name)
                scatter_node = Scatter('_Scatter_')
                graph.add_edge(inp, None, scatter_node, '_inbuffer', dace.Memlet.from_array(inp.data, desc))
                graph.add_edge(root_node, None, scatter_node, '_root', dace.Memlet.simple(root_name, '0'))
                graph.add_edge(scatter_node, '_outbuffer', local_access, None, dace.Memlet.from_array(local_name, local_arr))
                for e in graph.edges_between(inp, map_entry):
                    graph.add_edge(local_access, None, map_entry, e.dst_conn, dace.Memlet.from_array(local_name, local_arr))
                    graph.remove_edge(e)
                # Redirect interior reads of this array to the local buffer,
                # indexed by the (possibly flattened) map parameter.
                for e in graph.out_edges(map_entry):
                    if (e.data.data == inp.data):
                        e.data = dace.Memlet.simple(local_name, params[0])
            else:
                raise NotImplementedError
        # Outputs: mirror of the input handling, gathering local results back.
        outputs = set()
        for (_, _, dst, _, m) in graph.out_edges(map_exit):
            if (not isinstance(dst, nodes.AccessNode)):
                raise NotImplementedError
            desc = dst.desc(sdfg)
            if (not isinstance(desc, data.Array)):
                raise NotImplementedError
            try:
                if (list(desc.shape) != m.dst_subset.size_exact()):
                    if (str(list(desc.shape)) != str(m.dst_subset.size_exact())):
                        raise NotImplementedError
            except AttributeError:
                # Memlets without dst_subset fall back to the plain subset.
                if (list(desc.shape) != m.subset.size_exact()):
                    if (str(list(desc.shape)) != str(m.subset.size_exact())):
                        raise NotImplementedError
            outputs.add(dst)
        for out in outputs:
            desc = out.desc(sdfg)
            if isinstance(desc, data.Scalar):
                raise NotImplementedError
            elif isinstance(desc, data.Array):
                (local_name, local_arr) = sdfg.add_temp_transient([sympy.floor((desc.total_size / sz))], dtype=desc.dtype, storage=desc.storage)
                local_access = graph.add_access(local_name)
                # NOTE(review): variable is named `scatter_node` but holds a
                # Gather node -- functionally correct, name is just misleading.
                scatter_node = Gather('_Gather_')
                graph.add_edge(local_access, None, scatter_node, '_inbuffer', dace.Memlet.from_array(local_name, local_arr))
                graph.add_edge(root_node, None, scatter_node, '_root', dace.Memlet.simple(root_name, '0'))
                graph.add_edge(scatter_node, '_outbuffer', out, None, dace.Memlet.from_array(out.data, desc))
                for e in graph.edges_between(map_exit, out):
                    graph.add_edge(map_exit, e.src_conn, local_access, None, dace.Memlet.from_array(local_name, local_arr))
                    graph.remove_edge(e)
                # Redirect interior writes to the local gather buffer.
                for e in graph.in_edges(map_exit):
                    if (e.data.data == out.data):
                        e.data = dace.Memlet.simple(local_name, params[0])
            else:
                raise NotImplementedError
        # Finally, shrink the map itself to the per-rank range.
        map_entry.map.params = params
        map_entry.map.range = subsets.Range(ranges)
def validate_callable(property_name, obj):
    """Return `obj` unchanged if it is callable; otherwise raise TypeError."""
    if callable(obj):
        return obj
    raise TypeError(f'{property_name} must be callable and {type(obj)} is not.')
class Issue4RunEquality(unittest.TestCase):
    """Regression test (issue #4): a hard-coded command line must equal the
    template-constructed one, and the two RunIds must compare equal."""

    def setUp(self):
        self._path = os.path.dirname(os.path.realpath(__file__))

    def _create_template_run_id(self):
        # Bug fix: the original definition omitted `self`, so calling it as an
        # instance method raised TypeError.
        executor = Executor('MyVM', 'foo_bar_path', 'foo_bar_bin', None, None, None, None, None, None, 'benchmark', {})
        suite = BenchmarkSuite('MySuite', executor, '', '%(benchmark)s %(cores)s %(input)s', None, None, [], None, None, None)
        benchmark = Benchmark('TestBench', 'TestBench', None, suite, None, '3', ExpRunDetails.empty(), None, DataStore(TestDummyUI()))
        return RunId(benchmark, 1, 2, None, None)

    def _create_hardcoded_run_id(self):
        # Bug fix: same missing `self` parameter as above.
        executor = Executor('MyVM', 'foo_bar_path', 'foo_bar_bin', None, None, None, None, None, None, 'benchmark', {})
        suite = BenchmarkSuite('MySuite', executor, '', '%(benchmark)s %(cores)s 2 3', None, None, [], None, None, None)
        benchmark = Benchmark('TestBench', 'TestBench', None, suite, None, None, ExpRunDetails.empty(), None, DataStore(TestDummyUI()))
        return RunId(benchmark, 1, None, None, None)

    def test_hardcoded_equals_template_constructed(self):
        hard_coded = self._create_hardcoded_run_id()
        template = self._create_template_run_id()
        self.assertEqual(hard_coded.cmdline(), template.cmdline())
        self.assertEqual(hard_coded, template)
        self.assertTrue(hard_coded == template)
        self.assertFalse(hard_coded is template)
def matching_by_voting(src_token_list, tgt_token_list, tgt_attr_list):
    """Project per-target-token attributes onto coarser source tokens.

    Each source token is assumed to be a concatenation of one or more target
    tokens; its attribute is the majority vote over the attributes of the
    target tokens it covers (ties resolved by first occurrence).
    """
    assert len(src_token_list) <= len(tgt_token_list)
    assert len(tgt_token_list) == len(tgt_attr_list)
    src_attr_list = []
    idx_tgt = 0
    for src_token in src_token_list:
        votes = []
        pos = 0
        while idx_tgt < len(tgt_token_list):
            found = src_token.find(tgt_token_list[idx_tgt], pos)
            if found < 0:
                # Alignment broke down: log and move on to the next source token.
                logging.warning('For matching_by_voting, src: {}, tgt:{}'.format(src_token, str(tgt_token_list)))
                break
            votes.append(tgt_attr_list[idx_tgt])
            pos = found + len(tgt_token_list[idx_tgt])
            idx_tgt += 1
            if pos == len(src_token):
                break
        src_attr_list.append(Counter(votes).most_common()[0][0])
    assert len(src_token_list) == len(src_attr_list)
    return src_attr_list
def _arg_val(arg):
    """Render a protobuf-style operator argument as a display string.

    Scalar fields (f, i, s) take precedence over the repeated fields
    (floats, ints, strings); an argument with nothing set renders as '[]'.
    """
    for scalar_field in ('f', 'i'):
        if arg.HasField(scalar_field):
            return str(getattr(arg, scalar_field))
    if arg.HasField('s'):
        return _sanitize_str(arg.s)
    if arg.floats:
        return str(list(arg.floats))
    if arg.ints:
        return str(list(arg.ints))
    if arg.strings:
        return str([_sanitize_str(s) for s in arg.strings])
    return '[]'
class VAEEncoder(nn.Module):
    """Gaussian posterior head for a VAE over (observation, action) pairs.

    Features from the wrapped encoder are projected to a mean and a clamped
    log-std, defining a diagonal Normal over the latent space.
    """
    _encoder: EncoderWithAction
    _mu: nn.Module
    _logstd: nn.Module
    _min_logstd: float
    _max_logstd: float
    _latent_size: int

    def __init__(self, encoder: EncoderWithAction, hidden_size: int, latent_size: int, min_logstd: float=(- 20.0), max_logstd: float=2.0):
        super().__init__()
        self._encoder = encoder
        self._mu = nn.Linear(hidden_size, latent_size)
        self._logstd = nn.Linear(hidden_size, latent_size)
        self._min_logstd = min_logstd
        self._max_logstd = max_logstd
        self._latent_size = latent_size

    def forward(self, x: TorchObservation, action: torch.Tensor) -> Normal:
        """Encode (x, action) into a diagonal Normal over the latent space."""
        feats = self._encoder(x, action)
        # Clamp log-std so the resulting std stays in a numerically safe range.
        log_sigma = self._logstd(feats).clamp(self._min_logstd, self._max_logstd)
        return Normal(self._mu(feats), log_sigma.exp())

    def __call__(self, x: TorchObservation, action: torch.Tensor) -> Normal:
        return super().__call__(x, action)

    def latent_size(self) -> int:
        return self._latent_size
def decode(z):
    """Decode latent codes `z` in batches; return EOS-stripped word sequences."""
    sents = []
    for start in range(0, len(z), args.batch_size):
        batch = torch.tensor(z[start:start + args.batch_size], device=device)
        outputs = model.generate(batch, args.max_len, args.dec).t()
        # Drop the leading BOS token (s[1:]) and map ids back to words.
        sents.extend([vocab.idx2word[tok] for tok in s[1:]] for s in outputs)
    return strip_eos(sents)
class ModelArguments():
    """Arguments controlling which pretrained model/config/tokenizer to load.

    NOTE(review): the `field(...)` defaults below only take effect under a
    `@dataclass` decorator, which appears to have been stripped in this copy
    -- confirm against the original file.
    """
    # Required: model id on the Hugging Face Hub or a local path.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
class ThinkAgent():
    """LLM-driven agent that normalizes 'think' actions for a web-run loop."""

    def __init__(self, llm, context_len=2000):
        self.type = 'Think_Webrun_Agent'
        # Bug fix: `self.life_label` was read before ever being assigned,
        # so __init__ raised AttributeError. A unique per-instance label is
        # assigned first. NOTE(review): original intent for life_label is
        # unclear -- confirm the intended value with the author.
        self.life_label = id(self)
        self.name = f'{self.type}_{self.life_label}'
        self.llm = llm
        self.context_len = context_len
        self.task = None

    def action_parser(self, text, available_actions=None):
        """Rewrite a raw LLM completion into a canonical action string.

        Bug fix: forward() calls this with `available_actions`, but the
        original signature only accepted `text` (TypeError at runtime). The
        extra argument is accepted with a default (and currently unused) so
        both call styles work.
        """
        nor_text = text.strip().lower()
        if nor_text.startswith('think'):
            query = get_query(nor_text)
            return f'think[{query}]'
        return text

    def prompt_layer(self, agent, available_actions):
        """Build the prompt sent to the LLM for the current step."""
        one_shot = pre_prompt.click
        prompt = f'''{one_shot}{self.observations[self.cur_session][0]}
Action:'''
        actions_prompt = f'current available action is {self.avai_action_prompt(available_actions)}'
        # NOTE(review): the prompt built above is immediately discarded and
        # replaced by this bare one -- looks unintended, but preserved as-is
        # to avoid changing runtime behavior.
        prompt = f'''
Action:'''
        return prompt

    def llm_layer(self, prompt):
        """Thin indirection over the underlying LLM callable."""
        return self.llm(prompt)

    def forward(self, control_prompt, available_actions):
        """One agent step: build prompt, query the LLM, normalize the action."""
        prompt = self.prompt_layer(control_prompt, available_actions)
        action = self.llm_layer(prompt).lstrip(' ')
        action = self.action_parser(action, available_actions)
        return action
.experimental
# NOTE(review): the bare `.experimental` above looks like a mangled decorator
# (e.g. `@pytest.mark.experimental`) -- confirm against the original file.
def test_works(log, model):
    # Smoke test: fit_predict on `log` with k=1 must yield exactly 4 rows.
    try:
        pred = model.fit_predict(log, k=1)
        assert (pred.count() == 4)
    except:
        # NOTE(review): the bare except + fail() hides the real error; letting
        # the exception propagate would give a clearer failure message.
        pytest.fail()
class LinearWarmupScheduler(BaseLearningRateScheduler):
    """Wraps another scheduler and linearly ramps its learning rate over the
    first `warmup_iter` iterations (factor (iter+1)/warmup_iter)."""

    def __init__(self, scheduler, warmup_iter):
        self.scheduler = scheduler
        self.warmup_iter = warmup_iter

    def get_learning_rate(self, iter):
        """Base LR scaled during warmup, unscaled afterwards."""
        base_lr = self.scheduler.get_learning_rate(iter)
        if iter >= self.warmup_iter:
            return base_lr
        warmup_factor = ((iter + 1) * 1.0) / self.warmup_iter
        return base_lr * warmup_factor
def _set_SIGCHLD_handler():
    """Install (once, main-thread-only) a SIGCHLD handler surfacing worker failures.

    Chains to any previously-installed callable handler. No-op on Windows,
    off the main thread, or if a handler was already installed.
    """
    if sys.platform == 'win32':
        # Windows has no SIGCHLD.
        return
    if not isinstance(threading.current_thread(), threading._MainThread):
        # Signal handlers may only be installed from the main thread.
        return
    global _SIGCHLD_handler_set
    if _SIGCHLD_handler_set:
        return
    previous_handler = signal.getsignal(signal.SIGCHLD)
    if not callable(previous_handler):
        # SIG_DFL / SIG_IGN / None: nothing to chain to.
        previous_handler = None

    def handler(signum, frame):
        # Raise in the main process if any worker died unexpectedly, then
        # delegate to whatever handler was installed before us.
        _error_if_any_worker_fails()
        if previous_handler is not None:
            previous_handler(signum, frame)

    signal.signal(signal.SIGCHLD, handler)
    _SIGCHLD_handler_set = True
def parse_args(argv):
    """Build the CLI parser, parse `argv`, and validate required options."""
    parser = argparse.ArgumentParser(description=__doc__, allow_abbrev=False)
    # Register each option family in its own argument group.
    for title, add_flags in (
        ('General Options', opts.add_general_flags),
        ('Dataset Options', opts.add_dataset_flags),
        ('Model Options', opts.add_model_flags),
    ):
        add_flags(parser.add_argument_group(title))
    args = parser.parse_args(argv)
    if args.student_state_file is None:
        parser.error('You should set --model-state-file (student) to reload a model state.')
    return args
def _get_logger(name=None, level='INFO'):
    """Return a logger namespaced under ROOT_NAME at the requested level.

    A bare name gets prefixed with ROOT_NAME so all loggers share one root.
    """
    resolved_level = _get_level(level)
    logger_name = ROOT_NAME if name is None else name
    assert isinstance(logger_name, str)
    if not logger_name.startswith(ROOT_NAME):
        logger_name = '{}.{}'.format(ROOT_NAME, logger_name)
    logger = logging.getLogger(logger_name)
    logger.setLevel(resolved_level)
    return logger
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.