code stringlengths 101 5.91M |
|---|
class Cache():
    """A bounded FIFO cache backed by an OrderedDict.

    When the cache is full, `put` evicts the oldest *inserted* entry
    (`popitem(last=False)`); entries are not reordered on access, so this
    is FIFO rather than LRU eviction.
    """

    def __init__(self, capacity):
        """Create a cache holding at most `capacity` entries.

        Raises:
            ValueError: if `capacity` (after int conversion) is not positive.
        """
        self._capacity = int(capacity)
        # Validate the converted value (the original checked the raw argument
        # after already storing state, so e.g. capacity=0.5 slipped through
        # as a zero-capacity cache).
        if self._capacity <= 0:
            raise ValueError('capacity must be a positive integer')
        self._cache = OrderedDict()

    def capacity(self):
        """Return the maximum number of entries the cache can hold."""
        return self._capacity

    def size(self):
        """Return the current number of cached entries."""
        return len(self._cache)

    def put(self, key, val):
        """Insert `key` -> `val`, evicting the oldest entry when full.

        A key that is already present is left untouched (its value is NOT
        updated), matching the original semantics.
        """
        if key in self._cache:
            return
        # BUG FIX: the original compared `len(...) >= self.capacity`, i.e.
        # against the bound method object, which raises TypeError on the
        # first eviction. Compare against the stored integer instead.
        if len(self._cache) >= self._capacity:
            self._cache.popitem(last=False)
        self._cache[key] = val

    def get(self, key, default=None):
        """Return the value for `key`, or `default` if absent."""
        return self._cache.get(key, default)
class IteratorTest(AllenNlpTestCase):
    """Shared fixture for iterator tests: a tiny vocabulary plus a handful
    of single-field text instances built from it."""

    def setUp(self):
        super(IteratorTest, self).setUp()
        self.token_indexers = {'tokens': SingleIdTokenIndexer()}
        self.vocab = Vocabulary()
        # Register each word and remember its id as an attribute, e.g.
        # self.this_index, self.is_index, ... (registration order matters,
        # since it determines the assigned indices).
        for word in ('this', 'is', 'a', 'sentence', 'another', 'yet', 'very', 'long'):
            setattr(self, word + '_index', self.vocab.add_token_to_namespace(word))
        sentences = [
            ['this', 'is', 'a', 'sentence'],
            ['this', 'is', 'another', 'sentence'],
            ['yet', 'another', 'sentence'],
            ['this', 'is', 'a', 'very', 'very', 'very', 'very', 'long', 'sentence'],
            ['sentence'],
        ]
        self.instances = [self.create_instance(words) for words in sentences]
        self.dataset = Dataset(self.instances)

    def create_instance(self, str_tokens: List[str]):
        """Build and index a single-field Instance from raw word strings."""
        fields = {'text': TextField([Token(word) for word in str_tokens], self.token_indexers)}
        instance = Instance(fields)
        instance.index_fields(self.vocab)
        return instance

    def assert_instances_are_correct(self, candidate_instances):
        """Check that the candidates (ignoring zero padding) match the
        fixture instances as unordered sets of token-id tuples."""
        stripped = [tuple(token for token in inst if token != 0) for inst in candidate_instances]
        expected = [tuple(inst.fields['text']._indexed_tokens['tokens']) for inst in self.instances]
        assert set(stripped) == set(expected)
class Production(object):
    """A single grammar production (`name -> prod symbols`) for an LR
    parser generator.

    `usyms` holds the unique right-hand-side symbols in first-seen order,
    `len` caches len(prod), and `str` is the human-readable rendering
    ("name -> a b", or "name -> <empty>" for epsilon productions).
    """
    reduced = 0

    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence
        self.len = len(self.prod)
        # Unique RHS symbols, preserving first occurrence order.
        self.usyms = []
        for symbol in self.prod:
            if symbol not in self.usyms:
                self.usyms.append(symbol)
        self.lr_items = []
        self.lr_next = None
        rhs = ' '.join(self.prod) if self.prod else '<empty>'
        self.str = '%s -> %s' % (self.name, rhs)

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'Production(' + str(self) + ')'

    def __len__(self):
        return len(self.prod)

    def __nonzero__(self):
        # Python 2 truth protocol: a Production is always truthy.
        return 1

    def __getitem__(self, index):
        return self.prod[index]

    def lr_item(self, n):
        """Return the LRItem with the dot at position `n`, or None when `n`
        is past the end of the production."""
        if n > len(self.prod):
            return None
        item = LRItem(self, n)
        # The symbol just after the dot selects the productions that can
        # follow; missing symbol or unknown name means "none".
        try:
            item.lr_after = self.Prodnames[item.prod[n + 1]]
        except (IndexError, KeyError):
            item.lr_after = []
        try:
            item.lr_before = item.prod[n - 1]
        except IndexError:
            item.lr_before = None
        return item

    def bind(self, pdict):
        """Resolve the semantic-action name to a callable from `pdict`."""
        if self.func:
            self.callable = pdict[self.func]
class cv_Yolo():
    """Thin wrapper around OpenCV's DNN module for YOLOv3 object detection.

    Loads class labels, weights and config from `yolo_path` and exposes
    `detect(image)` returning a list of Detection(box_2d, class_name).
    """

    def __init__(self, yolo_path, confidence=0.5, threshold=0.3):
        """
        Args:
            yolo_path: directory containing coco.names, yolov3.weights and
                yolov3.cfg.
            confidence: minimum class score to keep a detection.
            threshold: NMS overlap threshold.
        """
        self.confidence = confidence
        self.threshold = threshold
        labels_path = os.path.sep.join([yolo_path, 'coco.names'])
        # Close the labels file instead of leaking the handle.
        with open(labels_path) as labels_file:
            self.labels = labels_file.read().split('\n')
        np.random.seed(42)  # fixed seed: stable per-class colors across runs
        self.colors = np.random.randint(0, 255, size=(len(self.labels), 3), dtype='uint8')
        weights_path = os.path.sep.join([yolo_path, 'yolov3.weights'])
        cfg_path = os.path.sep.join([yolo_path, 'yolov3.cfg'])
        self.net = cv2.dnn.readNetFromDarknet(cfg_path, weights_path)

    def detect(self, image):
        """Run YOLO on an image and return a list of Detection objects.

        Assumes `image` is an HxWxC array as produced by cv2.imread
        (BGR) -- TODO confirm at the call sites.
        """
        (H, W) = image.shape[:2]
        ln = self.net.getLayerNames()
        # BUG FIX: getUnconnectedOutLayers() returns a 2-D array ([[i], ...])
        # in OpenCV < 4.5.4 and a flat 1-D array afterwards; the original
        # `i[0] - 1` indexing crashes on the flat form. Flattening first
        # handles both layouts.
        out_layer_ids = np.asarray(self.net.getUnconnectedOutLayers()).flatten()
        ln = [ln[i - 1] for i in out_layer_ids]
        blob = cv2.dnn.blobFromImage(image, (1 / 255.0), (416, 416), swapRB=True, crop=False)
        self.net.setInput(blob)
        # Renamed from the original `output = ...; for output in output`,
        # which shadowed the list being iterated.
        layer_outputs = self.net.forward(ln)
        detections = []
        boxes = []
        confidences = []
        class_ids = []
        for layer_output in layer_outputs:
            for detection in layer_output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > self.confidence:
                    # YOLO emits (centerX, centerY, w, h) normalized to [0, 1];
                    # scale back to pixel coordinates.
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype('int')
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)
        # Non-maximum suppression to drop overlapping duplicates.
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confidence, self.threshold)
        if len(idxs) > 0:
            # NMSBoxes also changed return shape across versions; flatten
            # defensively as with the layer ids above.
            for i in np.asarray(idxs).flatten():
                top_left = (boxes[i][0], boxes[i][1])
                bottom_right = (top_left[0] + boxes[i][2], top_left[1] + boxes[i][3])
                box_2d = [top_left, bottom_right]
                class_ = self.get_class(class_ids[i])
                if class_ == 'person':
                    # Downstream code expects the label 'pedestrian'.
                    class_ = 'pedestrian'
                detections.append(Detection(box_2d, class_))
        return detections

    def get_class(self, class_id):
        """Map a numeric class id to its label string."""
        return self.labels[class_id]
class TestActivationCheckpointing(unittest.TestCase):
    """Verify that activation checkpointing (PyTorch's and fairseq's)
    yields the same loss and gradient norm as the unchckpointed model."""

    def _test_checkpoint_wrapper(self, device, log_memory_usage=False):
        def get_loss_and_gnorm(model):
            # Fixed seed so all three model variants see the same input.
            torch.manual_seed(1)
            input = torch.rand(2, 16, 32).requires_grad_(True).to(device)
            model.zero_grad()
            loss = model(input).sum()
            loss.backward()
            # Global gradient norm over all parameters.
            gnorm = torch.norm(torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]))
            return {'loss': loss, 'gnorm': gnorm}

        # Baseline: no checkpointing.
        model = Model().to(device)
        no_cpt = get_loss_and_gnorm(model)

        # torch.utils.checkpoint-based wrapper must match the baseline.
        model = Model(use_pytorch_checkpoint=True).to(device)
        pyt_cpt = get_loss_and_gnorm(model)
        torch.testing.assert_allclose(no_cpt['loss'], pyt_cpt['loss'])
        torch.testing.assert_allclose(no_cpt['gnorm'], pyt_cpt['gnorm'])

        # fairseq's checkpoint wrapper must match as well.
        model = Model(use_fairseq_checkpoint=True).to(device)
        fairseq_cpt = get_loss_and_gnorm(model)
        torch.testing.assert_allclose(no_cpt['loss'], fairseq_cpt['loss'])
        torch.testing.assert_allclose(no_cpt['gnorm'], fairseq_cpt['gnorm'])

    def test_checkpoint_wrapper_cpu(self):
        self._test_checkpoint_wrapper(device=torch.device('cpu'))

    # BUG FIX: the skip condition was a bare tuple expression with no
    # effect, so this test ran (and failed) on CPU-only machines. Restore
    # it as the intended unittest.skipIf decorator.
    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_checkpoint_wrapper_cuda(self):
        self._test_checkpoint_wrapper(device=torch.device('cuda'))
class IBMCloudConfig(AuthenticationConfig):
    """Authentication settings for IBM Cloud.

    All fields default to None/False so the object can be constructed
    empty and populated from the environment or a config file.
    NOTE(review): this looks like a pydantic/dataclass-style config
    (annotated class attributes) -- confirm against AuthenticationConfig.
    """
    # HMAC-style access credentials.
    ibmcloud_access_id: Optional[str] = None
    ibmcloud_secret_key: Optional[str] = None
    # IAM API key and optional custom IAM endpoint.
    ibmcloud_iam_key: Optional[str] = None
    ibmcloud_iam_endpoint: Optional[str] = None
    ibmcloud_useragent: Optional[str] = None
    ibmcloud_resource_group_id: Optional[str] = None
    # Whether IBM Cloud support is enabled at all.
    ibmcloud_enabled: bool = False

    def make_auth_provider(self) -> compute.IBMCloudAuthentication:
        """Build the authentication provider backed by this config."""
        return compute.IBMCloudAuthentication(config=self)
class TestModifiers():
    """Smoke tests for the DSModifier_* family.

    Every modifier is expected to write its output under
    '<ds_path>#<modifier_name>/images/'.
    """

    def _run_modifier(self, modifier, out_name, err_msg):
        """Run `modifier` on the images folder and assert its output
        directory appears.

        The output tree '<ds_path>#<out_name>' is removed before and after
        the run so each check starts from a clean state and leaves none
        behind.
        """
        img_path = os.path.join(ds_path, 'images')
        out_dir = ds_path + '#' + out_name
        shutil.rmtree(out_dir, ignore_errors=True)
        modifier.modify(data_input=img_path)
        assert os.path.exists(out_dir + '/images/.jpg'), err_msg
        shutil.rmtree(out_dir, ignore_errors=True)

    def test_run(self):
        # Plain directory copy.
        self._run_modifier(DSModifier_dir(), 'dir_modifier', 'DSModifier_dir Failed')
        # JPEG re-encoding at quality 85 (instance reused below for chaining).
        jpg85 = DSModifier_jpg(params={'quality': 85})
        self._run_modifier(jpg85, 'jpg85_modifier', 'DSModifier_jpg Failed')
        # 5-bit quantization.
        self._run_modifier(DSModifier_quant(params={'bits': 5}), 'quant5_modifier', 'DSModifier_quant Failed')
        # Chained modifier: quantization applied on top of jpg85.
        jpg85_mas_q5 = DSModifier_quant(params={'bits': 5}, ds_modifier=jpg85)
        self._run_modifier(jpg85_mas_q5, 'jpg85_modifier#quant5_modifier', 'JPG+Quant modifier Failed')
        # RER modifier with explicit params: output dir named by the instance.
        rer_explicit = DSModifier_rer(params={'initial_rer': 0.54, 'rer': 0.1})
        self._run_modifier(rer_explicit, rer_explicit.name, 'DSModifierBlur Failed')
        # RER modifier with defaults: name is derived from the default 'rer'.
        rer_default = DSModifier_rer()
        default_name = f"rer{rer_default.params['rer']}_modifier"
        self._run_modifier(rer_default, default_name, 'DSModifierBlur Failed')
        # SNR (noise) modifier, named by the instance.
        noisemodif = DSModifier_snr()
        self._run_modifier(noisemodif, noisemodif.name, 'DSModifierNoise Failed')
        # Gaussian blur with sigma 2.0.
        self._run_modifier(DSModifier_blur(params={'sigma': 2.0}), 'blur2.0_modifier', 'DSModifier_blur Failed')
        # Sharpness enhancement with factor 2.0.
        self._run_modifier(DSModifier_sharpness(params={'sharpness': 2.0}), 'sharpness2.0_modifier', 'DSModifier_sharpness Failed')
        # GSD resampling: scale 2.0 at 0.3 m native resolution -> 0.6 m output.
        gsd2 = DSModifier_gsd(params={'scale': 2.0, 'interpolation': 2, 'resol': 0.3})
        self._run_modifier(gsd2, 'gsd0.6_modifier', 'DSModifier_gsd Failed')
def _patch_arguments_(gm: GraphModule, mapping: Union[(Dict[(Node, int)], Dict[(int, Node)])], lint_and_recompile: bool=True):
def _patch_slice(s, mapping):
return slice(mapping.get(s.start, s.start), mapping.get(s.stop, s.stop), mapping.get(s.step, s.step))
graph = gm.graph
supported_types = (Node, str, int, float)
for node in graph.nodes:
new_args = []
for arg in node.args:
if isinstance(arg, tuple):
new_arg = []
for a in arg:
if isinstance(a, slice):
new_arg.append(_patch_slice(a, mapping))
else:
new_arg.append(mapping.get(a, a))
new_args.append(tuple(new_arg))
elif isinstance(arg, slice):
new_args.append(_patch_slice(arg, mapping))
elif isinstance(arg, supported_types):
new_args.append(mapping.get(arg, arg))
else:
new_args.append(arg)
node.args = tuple(new_args)
if lint_and_recompile:
graph.lint()
gm.recompile() |
class VELOLValidation(VELOL):
    """Validation variant of the VELOL dataset: always loads the 'test'
    split and uses deterministic preprocessing instead of random
    augmentation."""

    def __init__(self, dir_data, **kwargs):
        # Force the 'test' split; all other options pass through to VELOL.
        super().__init__(dir_data, split='test', **kwargs)
        # Deterministic pipeline: center crop (self.crop_size is set by the
        # base class) followed by tensor conversion.
        self.transforms = tf.Compose([CenterCrop(size=self.crop_size), ImageToLDMTensor()])
def object2Element(ctxObj):
    """Convert a context object (attributes/metadata keyed by name) into
    the element form, where 'attributes' and 'domainMetadata' are lists of
    {'name', 'type', 'value'} dicts.

    Missing 'attributes' or 'metadata' sections yield empty lists.
    """
    attributes = []
    if 'attributes' in ctxObj:
        attributes = [
            {'name': name, 'type': attr['type'], 'value': attr['value']}
            for (name, attr) in ctxObj['attributes'].items()
        ]
    metadata = []
    if 'metadata' in ctxObj:
        metadata = [
            {'name': name, 'type': meta['type'], 'value': meta['value']}
            for (name, meta) in ctxObj['metadata'].items()
        ]
    return {
        'entityId': ctxObj['entityId'],
        'attributes': attributes,
        'domainMetadata': metadata,
    }
class MLTSVMTest(ClassifierBaseTest):
    """Integration tests for the MLTSVM classifier: sparse input, dense
    input, and cross-validation.

    NOTE(review): the test method names say 'mlknn' but the classifier
    under test is MLTSVM -- likely copy-paste from an MLkNN suite.
    Renaming would change test ids, so it is only flagged here.
    """
    # Not referenced by the methods shown here; presumably consumed by the
    # base class or kept for parity with sibling suites -- confirm.
    TEST_NEIGHBORS = 3

    def classifiers(self):
        # Small regularization constant (2**-4) keeps the test runs fast.
        return [MLTSVM(c_k=(2 ** (- 4)))]

    def test_if_mlknn_classification_works_on_sparse_input(self):
        for classifier in self.classifiers():
            self.assertClassifierWorksWithSparsity(classifier, 'sparse')

    def test_if_mlknn_classification_works_on_dense_input(self):
        for classifier in self.classifiers():
            self.assertClassifierWorksWithSparsity(classifier, 'dense')

    def test_if_mlknn_works_with_cross_validation(self):
        for classifier in self.classifiers():
            self.assertClassifierWorksWithCV(classifier)
def matmul_flop_jit(inputs: List[Any], outputs: List[Any]) -> typing.Counter[str]:
    """Count FLOPs for a matmul op given its traced inputs.

    Expects exactly two operands whose inner dimensions agree; the count is
    prod(shape_A) * last_dim(shape_B), returned as a Counter keyed 'matmul'.
    """
    shapes = [get_shape(operand) for operand in inputs]
    assert len(shapes) == 2, shapes
    # Inner dimensions must line up for A @ B.
    assert shapes[0][-1] == shapes[1][-2], shapes
    flops = prod(shapes[0]) * shapes[-1][-1]
    return Counter({'matmul': flops})
def register_Ns3LtePdcpSapProvider_methods(root_module, cls):
    """Register constructors and methods of ns3::LtePdcpSapProvider on its
    PyBindGen class wrapper (auto-generated binding code)."""
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LtePdcpSapProvider const &', 'arg0')])
    # Pure virtual: concrete SAP providers must implement TransmitPdcpSdu.
    cls.add_method('TransmitPdcpSdu', 'void', [param('ns3::LtePdcpSapProvider::TransmitPdcpSduParameters', 'params')], is_pure_virtual=True, is_virtual=True)
    return
def test_qmanager__measure_density():
    """Statistical test of QuantumManagerDensity._measure / run_circuit:
    measuring balanced single-qubit states should give ~50/50 outcomes and
    collapse the stored state to the matching basis projector."""
    NUM_TESTS = 1000
    qm = QuantumManagerDensity()
    meas_0 = []
    meas_1 = []
    # |+> state as a density matrix: outer product of [1/sqrt(2), 1/sqrt(2)].
    state_single = [math.sqrt((1 / 2)), math.sqrt((1 / 2))]
    state = np.outer(state_single, state_single)
    for _ in range(NUM_TESTS):
        key = qm.new()
        samp = np.random.random()
        res = qm._measure(state, [key], [key], samp)
        if res[key]:
            meas_1.append(key)
        else:
            meas_0.append(key)
    # Roughly half of the outcomes should be 0 (10% tolerance).
    assert (abs(((len(meas_0) / NUM_TESTS) - 0.5)) < 0.1)
    # Post-measurement states must be the projectors |0><0| and |1><1|.
    for key in meas_0:
        assert np.all((qm.get(key).state == np.array([[1, 0], [0, 0]])))
    for key in meas_1:
        assert np.all((qm.get(key).state == np.array([[0, 0], [0, 1]])))
    meas_0 = []
    meas_1 = []
    # Maximally mixed single-qubit state: same 50/50 statistics expected.
    state = [[0.5, 0], [0, 0.5]]
    for _ in range(NUM_TESTS):
        key = qm.new()
        samp = np.random.random()
        res = qm._measure(state, [key], [key], samp)
        if res[key]:
            meas_1.append(key)
        else:
            meas_0.append(key)
    assert (abs(((len(meas_0) / NUM_TESTS) - 0.5)) < 0.1)
    meas_0 = []
    meas_1 = []
    # Measure only qubit 0 of a two-qubit register via a circuit.
    for _ in range(NUM_TESTS):
        key1 = qm.new(state)
        key2 = qm.new()
        samp = np.random.random()
        circuit = Circuit(2)
        circuit.measure(0)
        res = qm.run_circuit(circuit, [key1, key2], samp)
        if res[key1]:
            meas_1.append(key1)
        else:
            meas_0.append(key1)
    assert (abs(((len(meas_0) / NUM_TESTS) - 0.5)) < 0.1)
    meas_0 = []
    meas_2 = []
    # Measure both qubits; the second starts in the default |0> state (qm.new()
    # with no argument), so it must always read 0.
    for _ in range(NUM_TESTS):
        key1 = qm.new(state)
        key2 = qm.new()
        samp = np.random.random()
        circuit = Circuit(2)
        circuit.measure(0)
        circuit.measure(1)
        res = qm.run_circuit(circuit, [key1, key2], samp)
        if res[key1]:
            meas_2.append(key1)
        else:
            meas_0.append(key1)
        assert (res[key2] == 0)
    assert (abs(((len(meas_0) / NUM_TESTS) - 0.5)) < 0.1)
def get_slot_code_by_name(scope, slot_name):
    """Look up the slot named `slot_name` and render its code for `scope`."""
    return get_slot_by_name(slot_name).slot_code(scope)
class ReducerConfig(Config):
    """Configuration for the reducer component.

    Settings live as class-level attributes, presumably read or overridden
    through the Config base class -- confirm.
    NOTE(review): `storage_backend` is a mutable class attribute shared by
    all instances; mutating it on one instance affects every other.
    """
    # Directory holding the compute bundle; populated externally.
    compute_bundle_dir = None
    # Where trained models are stored.
    models_dir = None
    # Optional seed model to start from.
    initial_model = None
    # Default storage backend: S3 bucket named 'models'.
    storage_backend = {'type': 's3', 'settings': {'bucket': 'models'}}

    def __init__(self):
        # Intentionally(?) does not call Config.__init__ -- TODO confirm
        # this is deliberate and not an oversight.
        pass
class WhileScope(ControlFlowScope):
    """Scope for a `while` loop: renders the header line, then delegates
    the body to the base class."""
    # Underlying control-flow header that holds the loop's test expression.
    header: cf.WhileScope

    def as_string(self, indent: int=0):
        """Render this scope as source text at the given indent level."""
        # NOTE(review): `self.header.test.as_string` is interpolated without
        # parentheses -- if `as_string` is a method on the test node (as it
        # is on this class) rather than a property, this embeds a
        # bound-method repr; confirm against the cf module.
        result = ((indent * INDENTATION) + f'''while {self.header.test.as_string}:
''')
        return (result + super().as_string(indent))
def load_vocab(vocab_file):
    """Load a newline-delimited vocabulary file into an OrderedDict mapping
    token -> integer id, with ids assigned in file order."""
    vocab = collections.OrderedDict()
    with tf.gfile.GFile(vocab_file, 'r') as reader:
        # readline() returns '' only at EOF, which iter() uses as sentinel.
        lines = iter(lambda: convert_to_unicode(reader.readline()), '')
        for index, line in enumerate(lines):
            vocab[line.strip()] = index
    return vocab
def test_omop_concept_code_labeler(tmp_path: pathlib.Path):
    """Check that DummyLabeler_OMOPConcept expands its OMOP concept roots
    to the full set of descendant codes, and preserves the prediction
    codes and time horizon it was constructed with."""
    # Outcome window: 10 days starting at the prediction time.
    time_horizon = TimeHorizon(datetime.timedelta(days=0), datetime.timedelta(days=10))
    ontology = DummyOntology_OMOPConcept()
    labeler = DummyLabeler_OMOPConcept(ontology, time_horizon, prediction_codes=['1', '2'])
    # Expected: both roots (A, B) plus every descendant in the dummy ontology.
    assert (set(labeler.outcome_codes) == {'OMOP_CONCEPT_A_CHILD_CHILD', 'OMOP_CONCEPT_B', 'OMOP_CONCEPT_B_CHILD', 'OMOP_CONCEPT_A_CHILD2', 'OMOP_CONCEPT_A', 'OMOP_CONCEPT_A_CHILD'})
    assert (labeler.prediction_codes == ['1', '2'])
    assert (labeler.get_time_horizon() == time_horizon)
class Experiment(object):
    """A single named experiment.

    Merges the global configuration with experiment-specific overrides,
    persists the merged configuration next to the experiment (or reloads a
    previously persisted one), and builds or restores the trainer/evaluator
    pipeline via PipelineFactory.
    """

    def __init__(self, name, experiments_path, results_path, global_configuration, experiment_configuration, seed):
        self._name = name
        self._experiments_path = experiments_path
        self._results_path = results_path
        self._global_configuration = global_configuration
        self._experiment_configuration = experiment_configuration
        self._seed = seed
        # Ensure both output directories exist (os.mkdir: parent must exist).
        if (not os.path.isdir(results_path)):
            ConsoleLogger.status('Creating results directory at path: {}'.format(results_path))
            os.mkdir(results_path)
        else:
            ConsoleLogger.status('Results directory already created at path: {}'.format(results_path))
        if (not os.path.isdir(experiments_path)):
            ConsoleLogger.status('Creating experiments directory at path: {}'.format(experiments_path))
            os.mkdir(experiments_path)
        else:
            ConsoleLogger.status('Experiments directory already created at path: {}'.format(experiments_path))
        experiments_configuration_path = (((experiments_path + os.sep) + name) + '_configuration.yaml')
        configuration_file_already_exists = (True if os.path.isfile(experiments_configuration_path) else False)
        if (not configuration_file_already_exists):
            # First run: merge experiment overrides into a deep copy of the
            # global configuration (only keys already present are overridden)
            # and persist the result for reproducibility.
            self._device_configuration = DeviceConfiguration.load_from_configuration(global_configuration)
            self._configuration = copy.deepcopy(self._global_configuration)
            for experiment_key in experiment_configuration.keys():
                if (experiment_key in self._configuration):
                    self._configuration[experiment_key] = experiment_configuration[experiment_key]
            with open(experiments_configuration_path, 'w') as file:
                yaml.dump(self._configuration, file)
        else:
            # Re-run: reload the previously persisted merged configuration.
            with open(experiments_configuration_path, 'r') as file:
                self._configuration = yaml.load(file, Loader=yaml.FullLoader)
            self._device_configuration = DeviceConfiguration.load_from_configuration(self._configuration)
        if configuration_file_already_exists:
            # Resume: restore trainer/evaluator (and configs) from disk.
            (self._trainer, self._evaluator, self._configuration, self._device_configuration) = PipelineFactory.load(self._experiments_path, self._name, self._results_path)
        else:
            (self._trainer, self._evaluator) = PipelineFactory.build(self._configuration, self._device_configuration, self._experiments_path, self._name, self._results_path)

    def device_configuration(self):
        return self._device_configuration

    def experiment_path(self):
        return self._experiments_path

    def name(self):
        return self._name

    def seed(self):
        return self._seed

    def results_path(self):
        return self._results_path

    def configuration(self):
        # Returns the raw experiment overrides, not the merged configuration.
        return self._experiment_configuration

    def train(self):
        """Run the training pipeline for this experiment."""
        ConsoleLogger.status("Running the experiment called '{}'".format(self._name))
        ConsoleLogger.status('Begins to train the model')
        self._trainer.train()
        ConsoleLogger.success("Succeed to runned the experiment called '{}'".format(self._name))

    def evaluate(self, evaluation_options):
        """Run the evaluation pipeline with the given options."""
        ConsoleLogger.status("Running the experiment called '{}'".format(self._name))
        ConsoleLogger.status('Begins to evaluate the model')
        self._evaluator.evaluate(evaluation_options)
        ConsoleLogger.success("Succeed to runned the experiment called '{}'".format(self._name))
class AnnotateEM():
    """Annotate a ranking with exact-match labels.

    Each ranked passage is labeled against the question's tokenized
    answers (in parallel), then success@k / counts@k metrics are
    accumulated over the judged queries.
    """

    def __init__(self, collection, qas):
        qas = load_qas_(qas)
        collection = Collection.cast(collection)
        # Worker pool reused for answer tokenization and passage labeling.
        self.parallel_pool = Pool(30)
        print_message('#> Tokenize the answers in the Q&As in parallel...')
        qas = list(self.parallel_pool.map(tokenize_all_answers, qas))
        qid2answers = {qid: tok_answers for (qid, _, tok_answers) in qas}
        # Duplicate qids would silently collapse into the dict; guard here.
        assert (len(qas) == len(qid2answers)), (len(qas), len(qid2answers))
        (self.qas, self.collection) = (qas, collection)
        self.qid2answers = qid2answers

    def annotate(self, ranking):
        """Label every (qid, pid, rank) entry of `ranking` and return a new
        Ranking carrying the labels plus provenance."""
        rankings = Ranking.cast(ranking)
        print_message('#> Lookup passages from PIDs...')
        # Attach passage text and tokenized answers to each ranking row.
        expanded_rankings = [(qid, pid, rank, self.collection[pid], self.qid2answers[qid]) for (qid, pid, rank, *_) in rankings.tolist()]
        print_message('#> Assign labels in parallel...')
        labeled_rankings = list(self.parallel_pool.map(assign_label_to_passage, enumerate(expanded_rankings)))
        # Group labeled rows back per query id.
        self.qid2rankings = groupby_first_item(labeled_rankings)
        (self.num_judged_queries, self.num_ranked_queries) = check_sizes(self.qid2answers, self.qid2rankings)
        (self.success, self.counts) = self._compute_labels(self.qid2answers, self.qid2rankings)
        print(rankings.provenance(), self.success)
        return Ranking(data=self.qid2rankings, provenance=('AnnotateEM', rankings.provenance()))

    def _compute_labels(self, qid2answers, qid2rankings):
        """Accumulate success@cutoff (any hit in the top-k) and
        counts@cutoff (number of hits in the top-k) over judged queries."""
        cutoffs = [1, 5, 10, 20, 30, 50, 100, 1000, 'all']
        success = {cutoff: 0.0 for cutoff in cutoffs}
        counts = {cutoff: 0.0 for cutoff in cutoffs}
        for qid in qid2answers:
            if (qid not in qid2rankings):
                continue
            prev_rank = 0
            labels = []
            for (pid, rank, label) in qid2rankings[qid]:
                # Ranks must be contiguous and 1-based within each query.
                assert (rank == (prev_rank + 1)), (qid, pid, (prev_rank, rank))
                prev_rank = rank
                labels.append(label)
            for cutoff in cutoffs:
                if (cutoff != 'all'):
                    success[cutoff] += (sum(labels[:cutoff]) > 0)
                    counts[cutoff] += sum(labels[:cutoff])
                else:
                    success[cutoff] += (sum(labels) > 0)
                    counts[cutoff] += sum(labels)
        return (success, counts)

    def save(self, new_path):
        """Dump the labeled ranking plus a '.metrics' sidecar with
        normalized success/counts (keys get a '__WARNING' suffix when the
        judged and ranked query counts disagree)."""
        print_message('#> Dumping output to', new_path, '...')
        Ranking(data=self.qid2rankings).save(new_path)
        with Run().open(f'{new_path}.metrics', 'w') as f:
            d = {'num_ranked_queries': self.num_ranked_queries, 'num_judged_queries': self.num_judged_queries}
            extra = ('__WARNING' if (self.num_judged_queries != self.num_ranked_queries) else '')
            d[f'success{extra}'] = {k: (v / self.num_judged_queries) for (k, v) in self.success.items()}
            d[f'counts{extra}'] = {k: (v / self.num_judged_queries) for (k, v) in self.counts.items()}
            f.write((format_metadata(d) + '\n'))
def _get_psd_matrix(N):
    """Return an N x N positive-definite matrix: a squared-exponential
    kernel evaluated on a grid over [-1, 1], plus a small jitter on the
    diagonal for numerical stability."""
    from gpflow.kernels import SquaredExponential
    grid = np.linspace((- 1), 1, N).reshape((- 1), 1)
    kernel_matrix = SquaredExponential()(grid, full_cov=True).numpy()
    jitter = 1e-06 * np.eye(N, dtype=kernel_matrix.dtype)
    return kernel_matrix + jitter
class DRN(nn.Module):
    """Dilated Residual Network (arch variants 'C' and 'D').

    The stem and layers 1-2 differ per arch (residual BasicBlocks for 'C',
    plain conv stacks for 'D'); layers 3-6 are residual stages with growing
    dilation; layers 7-8 form the de-gridding tail. `forward` returns class
    scores (dense map when `out_map`, pooled logits otherwise), plus the
    per-stage feature maps when `out_middle` is set.
    """

    def __init__(self, block, layers, num_classes=1000, channels=(16, 32, 64, 128, 256, 512, 512, 512), out_map=False, out_middle=False, pool_size=28, arch='D'):
        super(DRN, self).__init__()
        self.inplanes = channels[0]
        self.out_map = out_map  # True: dense per-pixel logits, no pooling
        self.out_dim = channels[(- 1)]
        self.out_middle = out_middle  # also return intermediate features
        self.arch = arch
        # NOTE(review): only 'C' and 'D' are handled; any other arch leaves
        # the stem layers undefined and forward() would fail.
        if (arch == 'C'):
            self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False)
            self.bn1 = BatchNorm(channels[0])
            self.relu = nn.ReLU(inplace=True)
            self.layer1 = self._make_layer(BasicBlock, channels[0], layers[0], stride=1)
            self.layer2 = self._make_layer(BasicBlock, channels[1], layers[1], stride=2)
        elif (arch == 'D'):
            self.layer0 = nn.Sequential(nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
            self.layer1 = self._make_conv_layers(channels[0], layers[0], stride=1)
            self.layer2 = self._make_conv_layers(channels[1], layers[1], stride=2)
        self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
        self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
        # Later stages keep spatial resolution and grow the receptive field
        # through dilation instead of striding.
        self.layer5 = self._make_layer(block, channels[4], layers[4], dilation=2, new_level=False)
        self.layer6 = (None if (layers[5] == 0) else self._make_layer(block, channels[5], layers[5], dilation=4, new_level=False))
        if (arch == 'C'):
            # De-gridding layers: non-residual BasicBlocks, decreasing dilation.
            self.layer7 = (None if (layers[6] == 0) else self._make_layer(BasicBlock, channels[6], layers[6], dilation=2, new_level=False, residual=False))
            self.layer8 = (None if (layers[7] == 0) else self._make_layer(BasicBlock, channels[7], layers[7], dilation=1, new_level=False, residual=False))
        elif (arch == 'D'):
            self.layer7 = (None if (layers[6] == 0) else self._make_conv_layers(channels[6], layers[6], dilation=2))
            self.layer8 = (None if (layers[7] == 0) else self._make_conv_layers(channels[7], layers[7], dilation=1))
        if (num_classes > 0):
            self.avgpool = nn.AvgPool2d(pool_size)
            # 1x1 conv acts as the final fully-connected classifier.
            self.fc = nn.Conv2d(self.out_dim, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
        # He-style initialization for convs; unit weight / zero bias for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, new_level=True, residual=True):
        """Stack `blocks` residual blocks; the first may downsample and, on
        a new dilation level, uses half the dilation for its first conv."""
        assert ((dilation == 1) or ((dilation % 2) == 0))
        downsample = None
        # Projection shortcut when the shape changes between input/output.
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm((planes * block.expansion)))
        layers = list()
        layers.append(block(self.inplanes, planes, stride, downsample, dilation=((1, 1) if (dilation == 1) else (((dilation // 2) if new_level else dilation), dilation)), residual=residual))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, residual=residual, dilation=(dilation, dilation)))
        return nn.Sequential(*layers)

    def _make_conv_layers(self, channels, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU triples; only the first conv strides."""
        modules = []
        for i in range(convs):
            modules.extend([nn.Conv2d(self.inplanes, channels, kernel_size=3, stride=(stride if (i == 0) else 1), padding=dilation, bias=False, dilation=dilation), BatchNorm(channels), nn.ReLU(inplace=True)])
            self.inplanes = channels
        return nn.Sequential(*modules)

    def forward(self, x):
        y = list()  # per-stage outputs, returned when out_middle is set
        if (self.arch == 'C'):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
        elif (self.arch == 'D'):
            x = self.layer0(x)
        x = self.layer1(x)
        y.append(x)
        x = self.layer2(x)
        y.append(x)
        x = self.layer3(x)
        y.append(x)
        x = self.layer4(x)
        y.append(x)
        x = self.layer5(x)
        y.append(x)
        if (self.layer6 is not None):
            x = self.layer6(x)
            y.append(x)
        if (self.layer7 is not None):
            x = self.layer7(x)
            y.append(x)
        if (self.layer8 is not None):
            x = self.layer8(x)
            y.append(x)
        if self.out_map:
            # Dense prediction: apply the classifier per spatial location.
            x = self.fc(x)
        else:
            # Global pooling, 1x1-conv classifier, flatten to (N, classes).
            x = self.avgpool(x)
            x = self.fc(x)
            x = x.view(x.size(0), (- 1))
        if self.out_middle:
            return (x, y)
        else:
            return x
def get_version():
    """Compute the detectron2 version string from detectron2/__init__.py.

    Appends D2_VERSION_SUFFIX when set; for nightly builds
    (BUILD_NIGHTLY=1) also appends '.dev<yymmdd>' and writes the new
    version back into __init__.py so the built package reports it.
    """
    init_py_path = path.join(path.abspath(path.dirname(__file__)), 'detectron2', '__init__.py')
    # BUG FIX: the original used a bare open(...).readlines(), leaking the
    # file handle; use a context manager instead.
    with open(init_py_path, 'r') as f:
        init_py = f.readlines()
    version_line = [l.strip() for l in init_py if l.startswith('__version__')][0]
    version = version_line.split('=')[(- 1)].strip().strip('\'"')
    suffix = os.getenv('D2_VERSION_SUFFIX', '')
    version = (version + suffix)
    if (os.getenv('BUILD_NIGHTLY', '0') == '1'):
        from datetime import datetime
        date_str = datetime.today().strftime('%y%m%d')
        version = ((version + '.dev') + date_str)
        # Rewrite __init__.py with the dated nightly version.
        new_init_py = [l for l in init_py if (not l.startswith('__version__'))]
        new_init_py.append('__version__ = "{}"\n'.format(version))
        with open(init_py_path, 'w') as f:
            f.write(''.join(new_init_py))
    return version
def get_unique_stat_by_matcher(stats: List[Stat], matcher: MetricNameMatcher) -> Optional[Stat]:
    """Return the single Stat selected by `matcher`, or None when nothing
    matches.

    Falls back from 'quasi_exact_match' to 'exact_match' when the former
    has no matches; when no sub_split is requested, per-sub-split stats
    are merged into one before the uniqueness check.
    """
    matching_stats = [stat for stat in stats if matcher.matches(stat.name)]
    if not matching_stats:
        if matcher.name != 'quasi_exact_match':
            return None
        # Fallback: retry the lookup with exact_match.
        hlog('WARNING: No quasi_exact_match metric found, looking for exact_match instead')
        matcher = replace(matcher, name='exact_match')
        matching_stats = [stat for stat in stats if matcher.matches(stat.name)]
        if not matching_stats:
            return None
    if matcher.sub_split is None:
        # Collapse sub-split variants of the same metric into one stat.
        merged: Dict[(MetricName, Stat)] = {}
        for stat in matching_stats:
            collapsed = Stat(replace(stat.name, sub_split=None)).merge(stat)
            merge_stat(merged, collapsed)
        matching_stats = list(merged.values())
    return singleton(matching_stats)
def test_sum():
    """ak.sum over a regular 3x4 ListOffsetArray: axis -1/1 sums each
    sublist, axis -2/0 sums across sublists element-wise; typetracer forms
    must match the concrete result forms."""
    # Powers of two make every partial sum uniquely identifiable.
    content2 = ak.contents.NumpyArray(np.array([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048], dtype=np.int64))
    offsets3 = ak.index.Index64(np.array([0, 4, 8, 12], dtype=np.int64))
    depth1 = ak.contents.ListOffsetArray(offsets3, content2)
    # axis=-1 (innermost) == axis=1 here: sum within each sublist.
    assert (to_list(ak.sum(depth1, (- 1), highlevel=False)) == [(((1 + 2) + 4) + 8), (((16 + 32) + 64) + 128), (((256 + 512) + 1024) + 2048)])
    assert (ak.sum(depth1.to_typetracer(), (- 1), highlevel=False).form == ak.sum(depth1, (- 1), highlevel=False).form)
    assert (to_list(ak.sum(depth1, 1, highlevel=False)) == [(((1 + 2) + 4) + 8), (((16 + 32) + 64) + 128), (((256 + 512) + 1024) + 2048)])
    assert (ak.sum(depth1.to_typetracer(), 1, highlevel=False).form == ak.sum(depth1, 1, highlevel=False).form)
    # axis=-2 (outermost) == axis=0: sum across sublists element-wise.
    assert (to_list(ak.sum(depth1, (- 2), highlevel=False)) == [((1 + 16) + 256), ((2 + 32) + 512), ((4 + 64) + 1024), ((8 + 128) + 2048)])
    assert (ak.sum(depth1.to_typetracer(), (- 2), highlevel=False).form == ak.sum(depth1, (- 2), highlevel=False).form)
    assert (to_list(ak.sum(depth1, 0, highlevel=False)) == [((1 + 16) + 256), ((2 + 32) + 512), ((4 + 64) + 1024), ((8 + 128) + 2048)])
    assert (ak.sum(depth1.to_typetracer(), 0, highlevel=False).form == ak.sum(depth1, 0, highlevel=False).form)
class F30KCaptionKarpathyDataset(BaseDataset):
    """Flickr30K captions in the Karpathy split layout.

    Note that both 'val' and 'test' map to the Karpathy test file, while
    'train' also folds in the Karpathy val file.
    """

    def __init__(self, *args, split='', **kwargs):
        assert split in ['train', 'val', 'test']
        split_to_names = {
            'train': ['f30k_caption_karpathy_train', 'f30k_caption_karpathy_val'],
            'val': ['f30k_caption_karpathy_test'],
            'test': ['f30k_caption_karpathy_test'],
        }
        super().__init__(*args, **kwargs, names=split_to_names[split], text_column_name='caption')

    def __getitem__(self, index):
        return self.get_suite(index)
def flatgrad(loss, var_list, clip_norm=None):
    """Return the gradient of `loss` w.r.t. `var_list` flattened into a
    single 1-D tensor, optionally clipping each per-variable gradient by
    norm. Variables with no gradient contribute zeros of matching size."""
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        grads = [tf.clip_by_norm(g, clip_norm=clip_norm) for g in grads]
    flat_pieces = []
    for (v, grad) in zip(var_list, grads):
        # None gradients (disconnected variables) become zero blocks so the
        # flattened layout always matches var_list.
        g = grad if grad is not None else tf.zeros_like(v)
        flat_pieces.append(tf.reshape(g, [numel(v)]))
    return tf.concat(axis=0, values=flat_pieces)
def _str2bool(value):
    """argparse `type=` parser for booleans.

    BUG FIX: the original flags used `type=bool`, for which any non-empty
    string -- including the literal 'False' -- parses as True. This parser
    accepts the usual spellings and rejects anything else (argparse turns
    the ValueError into a CLI error).
    """
    lowered = str(value).strip().lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n', ''):
        return False
    raise ValueError('expected a boolean value, got %r' % value)


def main():
    """CLI entry point: train/evaluate DDPG on an OpenAI Gym environment."""
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)
    parser = ArgumentParserShowHelpOnError(prog='Deep Deterministic Policy Gradient (DDPG)', description='Deep Deterministic Policy Gradient (DDPG) in Tensorflow 2')
    parser.add_argument('--env', type=str, nargs='?', default='BipedalWalker-v3', help='The OpenAI Gym environment to train on, e.g. BipedalWalker-v3, LunarLanderContinuous-v2, Pendulum-v0')
    parser.add_argument('--render_env', type=_str2bool, nargs='?', default=True, help='Render the environment to be visually visible')
    parser.add_argument('--train', type=_str2bool, nargs='?', required=True, help='Train the network on the modified DDPG algorithm')
    parser.add_argument('--use-noise', type=_str2bool, nargs='?', required=True, help='OU Noise will be applied to the policy action')
    parser.add_argument('--eps-greedy', type=float, nargs='?', default=0.95, help="The epsilon for Epsilon-greedy in the policy's action")
    # BUG FIX: warm-up is documented as an epoch count and compared
    # numerically below (`ep >= args.warm_up`), so it must be an int;
    # `type=bool` coerced every value to True/False.
    parser.add_argument('--warm-up', type=int, nargs='?', default=1, help='Following recommendation from OpenAI Spinning Up, the actions in the early epochs can be set random to increase exploration. This warm up defines how many epochs are initially set to do this.')
    parser.add_argument('--checkpoints-path', type=str, nargs='?', default='checkpoints/DDPG_', help='Save the weight of the network in the defined checkpoint file directory.')
    parser.add_argument('--tf-log-dir', type=str, nargs='?', default='./logs/DDPG/', help='Save the logs of the training step.')
    args = parser.parse_args()
    env = gym.make(args.env)
    # Assumes a symmetric Box action space; only the first dimension's
    # bounds are used -- TODO confirm for non-uniform action spaces.
    action_space_high = env.action_space.high[0]
    action_space_low = env.action_space.low[0]
    brain = Brain(env.observation_space.shape[0], env.action_space.shape[0], action_space_high, action_space_low)
    tensorboard = Tensorboard(log_dir=args.tf_log_dir)
    logging.info('Loading weights from %s*, make sure the folder exists', args.checkpoints_path)
    brain.load_weights(args.checkpoints_path)
    # Per-episode metrics, reset at the start of each episode.
    acc_reward = tf.keras.metrics.Sum('reward', dtype=tf.float32)
    actions_squared = tf.keras.metrics.Mean('actions', dtype=tf.float32)
    Q_loss = tf.keras.metrics.Mean('Q_loss', dtype=tf.float32)
    A_loss = tf.keras.metrics.Mean('A_loss', dtype=tf.float32)
    ep_reward_list = []
    avg_reward_list = []
    with trange(TOTAL_EPISODES) as t:
        for ep in t:
            prev_state = env.reset()
            acc_reward.reset_states()
            actions_squared.reset_states()
            Q_loss.reset_states()
            A_loss.reset_states()
            brain.noise.reset()
            for _ in range(2000):
                if args.render_env:
                    env.render()
                # After warm-up, act greedily with probability eps_greedy,
                # annealed towards 1 over the course of training.
                cur_act = brain.act(tf.expand_dims(prev_state, 0), _notrandom=((ep >= args.warm_up) and (random.random() < (args.eps_greedy + (((1 - args.eps_greedy) * ep) / TOTAL_EPISODES)))), noise=args.use_noise)
                (state, reward, done, _) = env.step(cur_act)
                brain.remember(prev_state, reward, state, int(done))
                if args.train:
                    (c, a) = brain.learn(brain.buffer.get_batch(unbalance_p=UNBALANCE_P))
                    Q_loss(c)
                    A_loss(a)
                acc_reward(reward)
                actions_squared(np.square((cur_act / action_space_high)))
                prev_state = state
                if done:
                    break
            ep_reward_list.append(acc_reward.result().numpy())
            # Moving average over the last 40 episodes.
            avg_reward = np.mean(ep_reward_list[(- 40):])
            avg_reward_list.append(avg_reward)
            t.set_postfix(r=avg_reward)
            tensorboard(ep, acc_reward, actions_squared, Q_loss, A_loss)
            # Checkpoint every 5 episodes while training.
            if (args.train and ((ep % 5) == 0)):
                brain.save_weights(args.checkpoints_path)
    env.close()
    if args.train:
        brain.save_weights(args.checkpoints_path)
    logging.info('Training done...')
    plt.plot(avg_reward_list)
    plt.xlabel('Episode')
    plt.ylabel('Avg. Episodic Reward')  # typo fix: was 'Epsiodic'
    plt.show()
def test_conll_sysa():
    """System A's stats on the unstitched CoNLL split match the expected values."""
    stats = _get_stats(CONLL_GOLD_UNSTITCHED, CONLL_SYSA_UNSTITCHED)
    assert check_correct(EXPECTED_CONLL_SYSA, stats)
def test_simple_moves():
    """A fixed short move sequence produces the expected board rendering."""
    board, player, game = init_board_from_moves([4, 5, 4, 3, 0, 6])
    expected = textwrap.dedent(' [[ 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 0. 0. 0.]\n [ 0. 0. 0. 0. 1. 0. 0.]\n [ 1. 0. 0. -1. 1. -1. -1.]]')
    rendered = game.stringRepresentation(board)
    assert rendered == expected
class OpenAIGPTModel(metaclass=DummyObject):
    """Import-guard placeholder: stands in for the real model when torch is not
    installed and raises an informative error on instantiation."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises if the 'torch' backend is unavailable.
        requires_backends(self, ['torch'])
class Benchmark(LoggingBase):
    """Code package representing a serverless benchmark.

    Locates benchmark sources, copies code and deployment wrappers, installs
    dependencies inside the deployment's Docker build image, and caches the
    resulting package keyed by an MD5 hash of the sources.

    BUGFIX (decorator restoration): the accessors below were plain methods but
    are used as attributes throughout this class (e.g. ``self.benchmark_path``,
    ``self.hash``), the setters were left as bare ``_cached.setter`` expressions
    (a NameError at class-creation time), and the second ``hash`` definition
    shadowed the getter. ``@property`` / ``@x.setter`` / ``@staticmethod`` are
    restored accordingly.
    """

    @staticmethod
    def typename() -> str:
        return 'Benchmark'

    @property
    def benchmark(self):
        return self._benchmark

    @property
    def benchmark_path(self):
        return self._benchmark_path

    @property
    def benchmark_config(self) -> BenchmarkConfig:
        return self._benchmark_config

    @property
    def code_package(self) -> dict:
        return self._code_package

    @property
    def functions(self) -> Dict[str, Any]:
        return self._functions

    @property
    def code_location(self):
        # Prefer the cached package location; fall back to the freshly built one.
        if self.code_package:
            return os.path.join(self._cache_client.cache_dir, self.code_package['location'])
        else:
            return self._code_location

    @property
    def is_cached(self):
        return self._is_cached

    @is_cached.setter
    def is_cached(self, val: bool):
        self._is_cached = val

    @property
    def is_cached_valid(self):
        return self._is_cached_valid

    @is_cached_valid.setter
    def is_cached_valid(self, val: bool):
        self._is_cached_valid = val

    @property
    def code_size(self):
        return self._code_size

    @property
    def language(self) -> 'Language':
        return self._language

    @property
    def language_name(self) -> str:
        return self._language.value

    @property
    def language_version(self):
        return self._language_version

    @property
    def hash(self):
        # Recomputed on each access so cache validation always sees current sources.
        path = os.path.join(self.benchmark_path, self.language_name)
        self._hash_value = Benchmark.hash_directory(path, self._deployment_name, self.language_name)
        return self._hash_value

    @hash.setter
    def hash(self, val: str):
        self._hash_value = val

    def __init__(self, benchmark: str, deployment_name: str, config: 'ExperimentConfig', system_config: SeBSConfig, output_dir: str, cache_client: Cache, docker_client: docker.client):
        """Locate the benchmark, load its config, and query the package cache.

        :raises RuntimeError: when the benchmark directory is missing or the
            benchmark does not support the configured language.
        """
        super().__init__()
        self._benchmark = benchmark
        self._deployment_name = deployment_name
        self._experiment_config = config
        self._language = config.runtime.language
        self._language_version = config.runtime.version
        self._benchmark_path = find_benchmark(self.benchmark, 'benchmarks')
        if not self._benchmark_path:
            raise RuntimeError('Benchmark {benchmark} not found!'.format(benchmark=self._benchmark))
        with open(os.path.join(self.benchmark_path, 'config.json')) as json_file:
            self._benchmark_config: BenchmarkConfig = BenchmarkConfig.deserialize(json.load(json_file))
        if self.language not in self.benchmark_config.languages:
            raise RuntimeError('Benchmark {} not available for language {}'.format(self.benchmark, self.language))
        self._cache_client = cache_client
        self._docker_client = docker_client
        self._system_config = system_config
        self._hash_value = None
        self._output_dir = os.path.join(output_dir, f'{benchmark}_code', self._language.value, self._language_version)
        # Load cache information and verify code package is up to date.
        self.query_cache()
        if config.update_code:
            self._is_cached_valid = False

    @staticmethod
    def hash_directory(directory: str, deployment: str, language: str):
        """Compute MD5 hash of an entire directory (sources + deployment wrappers)."""
        hash_sum = hashlib.md5()
        FILES = {'python': ['*.py', 'requirements.txt*'], 'nodejs': ['*.js', 'package.json']}
        WRAPPERS = {'python': '*.py', 'nodejs': '*.js'}
        NON_LANG_FILES = ['*.sh', '*.json']
        selected_files = FILES[language] + NON_LANG_FILES
        for file_type in selected_files:
            for f in glob.glob(os.path.join(directory, file_type)):
                path = os.path.join(directory, f)
                with open(path, 'rb') as opened_file:
                    hash_sum.update(opened_file.read())
        # Deployment-specific wrapper code also contributes to the hash.
        wrappers = project_absolute_path('benchmarks', 'wrappers', deployment, language, WRAPPERS[language])
        for f in glob.glob(wrappers):
            path = os.path.join(directory, f)
            with open(path, 'rb') as opened_file:
                hash_sum.update(opened_file.read())
        return hash_sum.hexdigest()

    def serialize(self) -> dict:
        """Cache representation of this package (size + source hash)."""
        return {'size': self.code_size, 'hash': self.hash}

    def query_cache(self):
        """Look up the code package and deployed functions in the cache and
        validate the cached package by comparing source hashes."""
        self._code_package = self._cache_client.get_code_package(deployment=self._deployment_name, benchmark=self._benchmark, language=self.language_name, language_version=self.language_version)
        self._functions = self._cache_client.get_functions(deployment=self._deployment_name, benchmark=self._benchmark, language=self.language_name)
        if self._code_package is not None:
            # Verify the cached package against the current sources.
            current_hash = self.hash
            old_hash = self._code_package['hash']
            self._code_size = self._code_package['size']
            self._is_cached = True
            self._is_cached_valid = current_hash == old_hash
        else:
            self._is_cached = False
            self._is_cached_valid = False

    def copy_code(self, output_dir):
        """Copy language-specific benchmark sources into ``output_dir``."""
        FILES = {'python': ['*.py', 'requirements.txt*'], 'nodejs': ['*.js', 'package.json']}
        path = os.path.join(self.benchmark_path, self.language_name)
        for file_type in FILES[self.language_name]:
            for f in glob.glob(os.path.join(path, file_type)):
                shutil.copy2(os.path.join(path, f), output_dir)
        # A version-pinned package.json (package.json.<version>) overrides the default.
        nodejs_package_json = os.path.join(path, f'package.json.{self.language_version}')
        if os.path.exists(nodejs_package_json):
            shutil.copy2(nodejs_package_json, os.path.join(output_dir, 'package.json'))

    def add_benchmark_data(self, output_dir):
        """Run the benchmark's init.sh script(s), if any, to stage extra assets."""
        cmd = '/bin/bash {benchmark_path}/init.sh {output_dir} false'
        paths = [self.benchmark_path, os.path.join(self.benchmark_path, self.language_name)]
        for path in paths:
            if os.path.exists(os.path.join(path, 'init.sh')):
                subprocess.run(cmd.format(benchmark_path=path, output_dir=output_dir), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    def add_deployment_files(self, output_dir):
        """Copy platform-specific handler/wrapper files into the package."""
        handlers_dir = project_absolute_path('benchmarks', 'wrappers', self._deployment_name, self.language_name)
        handlers = [os.path.join(handlers_dir, file) for file in self._system_config.deployment_files(self._deployment_name, self.language_name)]
        for file in handlers:
            shutil.copy2(file, os.path.join(output_dir))

    def add_deployment_package_python(self, output_dir):
        """Append platform-required Python packages to requirements.txt."""
        packages = self._system_config.deployment_packages(self._deployment_name, self.language_name)
        if len(packages):
            with open(os.path.join(output_dir, 'requirements.txt'), 'a') as out:
                for package in packages:
                    # BUGFIX: one requirement per line; plain write() concatenated
                    # entries with each other and with the file's last line.
                    out.write(package + '\n')

    def add_deployment_package_nodejs(self, output_dir):
        """Merge platform-required Node.js packages into package.json dependencies."""
        packages = self._system_config.deployment_packages(self._deployment_name, self.language_name)
        if len(packages):
            package_config = os.path.join(output_dir, 'package.json')
            with open(package_config, 'r') as package_file:
                package_json = json.load(package_file)
            for key, val in packages.items():
                package_json['dependencies'][key] = val
            with open(package_config, 'w') as package_file:
                json.dump(package_json, package_file, indent=2)

    def add_deployment_package(self, output_dir):
        """Dispatch to the language-specific dependency injection."""
        from sebs.faas.function import Language
        if self.language == Language.PYTHON:
            self.add_deployment_package_python(output_dir)
        elif self.language == Language.NODEJS:
            self.add_deployment_package_nodejs(output_dir)
        else:
            raise NotImplementedError

    @staticmethod
    def directory_size(directory: str):
        """Total size in bytes of all regular files under ``directory``."""
        from pathlib import Path
        root = Path(directory)
        sizes = [f.stat().st_size for f in root.glob('**/*') if f.is_file()]
        return sum(sizes)

    def install_dependencies(self, output_dir):
        """Install benchmark dependencies inside the deployment's Docker build image.

        Either bind-mounts ``output_dir`` into the build container or, when the
        'docker_copy_build_files' flag is set, copies the code in and out via a
        tar archive (needed e.g. for remote Docker daemons).
        """
        # Do we have a Docker build image for this deployment and language?
        if 'build' not in self._system_config.docker_image_types(self._deployment_name, self.language_name):
            self.logging.info('There is no Docker build image for {deployment} run in {language}, thus skipping the Docker-based installation of dependencies.'.format(deployment=self._deployment_name, language=self.language_name))
        else:
            repo_name = self._system_config.docker_repository()
            image_name = 'build.{deployment}.{language}.{runtime}'.format(deployment=self._deployment_name, language=self.language_name, runtime=self.language_version)
            try:
                self._docker_client.images.get(repo_name + ':' + image_name)
            except docker.errors.ImageNotFound:
                try:
                    self.logging.info('Docker pull of image {repo}:{image}'.format(repo=repo_name, image=image_name))
                    self._docker_client.images.pull(repo_name, image_name)
                except docker.errors.APIError:
                    raise RuntimeError('Docker pull of image {} failed!'.format(image_name))
            # BUGFIX: initialize so the error handler below never hits an
            # unbound 'volumes' when the copy-build-files path is taken.
            volumes = {}
            if not self._experiment_config.check_flag('docker_copy_build_files'):
                volumes = {os.path.abspath(output_dir): {'bind': '/mnt/function', 'mode': 'rw'}}
                package_script = os.path.abspath(os.path.join(self._benchmark_path, self.language_name, 'package.sh'))
                if os.path.exists(package_script):
                    volumes[package_script] = {'bind': '/mnt/function/package.sh', 'mode': 'ro'}
            # Run the installer only when there is a dependency manifest to process.
            PACKAGE_FILES = {'python': 'requirements.txt', 'nodejs': 'package.json'}
            file = os.path.join(output_dir, PACKAGE_FILES[self.language_name])
            if os.path.exists(file):
                try:
                    self.logging.info('Docker build of benchmark dependencies in container of image {repo}:{image}'.format(repo=repo_name, image=image_name))
                    uid = os.getuid()
                    if not self._experiment_config.check_flag('docker_copy_build_files'):
                        self.logging.info('Docker mount of benchmark code from path {path}'.format(path=os.path.abspath(output_dir)))
                        stdout = self._docker_client.containers.run('{}:{}'.format(repo_name, image_name), volumes=volumes, environment={'CONTAINER_UID': str(os.getuid()), 'CONTAINER_GID': str(os.getgid()), 'CONTAINER_USER': 'docker_user', 'APP': self.benchmark, 'PLATFORM': self._deployment_name.upper()}, remove=True, stdout=True, stderr=True)
                    else:
                        # Copy-in/copy-out flow: keep a long-lived container and
                        # move the code through tar archives.
                        container = self._docker_client.containers.run('{}:{}'.format(repo_name, image_name), environment={'APP': self.benchmark}, user=uid, detach=True, tty=True, command='/bin/bash')
                        import tarfile
                        self.logging.info('Send benchmark code from path {path} to Docker instance'.format(path=os.path.abspath(output_dir)))
                        tar_archive = os.path.join(output_dir, os.path.pardir, 'function.tar')
                        with tarfile.open(tar_archive, 'w') as tar:
                            for f in os.listdir(output_dir):
                                tar.add(os.path.join(output_dir, f), arcname=f)
                        with open(tar_archive, 'rb') as data:
                            container.put_archive('/mnt/function', data.read())
                        exit_code, stdout = container.exec_run(cmd='/bin/bash /sebs/installer.sh', user='docker_user', stdout=True, stderr=True)
                        # Pull the built function back out of the container.
                        data, stat = container.get_archive('/mnt/function')
                        with open(tar_archive, 'wb') as f:
                            for chunk in data:
                                f.write(chunk)
                        with tarfile.open(tar_archive, 'r') as tar:
                            # NOTE(review): extractall on an archive produced by our
                            # own container; not hardened against hostile tar members.
                            tar.extractall(output_dir)
                        # Flatten the extracted 'function' directory into output_dir.
                        for f in os.listdir(os.path.join(output_dir, 'function')):
                            shutil.move(os.path.join(output_dir, 'function', f), os.path.join(output_dir, f))
                        shutil.rmtree(os.path.join(output_dir, 'function'))
                        container.stop()
                    # Surface only the size-related lines of the build output.
                    for line in stdout.decode('utf-8').split('\n'):
                        if 'size' in line:
                            self.logging.info('Docker build: {}'.format(line))
                except docker.errors.ContainerError as e:
                    self.logging.error('Package build failed!')
                    self.logging.error(e)
                    self.logging.error(f'Docker mount volumes: {volumes}')
                    raise e

    def recalculate_code_size(self):
        """Refresh the cached code size from the on-disk package directory."""
        self._code_size = Benchmark.directory_size(self._output_dir)
        return self._code_size

    def build(self, deployment_build_step: Callable[[str, str, str, str, bool], Tuple[str, int]]) -> Tuple[bool, str]:
        """Build (or reuse) the code package.

        :param deployment_build_step: platform-specific packaging callback
            (dir, language, version, benchmark, is_cached) -> (location, size).
        :return: (True, location) when a rebuild happened, (False, location) otherwise.
        """
        # Skip the build when a valid cached package exists.
        if self.is_cached and self.is_cached_valid:
            self.logging.info('Using cached benchmark {} at {}'.format(self.benchmark, self.code_location))
            return (False, self.code_location)
        msg = 'no cached code package.' if not self.is_cached else 'cached code package is not up to date/build enforced.'
        self.logging.info('Building benchmark {}. Reason: {}'.format(self.benchmark, msg))
        # Clear the cached-package reference so code_location points at the new build.
        self._code_package = None
        if os.path.exists(self._output_dir):
            shutil.rmtree(self._output_dir)
        os.makedirs(self._output_dir)
        self.copy_code(self._output_dir)
        self.add_benchmark_data(self._output_dir)
        self.add_deployment_files(self._output_dir)
        self.add_deployment_package(self._output_dir)
        self.install_dependencies(self._output_dir)
        (self._code_location, self._code_size) = deployment_build_step(os.path.abspath(self._output_dir), self.language_name, self.language_version, self.benchmark, self.is_cached)
        self.logging.info(('Created code package (source hash: {hash}), for run on {deployment}' + ' with {language}:{runtime}').format(hash=self.hash, deployment=self._deployment_name, language=self.language_name, runtime=self.language_version))
        if self.is_cached:
            self._cache_client.update_code_package(self._deployment_name, self.language_name, self)
        else:
            self._cache_client.add_code_package(self._deployment_name, self.language_name, self)
        self.query_cache()
        return (True, self._code_location)

    def prepare_input(self, storage: PersistentStorage, size: str):
        """Locate the benchmark input generator, inspect how many storage buckets
        are needed and launch the corresponding storage instance, if necessary.

        :param storage: deployment storage client
        :param size: benchmark workload size
        :return: config dict passed to the benchmark function as input
        """
        benchmark_data_path = find_benchmark(self._benchmark, 'benchmarks-data')
        mod = load_benchmark_input(self._benchmark_path)
        buckets = mod.buckets_count()
        storage.allocate_buckets(self.benchmark, buckets)
        input_config = mod.generate_input(benchmark_data_path, size, storage.input, storage.output, storage.uploader_func)
        return input_config

    def code_package_modify(self, filename: str, data: bytes):
        """Replace a single file inside the built package.

        Used in experiments that modify the size of the input package; allows
        changing the code package without going through the entire pipeline.
        """
        if self.code_package_is_archive():
            self._update_zip(self.code_location, filename, data)
            new_size = self.code_package_recompute_size() / 1024.0 / 1024.0
            self.logging.info(f'Modified zip package {self.code_location}, new size {new_size} MB')
        else:
            raise NotImplementedError()

    def code_package_is_archive(self) -> bool:
        """True when the package is a single archive (AWS: .zip; Azure: directory)."""
        if os.path.isfile(self.code_location):
            extension = os.path.splitext(self.code_location)[1]
            return extension in ['.zip']
        return False

    def code_package_recompute_size(self) -> float:
        """Refresh and return the package size in bytes (archive packages only)."""
        bytes_size = os.path.getsize(self.code_location)
        self._code_size = bytes_size
        return bytes_size

    @staticmethod
    def _update_zip(zipname: str, filename: str, data: bytes):
        """Rewrite ``filename`` inside an existing zip archive with ``data``.

        Zip files cannot replace entries in place, so the archive is copied
        without the old entry and the new content is appended afterwards.
        """
        import zipfile
        import tempfile
        # Generate the temp file next to the archive so os.rename stays on one filesystem.
        (tmpfd, tmpname) = tempfile.mkstemp(dir=os.path.dirname(zipname))
        os.close(tmpfd)
        with zipfile.ZipFile(zipname, 'r') as zin:
            with zipfile.ZipFile(tmpname, 'w') as zout:
                zout.comment = zin.comment
                for item in zin.infolist():
                    if item.filename != filename:
                        zout.writestr(item, zin.read(item.filename))
        os.remove(zipname)
        os.rename(tmpname, zipname)
        with zipfile.ZipFile(zipname, mode='a', compression=zipfile.ZIP_DEFLATED) as zf:
            zf.writestr(filename, data)
class CARTOON_THRESH_METHODS(Enum):
    """Thresholding modes for the cartoon effect; values mirror OpenCV's
    THRESH_* naming in lowercase string form."""
    BINARY = 'thresh_binary'
    BINARY_INV = 'thresh_binary_inv'
    TRIANGLE = 'thresh_triangle'
    MASK = 'thresh_mask'
    TRUNC = 'thresh_trunc'
    OTSU = 'thresh_otsu'
    TOZERO = 'thresh_tozero'
    TOZERO_INV = 'thresh_tozero_inv'
def restore_optimizer_state(optimizer):
    """Restore solver state from the optimizer's protobuf, then — if a
    checkpoint handle is attached — reload from that checkpoint as well."""
    optimizer.solver.set_states_from_protobuf(optimizer.proto)
    if not hasattr(optimizer, 'solver_checkpoint'):
        return
    ext, handle = optimizer.solver_checkpoint
    if ext == '.protobuf':
        optimizer.solver.set_states_from_protobuf(handle)
    elif ext == '.h5':
        from nnabla.utils.get_file_handle import load_solve_state_from_h5
        optimizer.solver.set_states(load_solve_state_from_h5(None, handle))
    # Rewind so the handle can be consumed again later.
    handle.seek(0)
def _capture_stream(is_origin=True):
    # Ensure the CUDA context is initialized before touching capture internals.
    torch.cuda.init()
    # NOTE(review): wraps the private capture-stream handle from torch._C —
    # internal API, may change across torch versions.
    return torch.cuda.Stream(_cdata=torch._C._cuda_getCaptureStream(is_origin))
def add_retrieved_documents(args, examples):
    """Attach top-k retrieved passages to every response candidate of each example.

    Each entry of ``example['response_candidates']`` is used as a retrieval
    query (optionally prefixed with the last two context turns when
    ``args.use_context`` is set); the retrieved passages are grouped by page
    title and stored as ``example['response_docs']``.

    Mutates ``examples`` in place; returns nothing.

    Fixes: removed the unused locals ``context``/``context_string`` and the
    unused ``enumerate`` index from the original.
    """
    ret_object = DPRDoc_Retrieval(topk=args.topk, model_type=args.model_type)
    for example in tqdm(examples):
        response_docs = []
        for response in example['response_candidates']:
            inputstring = response
            if args.use_context:
                # Prefix the query with the last two context turns.
                inputstring = (' [eot] '.join(example['context'][-2:]) + ' [SEP] ') + response
            passages_scores, passages = ret_object.get_top_passages(inputstring)
            # Group retrieved passage texts by their page title.
            pages = dict()
            for score, title, text in zip(passages_scores, passages['title'], passages['text']):
                if title not in pages:
                    pages[title] = []
                pages[title].append(text)
            response_docs.append(pages)
        example['response_docs'] = response_docs
def plot_average_reward_per_n_rounds(rewards):
    """Plot per-round average reward as a function of n_rounds, one line per
    variable in ``rewards`` (any DataFrame-compatible mapping)."""
    frame = pd.melt(pd.DataFrame(rewards), ['n_rounds'])
    # Normalize cumulative reward by the number of rounds.
    frame['value'] = frame['value'] / frame['n_rounds']
    axis = sns.lineplot(data=frame, x='n_rounds', y='value', style='variable', hue='variable', markers=True, dashes=False)
    # Move the legend outside the axes.
    axis.legend(bbox_to_anchor=(1.1, 1.05))
class PLBartPreTrainedModel(metaclass=DummyObject):
    """Import-guard placeholder: stands in for the real model when torch is not
    installed and raises an informative error on instantiation."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises if the 'torch' backend is unavailable.
        requires_backends(self, ['torch'])
def add_loss_for_each_scale(scales_to_logits, labels, num_classes, ignore_label, loss_weight=1.0, upsample_logits=True, scope=None, loss_function='sce'):
    """Register a segmentation loss for every output scale.

    :param scales_to_logits: mapping from scale name to logits tensor
    :param labels: ground-truth label tensor (required)
    :param num_classes: number of segmentation classes
    :param ignore_label: label value excluded from the loss
    :param loss_weight: weight applied to non-ignored pixels ('sce' only)
    :param upsample_logits: whether logits are upsampled to the label size
    :param scope: optional name-scope prefix for the per-scale losses
    :param loss_function: 'sce' (softmax cross-entropy) or a 'lovasz' variant
    :raises ValueError: when labels is None or loss_function is unsupported
    """
    if labels is None:
        raise ValueError('No label for softmax cross entropy loss.')
    if loss_function is None:
        loss_function = 'sce'
    for scale, logits in six.iteritems(scales_to_logits):
        scale_scope = ('%s_%s' % (scope, scale)) if scope else None
        logits, scaled_labels = _prep_logits(logits, labels, upsample_logits)
        if loss_function == 'sce':
            flat_labels = tf.reshape(scaled_labels, shape=[-1])
            # Zero out ignored pixels; everything else gets loss_weight.
            pixel_weights = tf.cast(tf.not_equal(flat_labels, ignore_label), tf.float32) * loss_weight
            one_hot = slim.one_hot_encoding(flat_labels, num_classes, on_value=1.0, off_value=0.0)
            tf.losses.softmax_cross_entropy(one_hot, tf.reshape(logits, shape=[-1, num_classes]), weights=pixel_weights, scope=scale_scope)
        elif 'lovasz' in loss_function:
            # 'all' in the name selects averaging over all classes, else present-only.
            class_mode = 'all' if 'all' in loss_function else 'present'
            probabilities = tf.nn.softmax(logits)
            tf.losses.add_loss(lovasz_softmax(probabilities, scaled_labels, ignore=ignore_label, classes=class_mode))
        else:
            raise ValueError('loss_function not supported.')
def von_mises_cdf_normalapprox(k, x):
    """Normal approximation to the von Mises CDF.

    Uses z = b(k) * sin(x/2) with b(k) = sqrt(2/pi) * exp(k) / I0(k), which is
    approximately standard normal for large concentration k.
    """
    scale = np.sqrt(2 / np.pi) * np.exp(k) / i0(k)
    z = scale * np.sin(x / 2.0)
    return scipy.stats.norm.cdf(z)
def handle_special_chars(t):
    """Split intra-word hyphens and pad %, &, /, $, * with spaces."""
    # 'foo-bar' -> 'foo bar' (only when the hyphen sits between word characters;
    # non-overlapping matches, so 'a-b-c' becomes 'a b-c').
    without_hyphens = re.sub(r'(\w)-(\w)', r'\1 \2', t)
    # Surround each special character with single spaces.
    return re.sub(r'([%&\/$*])', r' \1 ', without_hyphens)
def barron_factor(x: sf.Matrix51, y: sf.Matrix51, mu: sf.Scalar, eps: sf.Scalar) -> sf.Matrix51:
    """Whiten the residual (x - y) under a Barron noise model driven by mu."""
    # Map the homotopy parameter mu onto the Barron shape parameter alpha.
    shape_alpha = BarronNoiseModel.compute_alpha_from_mu(mu, eps)
    model = BarronNoiseModel(alpha=shape_alpha, delta=1, scalar_information=1, x_epsilon=eps)
    residual = x - y
    return model.whiten(residual)
class VersionControl(object):
    """Base class for version-control backends (git, hg, svn, ...).

    Fixes applied:
    - the malformed-URL error message literal was truncated (an unterminated
      string, i.e. a syntax error) and is restored;
    - the revision separator in get_url_rev had been lost ('' instead of '@'),
      which made ``'' in path`` always true and ``path.rsplit('', 1)`` raise
      ValueError; restored to '@' per the <vcs>+<url>@<rev> convention;
    - ``is_repository_directory`` / ``controls_location`` take ``cls`` but were
      missing ``@classmethod``.
    """

    name = ''
    dirname = ''
    # Supported URL schemes for this VCS.
    schemes = ()
    # Environment variable names to strip when running the VCS command.
    unset_environ = ()
    default_arg_rev = None

    def __init__(self, url=None, *args, **kwargs):
        self.url = url
        super(VersionControl, self).__init__(*args, **kwargs)

    def get_base_rev_args(self, rev):
        """Return the base revision arguments for a VCS command (abstract)."""
        raise NotImplementedError

    def make_rev_options(self, rev=None, extra_args=None):
        """Return a RevOptions object wrapping rev + extra command arguments."""
        return RevOptions(self, rev, extra_args=extra_args)

    def _is_local_repository(self, repo):
        # POSIX absolute paths start with os.path.sep; win32 ones with a drive.
        (drive, tail) = os.path.splitdrive(repo)
        return (repo.startswith(os.path.sep) or drive)

    def translate_egg_surname(self, surname):
        # The surname is used as a directory name, so slashes must not survive.
        return surname.replace('/', '_')

    def export(self, location):
        """Export sources at self.url to ``location`` without VCS metadata (abstract)."""
        raise NotImplementedError

    def get_url_rev(self, url):
        """Split '<vcs>+<url>[@rev]' and return (url, rev-or-None)."""
        error_message = ("Sorry, '%s' is a malformed VCS url. The format is <vcs>+<protocol>://<url>, e.g. svn+http://myrepo/svn/MyApp#egg=MyApp")
        assert ('+' in url), (error_message % url)
        url = url.split('+', 1)[1]
        (scheme, netloc, path, query, frag) = urllib_parse.urlsplit(url)
        rev = None
        if ('@' in path):
            (path, rev) = path.rsplit('@', 1)
        url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
        return (url, rev)

    def get_url_rev_args(self, url):
        """Hook for subclasses to derive extra command-line args from the URL."""
        return (url, [])

    def get_url_rev_options(self, url):
        """Return (url, RevOptions) parsed from a '<vcs>+<url>[@rev]' string."""
        (url, rev) = self.get_url_rev(url)
        (url, extra_args) = self.get_url_rev_args(url)
        rev_options = self.make_rev_options(rev, extra_args=extra_args)
        return (url, rev_options)

    def get_info(self, location):
        """Return (url, revision) for the repository checked out at ``location``."""
        assert (not location.rstrip('/').endswith(self.dirname)), ('Bad directory: %s' % location)
        return (self.get_url(location), self.get_revision(location))

    def normalize_url(self, url):
        # Unquote and strip trailing slashes so equivalent URLs compare equal.
        return urllib_parse.unquote(url).rstrip('/')

    def compare_urls(self, url1, url2):
        """True when the two URLs refer to the same location after normalization."""
        return (self.normalize_url(url1) == self.normalize_url(url2))

    def fetch_new(self, dest, url, rev_options):
        """Fetch a fresh checkout/clone into ``dest`` (abstract)."""
        raise NotImplementedError

    def switch(self, dest, url, rev_options):
        """Switch an existing checkout at ``dest`` to a different URL (abstract)."""
        raise NotImplementedError

    def update(self, dest, rev_options):
        """Update an existing checkout at ``dest`` to the given revision (abstract)."""
        raise NotImplementedError

    def is_commit_id_equal(self, dest, name):
        """True when ``name`` names the current commit of ``dest`` (abstract)."""
        raise NotImplementedError

    def obtain(self, dest):
        """Install or update a repository checkout under ``dest``.

        When ``dest`` already exists with a different URL (or is not a
        repository at all), the user is prompted to switch/ignore/wipe/backup.
        """
        (url, rev_options) = self.get_url_rev_options(self.url)
        if (not os.path.exists(dest)):
            self.fetch_new(dest, url, rev_options)
            return
        rev_display = rev_options.to_display()
        if self.is_repository_directory(dest):
            existing_url = self.get_url(dest)
            if self.compare_urls(existing_url, url):
                logger.debug('%s in %s exists, and has correct URL (%s)', self.repo_name.title(), display_path(dest), url)
                if (not self.is_commit_id_equal(dest, rev_options.rev)):
                    logger.info('Updating %s %s%s', display_path(dest), self.repo_name, rev_display)
                    self.update(dest, rev_options)
                else:
                    logger.info('Skipping because already up-to-date.')
                return
            # Same directory, different URL: offer the full set of options.
            logger.warning('%s %s in %s exists with URL %s', self.name, self.repo_name, display_path(dest), existing_url)
            prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', ('s', 'i', 'w', 'b'))
        else:
            logger.warning('Directory %s already exists, and is not a %s %s.', dest, self.name, self.repo_name)
            prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b'))
        logger.warning('The plan is to install the %s repository %s', self.name, url)
        response = ask_path_exists(('What to do? %s' % prompt[0]), prompt[1])
        if (response == 'a'):
            sys.exit((- 1))
        if (response == 'w'):
            logger.warning('Deleting %s', display_path(dest))
            rmtree(dest)
            self.fetch_new(dest, url, rev_options)
            return
        if (response == 'b'):
            dest_dir = backup_dir(dest)
            logger.warning('Backing up %s to %s', display_path(dest), dest_dir)
            shutil.move(dest, dest_dir)
            self.fetch_new(dest, url, rev_options)
            return
        if (response == 's'):
            logger.info('Switching %s %s to %s%s', self.repo_name, display_path(dest), url, rev_display)
            self.switch(dest, url, rev_options)

    def unpack(self, location):
        """Wipe ``location`` if present and place a fresh checkout there."""
        if os.path.exists(location):
            rmtree(location)
        self.obtain(location)

    def get_src_requirement(self, dist, location):
        """Return an editable-requirement string for ``dist`` at ``location`` (abstract)."""
        raise NotImplementedError

    def get_url(self, location):
        """Return the remote URL of the checkout at ``location`` (abstract)."""
        raise NotImplementedError

    def get_revision(self, location):
        """Return the current revision of the checkout at ``location`` (abstract)."""
        raise NotImplementedError

    def run_command(self, cmd, show_stdout=True, cwd=None, on_returncode='raise', command_desc=None, extra_environ=None, spinner=None):
        """Run a VCS subcommand, prefixing it with the VCS executable name.

        :raises BadCommand: when the VCS executable is not installed/on PATH.
        """
        cmd = ([self.name] + cmd)
        try:
            return call_subprocess(cmd, show_stdout, cwd, on_returncode, command_desc, extra_environ, unset_environ=self.unset_environ, spinner=spinner)
        except OSError as e:
            if (e.errno == errno.ENOENT):
                # Executable missing: raise a friendlier error than the raw OSError.
                raise BadCommand(('Cannot find command %r - do you have %r installed and in your PATH?' % (self.name, self.name)))
            else:
                raise

    @classmethod
    def is_repository_directory(cls, path):
        """True when ``path`` contains this VCS's metadata directory (e.g. .git)."""
        logger.debug('Checking in %s for %s (%s)...', path, cls.dirname, cls.name)
        return os.path.exists(os.path.join(path, cls.dirname))

    @classmethod
    def controls_location(cls, location):
        """True when this VCS controls ``location``."""
        return cls.is_repository_directory(location)
class ForecastExperiment(Experiment):
    """Experiment that trains a forecasting model and records val/test metrics.

    Fix: removed a stray no-op ``()`` expression statement that was left at the
    top of the class body (decorator/docstring-stripping residue).
    """

    def instance(self, model_type: str, save_vals: Optional[bool] = True):
        """Run one experiment instance: train, validate, test, persist metrics.

        :param model_type: identifier passed to get_model
        :param save_vals: when True, test predictions are saved under self.root
        """
        train_set, train_loader = get_data(flag='train')
        val_set, val_loader = get_data(flag='val')
        test_set, test_loader = get_data(flag='test')
        # Model dimensions come from the training split's feature/timestamp shapes.
        model = get_model(model_type, dim_size=train_set.data_x.shape[1], datetime_feats=train_set.timestamps.shape[-1]).to(default_device())
        checkpoint = Checkpoint(self.root)
        model = train(model, checkpoint, train_loader, val_loader, test_loader)
        val_metrics = validate(model, loader=val_loader, report_metrics=True)
        # Only the test split optionally persists per-sample outputs.
        test_metrics = validate(model, loader=test_loader, report_metrics=True, save_path=(self.root if save_vals else None))
        np.save(join(self.root, 'metrics.npy'), {'val': val_metrics, 'test': test_metrics})
        # Namespaced copies for the experiment tracker.
        val_metrics = {f'ValMetric/{k}': v for k, v in val_metrics.items()}
        test_metrics = {f'TestMetric/{k}': v for k, v in test_metrics.items()}
        checkpoint.close({**val_metrics, **test_metrics})
class IndexExtractorNodeLister(NodeVisitor):
    """Collect Array_Subscript_Node occurrences from an AST subtree.

    Call expressions are traversed only when they are known elementwise math
    intrinsics; all other calls and execution parts are skipped.
    """

    def __init__(self):
        # Array subscripts found so far, in visit order.
        self.nodes: List[ast_internal_classes.Array_Subscript_Node] = []

    def visit_Call_Expr_Node(self, node: ast_internal_classes.Call_Expr_Node):
        # Only descend into elementwise math intrinsics.
        if node.name.name not in ('sqrt', 'exp', 'pow', 'max', 'min', 'abs', 'tanh'):
            return
        return self.generic_visit(node)

    def visit_Array_Subscript_Node(self, node: ast_internal_classes.Array_Subscript_Node):
        self.nodes.append(node)

    def visit_Execution_Part_Node(self, node: ast_internal_classes.Execution_Part_Node):
        # Never recurse into execution parts.
        return
class Resample2dFunction(Function):
    """Autograd wrapper around the resample2d CUDA kernels.

    BUGFIX: ``forward``/``backward`` take ``ctx`` as their first argument
    (new-style autograd.Function invoked via ``.apply()``) and therefore must
    be static methods; the ``@staticmethod`` decorators were missing.
    """

    @staticmethod
    def forward(ctx, input1, input2, kernel_size=1, bilinear=True):
        """Resample ``input1`` using ``input2``; returns a new tensor with
        channels from input1 and spatial size from input2."""
        assert input1.is_contiguous()
        assert input2.is_contiguous()
        ctx.save_for_backward(input1, input2)
        ctx.kernel_size = kernel_size
        ctx.bilinear = bilinear
        (_, d, _, _) = input1.size()
        (b, _, h, w) = input2.size()
        # Output: batch/height/width from input2, channel count from input1.
        output = input1.new(b, d, h, w).zero_()
        resample2d_cuda.forward(input1, input2, output, kernel_size, bilinear)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Compute gradients w.r.t. input1 and input2 via the CUDA kernel."""
        grad_output = grad_output.contiguous()
        assert grad_output.is_contiguous()
        (input1, input2) = ctx.saved_tensors
        grad_input1 = Variable(input1.new(input1.size()).zero_())
        grad_input2 = Variable(input1.new(input2.size()).zero_())
        resample2d_cuda.backward(input1, input2, grad_output.data, grad_input1.data, grad_input2.data, ctx.kernel_size, ctx.bilinear)
        # kernel_size and bilinear are non-differentiable.
        return (grad_input1, grad_input2, None, None)
def proxyless_base(pretrained=True, net_config=None, net_weight=None):
    """Build a ProxylessNAS-style network from a downloaded JSON config.

    :param pretrained: when True, also download and load net_weight
    :param net_config: URL of the network-config JSON (required)
    :param net_weight: URL of the checkpoint (required when pretrained)
    :return: the constructed network
    """
    assert (net_config is not None), 'Please input a network config'
    net_config_path = download_url(net_config)
    # BUGFIX: close the config file (was json.load(open(...)), leaking the handle).
    with open(net_config_path, 'r') as config_file:
        net_config_json = json.load(config_file)
    if (net_config_json['name'] == ProxylessNASNets.__name__):
        net = ProxylessNASNets.build_from_config(net_config_json)
    else:
        net = PyramidTreeNet.build_from_config(net_config_json)
    # Optional batch-norm hyperparameters from the config.
    if ('bn' in net_config_json):
        net.set_bn_param(bn_momentum=net_config_json['bn']['momentum'], bn_eps=net_config_json['bn']['eps'])
    if pretrained:
        assert (net_weight is not None), 'Please specify network weights'
        init_path = download_url(net_weight)
        init = torch.load(init_path, map_location='cpu')
        net.load_state_dict(init['state_dict'])
    return net
class VibrateLR(_LRScheduler):
    """LR schedule combining a decaying envelope with a triangular vibration.

    The envelope falls linearly from 1 to 0 over the first 3/8 of training,
    holds at 0.2 until 5/8, then 0.1; it is multiplied by a triangle wave of
    period ``total_iter // 80``. The very first half-period is clamped to at
    least 0.1 of the initial learning rate.
    """

    def __init__(self, optimizer, total_iter, last_epoch=-1):
        # Total number of iterations the schedule spans.
        self.total_iter = total_iter
        super(VibrateLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        progress = self.last_epoch / self.total_iter
        # Piecewise decaying envelope.
        if progress < 3 / 8:
            envelope = 1 - progress * 8 / 3
        elif progress < 5 / 8:
            envelope = 0.2
        else:
            envelope = 0.1
        # Triangle wave: rises over the first half-period, falls over the second.
        period = self.total_iter // 80
        half = period // 2
        phase = self.last_epoch % period
        tri = phase / half
        if phase >= half:
            tri = 2 - tri
        weight = envelope * tri
        # Keep the very first half-period from collapsing to zero.
        if self.last_epoch < half:
            weight = max(0.1, weight)
        return [weight * group['initial_lr'] for group in self.optimizer.param_groups]
def cached_property(func):
    """Decorator: a read-only property computed once per instance, then
    memoized under ``_<name>`` in the instance's ``__dict__``."""
    attr_name = f'_{func.__name__}'

    def _getter(self):
        try:
            # Fast path: previously memoized value.
            return getattr(self, attr_name)
        except AttributeError:
            result = func(self)
            # Store directly in the instance dict to bypass the property.
            self.__dict__[attr_name] = result
            return result

    return property(_getter)
def savemodel():
    """Return a Checkpoint rooted at the version-specific save directory,
    either in cloud storage or under the local results directory."""
    if PARAMS['use_cloud']:
        base = PARAMS['gcs_results'].rstrip('/')
        target_dir = f'{base}/{str(VERSION)}'
    else:
        target_dir = get_results_dir(PARAMS['dataset']) + f'savemodel/{str(VERSION)}'
    return Checkpoint(target_dir)
def main(args):
    """Fine-tune only the final FC head of a ResNet for one-class training."""
    print('Dataset: {}, Label: {}, LR: {}'.format(args.dataset, args.label, args.lr))
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda:0' if use_cuda else 'cpu')
    # Backbone with a fresh single-logit head; only the head stays trainable.
    model = utils.get_resnet_model(resnet_type=args.resnet_type)
    model.fc = torch.nn.Linear(args.latent_dim_size, 1)
    model = model.to(device)
    utils.freeze_parameters(model, train_fc=True)
    train_loader, test_loader = utils.get_loaders(dataset=args.dataset, label_class=args.label, batch_size=args.batch_size)
    outliers_loader = utils.get_outliers_loader(args.batch_size)
    train_model(model, train_loader, outliers_loader, test_loader, device, args.epochs, args.lr)
def create_argparser():
    """Build the training argument parser: training defaults merged with the
    model/diffusion defaults, plus distributed-training flags."""
    defaults = {
        'data_dir': '',
        'schedule_sampler': 'uniform',
        'lr': 0.0003,
        'weight_decay': 0.0,
        'lr_anneal_steps': 0,
        'batch_size': 1,
        'microbatch': -1,
        'ema_rate': '0.9999',
        'log_interval': 10,
        'save_interval': 10000,
        'resume_checkpoint': '',
        'use_fp16': False,
        'fp16_scale_growth': 0.001,
        'model': 'MDT_S_2',
        'mask_ratio': None,
        'decode_layer': None,
    }
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    # Distributed-training options.
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--rank', default=0, type=int, help='rank for distrbuted training.')
    add_dict_to_argparser(parser, defaults)
    return parser
def test_validate_references_nested_raises_value_error():
    """A SimulationSpace nested where a function node is expected is rejected."""
    with pytest.raises(ValueError, match='Expected type'):
        inner = optplan.Sum(functions=[optplan.make_constant(2), optplan.SimulationSpace()])
        plan = optplan.Sum(functions=[optplan.Power(function=inner), optplan.make_constant(2)])
        optplan.validate_references(plan)
# BUGFIX: the click decorator lines had lost their '@click.' prefixes, leaving
# bare tuples such as ('--seed', type=int, default=0) — a syntax error. The
# command/argument/option decorators are restored (the function body already
# uses click, e.g. click.Choice).
@click.command()
@click.argument('policy_file', type=str)
@click.option('--seed', type=int, default=0)
@click.option('--n_test_rollouts', type=int, default=20)
@click.option('--render', type=click.Choice(['human', 'rgb_array']), default='rgb_array')
@click.option('--exploit', type=bool, default=True)
@click.option('--compute_q', type=bool, default=True)
@click.option('--collect_data', type=bool, default=True)
@click.option('--goal_generation', type=str, default='Zero')
@click.option('--note', type=str, default=None, help='unique notes')
def main(policy_file, seed, n_test_rollouts, render, exploit, compute_q, collect_data, goal_generation, note):
    """Roll out a pickled policy once per skill, optionally saving videos and
    trajectory data next to the policy file."""
    set_global_seeds(seed)
    # NOTE(review): pickle.load on a user-supplied file — only load trusted policies.
    with open(policy_file, 'rb') as f:
        policy = pickle.load(f)
    env_name = policy.info['env_name']
    # Start from the defaults, then layer note-specific and env-specific overrides.
    params = config.DEFAULT_PARAMS
    params['note'] = (note or params['note'])
    if note:
        with open((((('params/' + env_name) + '/') + note) + '.json'), 'r') as file:
            override_params = json.loads(file.read())
            params.update(**override_params)
    if (env_name in config.DEFAULT_ENV_PARAMS):
        params.update(config.DEFAULT_ENV_PARAMS[env_name])
    params['env_name'] = env_name
    # The effective goal-generation mode comes from params, not the CLI flag.
    goal_generation = params['goal_generation']
    params = config.prepare_params(params)
    config.log_params(params, logger=logger)
    dims = config.configure_dims(params)
    eval_params = {'exploit': exploit, 'use_target_net': params['test_with_polyak'], 'compute_Q': compute_q, 'rollout_batch_size': 1, 'render': render}
    for name in ['T', 'gamma', 'noise_eps', 'random_eps']:
        eval_params[name] = params[name]
    evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)
    evaluator.seed(seed)
    evaluator.clear_history()
    num_skills = params['num_skills']
    if (goal_generation == 'Zero'):
        generated_goal = np.zeros(evaluator.g.shape)
    else:
        generated_goal = False
    # One evaluation pass per skill, encoded as a one-hot vector.
    for z in range(num_skills):
        assert (evaluator.rollout_batch_size == 1)
        z_s_onehot = np.zeros([evaluator.rollout_batch_size, num_skills])
        z_s_onehot[(0, z)] = 1
        base = os.path.splitext(policy_file)[0]
        for i_test_rollouts in range(n_test_rollouts):
            if ((render == 'rgb_array') or (render == 'human')):
                (imgs, episode) = evaluator.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z_s_onehot)
                end = '_test_{:02d}_exploit_{}_compute_q_{}_skill_{}.avi'.format(i_test_rollouts, exploit, compute_q, z)
                test_filename = (base + end)
                save_video(imgs[0], test_filename, lib='cv2')
            else:
                episode = evaluator.generate_rollouts(generated_goal=generated_goal, z_s_onehot=z_s_onehot)
            if collect_data:
                # Dump the observation trajectory as JSON next to the policy file.
                end = '_test_{:02d}_exploit_{}_compute_q_{}_skill_{}.txt'.format(i_test_rollouts, exploit, compute_q, z)
                test_filename = (base + end)
                with open(test_filename, 'w') as file:
                    file.write(json.dumps(episode['o'].tolist()))
    # Record mean evaluation statistics.
    for (key, val) in evaluator.logs('test'):
        logger.record_tabular(key, np.mean(val))
    logger.dump_tabular()
class ColumnBroadcastOp():
    # Code-generation helper: emits a cutlass VisitorOpColumnBroadcast epilogue
    # instantiation and carries the matching ctypes argument struct.
    Template = '\nusing ${instance_name} = cutlass::epilogue::threadblock::VisitorOpColumnBroadcast<\n ${element_accumulator}, ${element_fragment}, ${input_tile_iterator}>;\n'
    # Class-level counter used to generate unique instance names.
    counter = 0

    def __init__(self, element_accumulator, element_fragment) -> None:
        self.element_accumulator = element_accumulator
        self.element_fragment = element_fragment
        # Unique C++ type-alias name for this emitted visitor.
        self.instance_name = ('ColumnBroadcastOp%d' % ColumnBroadcastOp.counter)
        ColumnBroadcastOp.counter += 1

        # ctypes mirror of the kernel-side argument struct: a device pointer to
        # the broadcast column plus a per-batch stride.
        class _Arguments(ctypes.Structure):
            _fields_ = [('broadcast_ptr', ctypes.c_void_p), ('batch_stride', ctypes.c_longlong)]

            def __init__(self, broadcast_ptr, batch_stride=0):
                self.broadcast_ptr = int(broadcast_ptr)
                self.batch_stride = batch_stride

        self.argument_type = _Arguments

    def emit(self, operation):
        # Substitute concrete type names into the C++ template; the tile
        # iterator comes from the operation's default epilogue.
        values = {'instance_name': self.instance_name, 'element_accumulator': DataTypeTag[self.element_accumulator], 'element_fragment': DataTypeTag[self.element_fragment], 'input_tile_iterator': (operation.procedural_name() + '_default::Epilogue::OutputTileIterator')}
        return SubstituteTemplate(self.Template, values)
class NSP_Prompt():
    """Prompt/template definitions for NSP-style zero-shot classification.

    Attributes:
        label_texts: verbalizer words, one per class, substituted for [label].
        template: prompt pattern containing the [label] slot.
        is_pre: True when the prompt is prepended to the input; defaults to
            False for datasets with no template defined here.
        label_num: class count for NLI/paraphrase datasets that use no
            verbalizer template (0 otherwise).

    NOTE(review): the label lists for the Chinese datasets (EPRSTMT, TNEWS,
    CSLDCP, IFLYTEK, ...) are mostly empty strings -- the original non-ASCII
    label words appear to have been lost in extraction; restore them from the
    original source.
    """

    def __init__(self, dataset_name=''):
        (self.label_texts, self.template) = ([], '')
        self.label_num = 0
        # Default for datasets that set no template below (previously left
        # unset, which raised AttributeError for e.g. MNLI/QNLI consumers).
        self.is_pre = False
        if (dataset_name in ['SST-2', 'MR']):
            self.label_texts = ['terrible', 'great']
            self.template = 'A [label] piece of work'
            self.is_pre = True
        elif (dataset_name in ['CR']):
            self.label_texts = ['terrible', 'great']
            # Removed a dead assignment: template was first set to
            # 'It was [label]' and immediately overwritten with this value.
            self.template = 'A [label] piece of work'
            self.is_pre = True
        elif (dataset_name == 'Subj'):
            self.label_texts = ['subjective', 'objective']
            self.template = 'A [label] comment'
            self.is_pre = True
        elif (dataset_name == 'MPQA'):
            self.label_texts = ['negative', 'positive']
            self.template = 'It is [label]'
            self.is_pre = True
        elif (dataset_name == 'AGNews'):
            self.label_texts = ['political', 'sports', 'business', 'technology']
            self.template = 'A [label] news :'
            self.is_pre = True
        elif (dataset_name == 'Yahoo'):
            self.label_texts = ['Society', 'Science', 'Health', 'Education', 'Computer', 'Sports', 'Business', 'Entertainment', 'Relationship', 'Politics']
            self.template = '[label] question :'
            self.is_pre = True
        elif (dataset_name in ['EPRSTMT']):
            self.label_texts = ['', '']
            self.template = '[label]'
            self.is_pre = True
        elif (dataset_name in ['TNEWS', 'TNEWSK']):
            self.label_texts = ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
            self.template = '[label]'
            self.is_pre = True
        elif (dataset_name in ['CSLDCP']):
            self.label_texts = ['', '', '', '', '', '', '', '', '/', '', '', '', '', '', '', '/', '', '', '', '', '', '', '', '', '', '/', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '/', '', '', '', '', '/', '', '', '', '', '', '', '/', '', '', '', '/', '', '', '', '', '', '', '', '', '']
            self.template = '[label]'
            self.is_pre = True
        elif (dataset_name in ['IFLYTEK']):
            self.label_texts = ['', '', 'WIFI', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'MOBA', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'K', '', '', '', '', '', '', '', '', '', '()', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
            self.template = '[label]'
            self.is_pre = True
        # NLI / paraphrase tasks only record their class count.
        if (dataset_name in ['MNLI-m', 'MNLI-mm', 'SNLI']):
            self.label_num = 3
        elif (dataset_name in ['QNLI', 'RTE', 'MRPC', 'QQP']):
            self.label_num = 2
@pytest.mark.parametrize('nntxt_idx', CASE_INDEX)
@pytest.mark.parametrize('parameter_format', ['.h5', '.protobuf'])
@pytest.mark.parametrize('dataset_sample_num', [64])
@pytest.mark.parametrize('batch_size', [16])
@pytest.mark.parametrize('include_params', [False])
@pytest.mark.parametrize('variable_batch_size', [True])
def test_load_and_save_equivalence(nntxt_idx, parameter_format, dataset_sample_num, batch_size, include_params, variable_batch_size):
    """Train a few iterations, snapshot the model mid-way with save.save(),
    reload the snapshot and continue training; the (cost, error) trajectory
    after the snapshot point must match the uninterrupted reference run.

    Fixed: the parametrize decorators had been truncated to bare
    `.parametrize(...)` lines (a syntax error); restored as stacked
    @pytest.mark.parametrize decorators.
    """
    verbose = True
    a_few_iter = 10
    half_iter = 5  # snapshot point
    output_network_topology = True
    with generate_case_from_nntxt_str(NNTXT_EQUIVALENCE_CASES[nntxt_idx], parameter_format, dataset_sample_num, batch_size) as nnp_file:
        with create_temp_with_dir('saved.nnp') as saved_nnp_file:
            class Callback():
                pass

            class ModelSaver():
                """Callback object that saves the full training state once,
                when the configured half-way iteration is reached."""

                def __init__(self, info):
                    self.info = info

                def __call__(self, config):
                    if (config.iter != half_iter):
                        return
                    info = self.info
                    # Rebuild the dataset descriptions from live iterators.
                    datasets = []
                    with ExitStack() as stack:
                        for (d_name, d) in info.datasets.items():
                            ds = {}
                            ds['name'] = d_name
                            ds['uri'] = d.uri
                            ds['cache_dir'] = d.cache_dir
                            di_instance = stack.enter_context(d.data_iterator())
                            ds['variables'] = [var_name for var_name in di_instance.variables]
                            ds['batch_size'] = di_instance.batch_size
                            ds['no_image_normalization'] = (not d.normalize)
                            ds['shuffle'] = di_instance._shuffle
                            datasets.append(ds)
                    dataset_assign = set()
                    for obj in itertools.chain(info.monitors.values(), info.executors.values(), info.optimizers.values()):
                        for pv in obj.dataset_assign.keys():
                            dataset_assign.add(pv.name)
                    # Assemble the full nnp content dictionary expected by save.save().
                    contents = {'global_config': {'default_context': info.global_config.default_context}, 'training_config': {'max_epoch': info.training_config.max_epoch, 'iter_per_epoch': info.training_config.iter_per_epoch, 'save_best': info.training_config.save_best}, 'networks': [{'name': n_name, 'batch_size': n.batch_size, 'outputs': {out: n.variables[out].variable_instance for out in n.outputs}, 'names': {inp: n.variables[inp].variable_instance for inp in itertools.chain(n.inputs, n.outputs)}} for (n_name, n) in info.networks.items()], 'executors': [{'name': e_name, 'network': e.network.name, 'data': [pv.name for pv in e.dataset_assign.keys()], 'generator_variables': [pv.name for pv in e.generator_assign.keys()], 'output': [pv.name for pv in e.output_assign.keys()]} for (e_name, e) in info.executors.items()], 'optimizers': [{'name': o_name, 'solver': o.solver, 'network': o.network.name, 'data_variables': {pv.name: d for (pv, d) in o.dataset_assign.items()}, 'generator_variables': [pv.name for pv in o.generator_assign.keys()], 'loss_variables': [pv.name for pv in o.loss_variables], 'dataset': [ds_name for ds_name in o.data_iterators.keys()], 'weight_decay': o.weight_decay, 'lr_decay': o.lr_decay, 'lr_decay_interval': o.lr_decay_interval, 'update_interval': o.update_interval} for (o_name, o) in info.optimizers.items()], 'datasets': datasets, 'monitors': [{'name': m_name, 'network': m.network.name, 'data_variables': {pv.name: d for (pv, d) in m.dataset_assign.items()}, 'generator_variables': [pv.name for pv in m.generator_assign.keys()], 'monitor_variables': [pv.name for pv in m.monitor_variables], 'dataset': [ds_name for ds_name in m.data_iterators.keys()]} for (m_name, m) in info.monitors.items()]}
                    save.save(saved_nnp_file, contents, include_params, variable_batch_size, include_solver_state=True, parameter_format=parameter_format)
            # Reference run: train from scratch, saving state at half_iter.
            new_config = TrainConfig()
            new_config.start_iteration = 0
            new_config.end_iteration = a_few_iter
            new_config.save_optimizer_variable = False
            new_config.save_evaluation_variable = False
            new_cb = Callback()
            new_cb.forward = (lambda x: x.target.forward(clear_no_need_grad=True))
            new_cb.backward = (lambda x, b: x.target.backward(clear_buffer=True))
            new_config.cb = new_cb
            new_config.impl = 'ref'
            ref_result = []
            ref_info = load.load(nnp_file, batch_size=batch_size)
            if output_network_topology:
                for (n, opt) in ref_info.optimizers.items():
                    print(n)
                    opt.network.execute_on_proto(Verifier())
            new_config.on_iter = ModelSaver(ref_info)
            for (cost, error) in partial(train, config=new_config)(ref_info):
                ref_result.append((cost, error))
            # Resumed run: reload the snapshot and continue from half_iter.
            new_config.on_iter = None
            new_config.start_iteration = half_iter
            new_config.end_iteration = a_few_iter
            new_config.impl = 'new'
            result = []
            nn.clear_parameters()
            info = load.load(saved_nnp_file, batch_size=batch_size)
            if output_network_topology:
                for (n, opt) in info.optimizers.items():
                    print(n)
                    opt.network.execute_on_proto(Verifier())
            for (cost, error) in partial(train, config=new_config)(info):
                result.append((cost, error))
            compare_info(ref_info, info)
            # After the snapshot point both runs must produce the same trajectory.
            for (i, ((cost_ref, error_ref), (cost, error))) in enumerate(zip(ref_result, result)):
                if verbose:
                    print('{}: cost: {} <--> {}'.format(i, cost_ref, cost))
                    print('{}: error: {} <--> {}'.format(i, error_ref, error))
                if (i > new_config.start_iteration):
                    assert_allclose(np.array([cost_ref, error_ref]), np.array([cost, error]), rtol=0.01, atol=1e-05, err_msg='Error: {}'.format(nntxt_idx))
def learnable_resizer(inputs, filters=16, num_res_blocks=1, interpolation=INTERPOLATION):
    """Build a learnable image-resizing head: a plain resize of the input plus
    a learned residual correction computed at the target resolution.

    NOTE(review): assumes Keras functional-API tensors; `TARGET_SIZE`,
    `INTERPOLATION` and `res_block` come from module scope -- confirm
    against the file header.
    """
    # Skip branch: plain (non-learned) resize of the raw input.
    naive_resize = layers.experimental.preprocessing.Resizing(*TARGET_SIZE, interpolation=interpolation)(inputs)
    # Feature extraction at the ORIGINAL resolution.
    x = layers.Conv2D(filters=filters, kernel_size=7, strides=1, padding='same')(inputs)
    x = layers.LeakyReLU(0.2)(x)
    x = layers.Conv2D(filters=filters, kernel_size=1, strides=1, padding='same')(x)
    x = layers.LeakyReLU(0.2)(x)
    x = layers.BatchNormalization()(x)
    # Resize features to the target size before the residual blocks.
    bottleneck = layers.experimental.preprocessing.Resizing(*TARGET_SIZE, interpolation=interpolation)(x)
    # NOTE(review): each iteration feeds `bottleneck` (not the previous x), so
    # only the last res_block output survives when num_res_blocks > 1 --
    # confirm this is intended (the default num_res_blocks=1 makes it moot).
    for _ in range(num_res_blocks):
        x = res_block(bottleneck)
    x = layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    # Residual connection around the res-block stack.
    x = layers.Add()([bottleneck, x])
    # Project back to 3 channels and add the naive resize (global skip).
    x = layers.Conv2D(filters=3, kernel_size=7, strides=1, padding='same')(x)
    final_resize = layers.Add()([naive_resize, x])
    return final_resize
def prepare_keys_reds(folder_path):
    """Collect relative paths of all PNG frames under *folder_path* and the
    corresponding keys (paths with the '.png' suffix stripped).

    Returns:
        (img_path_list, keys) -- two parallel, sorted lists.
    """
    print('Reading image path list ...')
    frame_paths = sorted(scandir(folder_path, suffix='png', recursive=True))
    frame_keys = [path.split('.png')[0] for path in frame_paths]
    return (frame_paths, frame_keys)
def main(flags):
    """Entry point: dispatch to the trainer selected by ``flags.model``.

    Raises:
        NotImplementedError: for any model name outside the supported set.
    """
    chosen = flags.model
    if chosen == 'vanilla':
        train_vanilla(flags)
    elif chosen == 'count':
        train_count(flags)
    elif chosen == 'curiosity':
        train_curiosity(flags)
    elif chosen == 'rnd':
        train_rnd(flags)
    elif chosen == 'ride':
        train_ride(flags)
    elif chosen == 'cbet':
        train_cbet(flags)
    else:
        raise NotImplementedError('This model has not been implemented. The available options are: cbet, vanilla, count, curiosity, rnd, ride.')
def inception_v3(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, scope='InceptionV3'):
    """Inception v3 classification model (TF-slim style).

    Builds the inception_v3_base trunk, an auxiliary classifier branch off
    'Mixed_6e', and the main logits head; returns (logits, end_points).

    Raises:
        ValueError: if depth_multiplier is not strictly positive.
    """
    if (depth_multiplier <= 0):
        raise ValueError('depth_multiplier is not greater than zero.')
    # Scale channel counts by depth_multiplier, never dropping below min_depth.
    depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
    with tf.variable_scope(scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            (net, end_points) = inception_v3_base(inputs, scope=scope, min_depth=min_depth, depth_multiplier=depth_multiplier)
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
                # Auxiliary classifier head branching off the Mixed_6e features.
                aux_logits = end_points['Mixed_6e']
                with tf.variable_scope('AuxLogits'):
                    aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID', scope='AvgPool_1a_5x5')
                    aux_logits = slim.conv2d(aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')
                    # Shrink the kernel when the feature map is smaller than 5x5.
                    kernel_size = _reduced_kernel_size_for_small_input(aux_logits, [5, 5])
                    aux_logits = slim.conv2d(aux_logits, depth(768), kernel_size, weights_initializer=trunc_normal(0.01), padding='VALID', scope='Conv2d_2a_{}x{}'.format(*kernel_size))
                    aux_logits = slim.conv2d(aux_logits, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, weights_initializer=trunc_normal(0.001), scope='Conv2d_2b_1x1')
                    if spatial_squeeze:
                        aux_logits = tf.squeeze(aux_logits, [1, 2], name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits
                # Main classification head: global pool, dropout, 1x1 conv.
                with tf.variable_scope('Logits'):
                    kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
                    net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size))
                    net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
                    end_points['PreLogits'] = net
                    logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1')
                    if spatial_squeeze:
                        logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
                end_points['Logits'] = logits
                end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return (logits, end_points)
class RecallMetric(BaseSKLearnMetric):
    """Recall metric delegating to sklearn's recall_score."""

    def _evaluate(self, y_true, y_pred):
        # Relies on recall_score's default parameters.
        return recall_score(y_true, y_pred)
def register_Ns3DefaultDeleter__Ns3MmWaveControlMessage_methods(root_module, cls):
    """PyBindGen registration (auto-generated style) for
    ns3::DefaultDeleter<ns3::MmWaveControlMessage>: default and copy
    constructors plus the static Delete() method."""
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::MmWaveControlMessage > const &', 'arg0')])
    cls.add_method('Delete', 'void', [param('ns3::MmWaveControlMessage *', 'object')], is_static=True)
    return
def named_parameters(partition, recurse=True):
    """Yield (translated_name, parameter) pairs for *partition*.

    Parameter names are remapped through ``partition.lookup``: an exact match
    is replaced wholesale; otherwise only the leading dotted component is
    translated and the rest of the qualified name is kept.
    """
    lookup = partition.lookup
    for name, param in nn.Module.named_parameters(partition, recurse=recurse):
        if name in lookup:
            yield lookup[name], param
        else:
            # Qualified name: translate only the first component.
            assert '.' in name
            head, sep, tail = name.partition('.')
            yield lookup[head] + sep + tail, param
def build_sdist(source_dir, sdist_dir, config_settings=None):
    """Build an sdist from *source_dir* into *sdist_dir* via the PEP 517 hooks.

    Installs the backend's static and dynamic build requirements into an
    isolated environment first, then invokes the backend's build_sdist hook.
    """
    if config_settings is None:
        config_settings = {}
    requires, backend, backend_path = _load_pyproject(source_dir)
    caller = Pep517HookCaller(source_dir, backend, backend_path)
    with BuildEnvironment() as build_env:
        # Static requirements from pyproject.toml, then whatever the
        # backend asks for dynamically.
        build_env.pip_install(requires)
        dynamic_reqs = caller.get_requires_for_build_sdist(config_settings)
        build_env.pip_install(dynamic_reqs)
        return caller.build_sdist(sdist_dir, config_settings)
def log_trial(agents, trial_n):
    """Build a text log for one trial: a summary header followed by each
    agent's prompt and gold answer, grouped by outcome.

    Args:
        agents: iterable of agents understood by summarize_trial.
        trial_n: trial number, included in the header.

    Returns:
        The formatted log string.
    """
    (correct, incorrect, not_finish) = summarize_trial(agents)

    def _section(title, group):
        # One group header plus every agent's full prompt and gold answer.
        # (Factored out of three previously duplicated loops; output is
        # byte-identical to the original.)
        text = f' BEGIN {title} AGENTS \n\n'
        for agent in group:
            text += (agent._build_agent_prompt() + f'''
Correct answer: {agent.key}
''')
        return text

    log = f'''
BEGIN TRIAL {trial_n}
Trial summary: Correct: {len(correct)}, Incorrect: {len(incorrect)} , Not Finished: {len(not_finish)}
'''
    log += _section('CORRECT', correct)
    log += _section('INCORRECT', incorrect)
    log += _section('NOT_FINISH', not_finish)
    return log
class BM1688Context(BModelContext):
    """Decoder/runner context for the BM1688 target.

    NOTE(review): several numeric literals appear truncated by extraction --
    the second `base_addr` entry is `( + (2 ** 32))` and `fix_addr` masks with
    a bare `(reg_address & )`, which is not valid Python.  Restore the missing
    constants from the original source before running this code.  The methods
    taking `cls` (merge_instruction, is_sys) and the accessor-style methods
    (opparam_converter, MemRef) presumably lost @classmethod/@property
    decorators as well -- confirm.
    """
    device = Target.BM1688
    memmap = memmap
    dma_sys = dma_sys
    tiu_sys = tiu_sys
    local_layout_to_stride = local_layout_to_stride
    # Maps the 3-bit address tag (bits 36-38) to an index into base_addr.
    valid_tag = {1: 0, 2: 1, 3: 2}
    base_addr = [(2 ** 32), ( + (2 ** 32)), GET_LMEM_START_ADDR]

    def __init__(self) -> None:
        super().__init__()
        self.decoder = Decoder(self)
        _cache()

    def opparam_converter(self):
        # Bind the module-level converter table to this context.
        return get_opparam_converter_with_context(self, opparam_converter)

    def MemRef(self) -> Type[MemRef]:
        # Partially-applied MemRef constructor carrying this context.
        return partial(MemRef, context=self)

    def get_memory_type(self, reg_address: int) -> MType:
        """Classify a 40-bit address into an MType bucket by its high bits."""
        assert (0 <= reg_address < (2 ** 40))
        if (reg_address >> 39):
            return MType.G
        elif ((reg_address >> 23) & 1):
            return MType.S
        else:
            return MType.R

    def fix_addr(self, reg_address: int) -> int:
        """Rebase a tagged 40-bit address onto the matching base address."""
        assert (0 <= reg_address < (2 ** 40))
        if (reg_address & (1 << 39)):
            # Bits 36-38 select which base address the offset is relative to.
            tag = ((reg_address >> 36) & 7)
            fixed_addr = (self.base_addr[self.valid_tag[tag]] + (reg_address & ))
        else:
            fixed_addr = (reg_address & )
        return fixed_addr

    def merge_instruction(cls, tiu: List[BaseTpuCmd], dma: List[BaseTpuCmd]):
        """Interleave the TIU and DMA command streams ordered by command id /
        dependency id, excluding trailing system commands."""
        (main_cmd, inserted_cmd) = (dma, tiu)

        def get_end(cmds: List[BaseTpuCmd]):
            # Exclude a trailing "system" command from the mergeable range.
            if (len(cmds) == 0):
                return 0
            if cls.is_sys(cmds[(- 1)]):
                return (- 1)
            else:
                return len(cmds)

        def fix_tgcr_cmd_id_dp(tiu_cmd: List[BaseTpuCmd]):
            # SYS_TR_ACC commands inherit the dependency id of the next
            # command that has one.
            for (i, v) in enumerate(tiu_cmd):
                if isinstance(v.reg, SYS_TR_ACC_reg):
                    v.cmd_id_dep = (tiu_cmd[(i + 1)].cmd_id_dep if (tiu_cmd[(i + 1)].cmd_id_dep != None) else tiu_cmd[(i + 2)].cmd_id_dep)
        fix_tgcr_cmd_id_dp(inserted_cmd[:get_end(inserted_cmd)])
        main_id = [(m.cmd_id, m) for m in main_cmd[:get_end(main_cmd)]]
        inserted_id = [(i.cmd_id_dep, i) for i in inserted_cmd[:get_end(inserted_cmd)]]
        cmd = (main_id + inserted_id)
        cmd_sorted = sorted(cmd, key=(lambda x: x[0]))
        return [x[1] for x in cmd_sorted]

    def is_sys(cls, cmd: BaseTpuCmd):
        # True for "system" commands of either engine.
        return isinstance(cmd.reg, (dma_sys, tiu_sys))

    def get_runner(self, memory_size: int) -> CModelRunner:
        """Lazily create the cmodel-backed runner (hardware mode unsupported)."""
        assert self.using_cmodel, '1688 currently only support cmodel mode'
        if (self._cmodel_runner is None):
            self._cmodel_runner = BM1688Runner(memory_size, self.base_addr)
        return self._cmodel_runner
def preprocess(dataset, remove_from=False):
    """Preprocess a text-to-SQL dataset (spider / sparc / cosql) into the
    interaction format used for training, split by database into train/dev.

    Args:
        dataset: one of 'spider', 'sparc' or 'cosql'.
        remove_from: if True, use the reduced output vocabulary (FROM clause
            removed) and write to the *_removefrom output directory.

    Raises:
        ValueError: if `dataset` is not one of the supported names.
    """
    # Full SQL output vocabulary (includes FROM/JOIN/table-alias tokens).
    output_vocab = ['_UNK', '_EOS', '.', 't1', 't2', '=', 'select', 'from', 'as', 'value', 'join', 'on', ')', '(', 'where', 't3', 'by', ',', 'count', 'group', 'order', 'distinct', 't4', 'and', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 't5', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!', 'union', 'between', 't6', '-', 't7', '+', '/']
    if remove_from:
        # Reduced vocabulary for the "remove FROM" setting.
        output_vocab = ['_UNK', '_EOS', '=', 'select', 'value', ')', '(', 'where', ',', 'count', 'group_by', 'order_by', 'distinct', 'and', 'limit_value', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!=', 'union', 'between', '-', '+', '/']
    print('size of output_vocab', len(output_vocab))
    print('output_vocab', output_vocab)
    print()
    data_dir = 'data_clean'
    if (dataset == 'spider'):
        spider_dir = ('%s/spider/' % data_dir)
        database_schema_filename = ('%s/spider/tables.json' % data_dir)
        output_dir = ('%s/spider_data' % data_dir)
        if remove_from:
            output_dir = ('%s/spider_data_removefrom' % data_dir)
        (train_database, dev_database) = read_db_split(spider_dir)
    elif (dataset == 'sparc'):
        sparc_dir = 'data/sparc/'
        database_schema_filename = 'data/sparc/tables.json'
        output_dir = 'data/sparc_data'
        if remove_from:
            output_dir = 'data/sparc_data_removefrom'
        (train_database, dev_database) = read_db_split(sparc_dir)
    elif (dataset == 'cosql'):
        cosql_dir = 'data/cosql/'
        database_schema_filename = 'data/cosql/tables.json'
        output_dir = 'data/cosql_data'
        if remove_from:
            output_dir = 'data/cosql_data_removefrom'
        (train_database, dev_database) = read_db_split(cosql_dir)
    else:
        # Fail fast: previously an unknown dataset fell through and crashed
        # later with UnboundLocalError on output_dir.
        raise ValueError('unknown dataset: %s' % dataset)
    # Always start from a clean output directory.
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    schema_tokens = {}
    column_names = {}
    database_schemas = {}
    print('Reading spider database schema file')
    (schema_tokens, column_names, database_schemas) = read_database_schema(database_schema_filename, schema_tokens, column_names, database_schemas)
    num_database = len(schema_tokens)
    print('num_database', num_database, len(train_database), len(dev_database))
    print('total number of schema_tokens / databases:', len(schema_tokens))
    output_database_schema_filename = os.path.join(output_dir, 'tables.json')
    with open(output_database_schema_filename, 'w') as outfile:
        json.dump([v for (k, v) in database_schemas.items()], outfile, indent=4)
    if (dataset == 'spider'):
        interaction_list = read_spider(spider_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif (dataset == 'sparc'):
        interaction_list = read_sparc(sparc_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    elif (dataset == 'cosql'):
        interaction_list = read_cosql(cosql_dir, database_schemas, column_names, output_vocab, schema_tokens, remove_from)
    print('interaction_list length', len(interaction_list))
    # Split interactions into train / dev by database id.
    train_interaction = []
    for database_id in interaction_list:
        if (database_id not in dev_database):
            train_interaction += interaction_list[database_id]
    dev_interaction = []
    for database_id in dev_database:
        dev_interaction += interaction_list[database_id]
    print('train interaction: ', len(train_interaction))
    print('dev interaction: ', len(dev_interaction))
    write_interaction(train_interaction, 'train', output_dir)
    write_interaction(dev_interaction, 'dev', output_dir)
    return
class Conv2DBlock(nn.Module):
    """Conv2d followed by optional normalisation and activation, with the
    weight initialisation matched to the chosen activation.

    Raises:
        ValueError: for an activation other than None/'tanh'/'lrelu'/'relu'.
    """

    def __init__(self, in_channels, out_channels, kernel_sizes, strides, norm=None, activation=None, padding_mode='replicate'):
        super(Conv2DBlock, self).__init__()
        # 'Same'-style padding for odd kernels, per dimension.
        if isinstance(kernel_sizes, int):
            pad = kernel_sizes // 2
        else:
            pad = (kernel_sizes[0] // 2, kernel_sizes[1] // 2)
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_sizes, strides, padding=pad, padding_mode=padding_mode)
        # Weight init depends on the downstream nonlinearity; bias is zeroed.
        if activation is None:
            nn.init.xavier_uniform_(self.conv2d.weight, gain=nn.init.calculate_gain('linear'))
        elif activation == 'tanh':
            nn.init.xavier_uniform_(self.conv2d.weight, gain=nn.init.calculate_gain('tanh'))
        elif activation == 'lrelu':
            nn.init.kaiming_uniform_(self.conv2d.weight, a=LRELU_SLOPE, nonlinearity='leaky_relu')
        elif activation == 'relu':
            nn.init.kaiming_uniform_(self.conv2d.weight, nonlinearity='relu')
        else:
            raise ValueError()
        nn.init.zeros_(self.conv2d.bias)
        self.norm = norm_layer2d(norm, out_channels) if norm is not None else None
        self.activation = act_layer(activation) if activation is not None else None

    def forward(self, x):
        out = self.conv2d(x)
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
def val_collate_fn_visual(batch):
    """Collate validation samples: stack images and masks into tensors,
    keep pids / camids / paths as tuples (the 4th per-sample field is dropped)."""
    imgs, pids, camids, _, masks, paths = zip(*batch)
    stacked_imgs = torch.stack(imgs, dim=0)
    stacked_masks = torch.stack(masks, dim=0)
    return (stacked_imgs, pids, camids, stacked_masks, paths)
class TestMultiClassWrapper(TestCase):
    """Tests for PolynomialWrapper around multi-class target encoders."""

    def test_invariance_to_data_types(self):
        # Same data encoded twice: once as a numpy array with int labels,
        # once as a DataFrame with string labels -- results must match.
        x = np.array([['a', 'b', 'c'], ['a', 'b', 'c'], ['b', 'b', 'c'], ['b', 'b', 'b'], ['b', 'b', 'b'], ['a', 'b', 'a']])
        y = [1, 2, 3, 3, 3, 3]
        wrapper = PolynomialWrapper(encoders.TargetEncoder())
        result = wrapper.fit_transform(x, y)
        th.verify_numeric(result)
        x = pd.DataFrame([['a', 'b', 'c'], ['a', 'b', 'c'], ['b', 'b', 'c'], ['b', 'b', 'b'], ['b', 'b', 'b'], ['a', 'b', 'a']], columns=['f1', 'f2', 'f3'])
        y = ['bee', 'cat', 'dog', 'dog', 'dog', 'dog']
        wrapper = PolynomialWrapper(encoders.TargetEncoder())
        result2 = wrapper.fit_transform(x, y)
        self.assertTrue((result.to_numpy() == result2.to_numpy()).all(), 'The content should be the same regardless whether we pass Numpy or Pandas data type.')

    def test_transform_only_selected(self):
        # Only column f2 is encoded; the other two pass through untouched,
        # and fit+transform must equal fit_transform.
        x = pd.DataFrame([['a', 'b', 'c'], ['a', 'a', 'c'], ['b', 'a', 'c'], ['b', 'c', 'b'], ['b', 'b', 'b'], ['a', 'b', 'a']], columns=['f1', 'f2', 'f3'])
        y = ['bee', 'cat', 'dog', 'dog', 'dog', 'dog']
        wrapper = PolynomialWrapper(encoders.LeaveOneOutEncoder(cols=['f2']))
        wrapper.fit(x, y)
        result = wrapper.transform(x, y)
        self.assertEqual(len(result.columns), 4, 'We expect 2 untouched features + f2 target encoded into 2 features')
        wrapper = PolynomialWrapper(encoders.LeaveOneOutEncoder(cols=['f2']))
        result2 = wrapper.fit_transform(x, y)
        self.assertEqual(len(result2.columns), 4, 'We expect 2 untouched features + f2 target encoded into 2 features')
        pd.testing.assert_frame_equal(result, result2)

    def test_refit_stateless(self):
        # Refitting with a different label set must fully replace the state
        # learned from the first fit (no leakage of old categories).
        x = pd.DataFrame([['a', 'b', 'c'], ['a', 'b', 'c'], ['b', 'b', 'c'], ['b', 'b', 'b'], ['b', 'b', 'b'], ['a', 'b', 'a']], columns=['f1', 'f2', 'f3'])
        y1 = ['bee', 'cat', 'dog', 'dog', 'dog', 'dog']
        y2 = ['bee', 'cat', 'duck', 'duck', 'duck', 'duck']
        wrapper = PolynomialWrapper(encoders.TargetEncoder())
        result_first_fit = wrapper.fit_transform(x, y1)
        # One per-class encoder is kept for every class except the baseline.
        expected_categories_1 = {'cat', 'dog'}
        expected_categories_2 = {'cat', 'duck'}
        self.assertEqual(set(wrapper.label_encoder.ordinal_encoder.category_mapping[0]['mapping'].index), {'bee', 'cat', 'dog'})
        self.assertEqual(set(wrapper.feature_encoders.keys()), expected_categories_1)
        result_second_fit = wrapper.fit_transform(x, y2)
        self.assertEqual(set(wrapper.label_encoder.ordinal_encoder.category_mapping[0]['mapping'].index), {'bee', 'cat', 'duck'})
        self.assertEqual(set(wrapper.feature_encoders.keys()), expected_categories_2)
def smithform_ZZ(n=128, min=0, max=9, system='sage'):
    """Benchmark elementary-divisor (Smith form) computation of a random
    n x n integer matrix, in Sage or Magma; returns the CPU time in seconds.

    NOTE(review): shadows the builtins `min`/`max` (kept for interface
    compatibility) and reads a module-level `verbose` flag in the magma
    branch -- confirm it is defined at module scope.

    Raises:
        ValueError: for an unknown `system` name.
    """
    if (system == 'sage'):
        # Entries drawn from [min, max] (random_matrix's upper bound is exclusive).
        A = random_matrix(ZZ, n, n, x=min, y=(max + 1))
        t = cputime()
        v = A.elementary_divisors()
        return cputime(t)
    elif (system == 'magma'):
        code = ('\nn := %s;\nA := MatrixAlgebra(IntegerRing(), n)![Random(%s,%s) : i in [1..n^2]];\nt := Cputime();\nK := ElementaryDivisors(A);\ns := Cputime(t);\n' % (n, min, max))
        if verbose:
            print(code)
        magma.eval(code)
        # Timing is measured inside Magma and read back as a float.
        return float(magma.eval('s'))
    else:
        raise ValueError(('unknown system "%s"' % system))
class AperiodicSemigroups(CategoryWithAxiom):
    """Category of aperiodic semigroups."""

    def extra_super_categories(self):
        # Aperiodic semigroups are in particular H-trivial.
        return [Semigroups().HTrivial()]
def test_srp_randomsubspaces():
    """Prequential sanity test of StreamingRandomPatches in random-subspaces
    mode against a fixed expected prediction sequence (fixed random seeds)."""
    stream = ConceptDriftStream(position=1000, width=20, random_state=1)
    learner = StreamingRandomPatchesClassifier(n_estimators=3, subspace_mode='percentage', training_method='randomsubspaces', random_state=1)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented drop-in replacement.
    y_expected = np.asarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], dtype=int)
    run_prequential_supervised(stream, learner, max_samples=2000, n_wait=40, y_expected=y_expected)
@_cached
def _tutte_polynomial_internal(G, x, y, edge_selector, cache=None):
    """Recursive deletion-contraction core of the Tutte polynomial.

    Fixed: the decorator line had been truncated to a bare `_cached`
    expression statement; restored as `@_cached` so results are memoised.
    """
    if (not G.num_edges()):
        return x.parent().one()

    def recursive_tp(graph=None):
        # Recurse with the same x, y, edge selector and cache.
        if (graph is None):
            graph = G
        return _tutte_polynomial_internal(graph, x, y, edge_selector, cache=cache)
    # Loops each contribute a factor of y.
    with removed_loops(G) as loops:
        if loops:
            return ((y ** len(loops)) * recursive_tp())
    uG = underlying_graph(G)
    em = edge_multiplicities(G)
    d = list(em.values())

    def yy(start, end):
        # y^start + y^(start+1) + ... + y^end
        return sum(((y ** i) for i in range(start, (end + 1))))
    if G.is_forest():
        return prod(((x + yy(1, (d_i - 1))) for d_i in d))
    # The Tutte polynomial factors over biconnected blocks.
    (blocks, cut_vertices) = G.blocks_and_cut_vertices()
    if (len(blocks) > 1):
        return prod([recursive_tp(G.subgraph(block)) for block in blocks])
    components = G.connected_components_number()
    edge = edge_selector(G)
    unlabeled_edge = edge[:2]
    # Bridge: contract it and multiply by x.
    with removed_edge(G, edge):
        if (G.connected_components_number() > components):
            with contracted_edge(G, unlabeled_edge):
                return (x * recursive_tp())
    # Underlying graph is a cycle (with multiplicities).
    if (uG.num_verts() == uG.num_edges()):
        n = len(d)
        result = 0
        for i in range((n - 2)):
            term = (prod(((x + yy(1, (d_j - 1))) for d_j in d[(i + 1):])) * prod((yy(0, (d_k - 1)) for d_k in d[:i])))
            result += term
        result += ((x + yy(1, ((d[(- 1)] + d[(- 2)]) - 1))) * prod((yy(0, (d_i - 1)) for d_i in d[:(- 2)])))
        return result
    # Ear decomposition shortcut.
    ear = Ear.find_ear(uG)
    if (ear is not None):
        if (ear.is_cycle and (ear.vertices == G.vertices(sort=True))):
            return (y + sum(((x ** i) for i in range(1, ear.s))))
        else:
            with ear.removed_from(G):
                result = sum(((prod(((x + yy(1, (em[e] - 1))) for e in ear.unlabeled_edges[(i + 1):])) * prod((yy(0, (em[e] - 1)) for e in ear.unlabeled_edges[:i]))) for i in range(len(ear.unlabeled_edges))))
                result *= recursive_tp()
            with contracted_edge(G, [ear.end_points[0], ear.end_points[(- 1)]]):
                result += (prod((yy(0, (em[e] - 1)) for e in ear.unlabeled_edges)) * recursive_tp())
            return result
    # Generic deletion-contraction on a (possibly multi-)edge.
    if (len(em) == 1):
        return (x + sum(((y ** i) for i in range(1, em[unlabeled_edge]))))
    else:
        with removed_multiedge(G, unlabeled_edge):
            result = recursive_tp()
        with contracted_edge(G, unlabeled_edge):
            result += (sum(((y ** i) for i in range(em[unlabeled_edge]))) * recursive_tp())
        return result
class DatasetWriter(object):
    """Accumulates per-step samples in memory and dumps them to an HDF5 file.

    Optional flags enable extra buffers: `mujoco` records qpos/qvel from the
    simulator state, `goal` records a per-step goal.
    """

    def __init__(self, mujoco=False, goal=False):
        self.mujoco = mujoco
        self.goal = goal
        self.data = self._reset_data()
        self._num_samples = 0

    def _reset_data(self):
        """Return a fresh dict of empty sample buffers."""
        buffers = {'observations': [], 'actions': [], 'terminals': [], 'rewards': []}
        if self.mujoco:
            buffers['infos/qpos'] = []
            buffers['infos/qvel'] = []
        if self.goal:
            buffers['infos/goal'] = []
        return buffers

    def __len__(self):
        return self._num_samples

    def append_data(self, s, a, r, done, goal=None, mujoco_env_data=None):
        """Append one (observation, action, reward, terminal) step, plus the
        optional goal and mujoco state when those buffers are enabled."""
        self._num_samples += 1
        self.data['observations'].append(s)
        self.data['actions'].append(a)
        self.data['rewards'].append(r)
        self.data['terminals'].append(done)
        if self.goal:
            self.data['infos/goal'].append(goal)
        if self.mujoco:
            # Flatten and copy so later sim steps don't mutate stored arrays.
            self.data['infos/qpos'].append(mujoco_env_data.qpos.ravel().copy())
            self.data['infos/qvel'].append(mujoco_env_data.qvel.ravel().copy())

    def write_dataset(self, fname, max_size=None, compression='gzip'):
        """Write all buffers to *fname* as HDF5 datasets, optionally
        truncated to the first max_size samples."""
        arrays = {}
        for key in self.data:
            # Terminals stay boolean; everything else is stored as float32.
            dtype = np.bool_ if key == 'terminals' else np.float32
            arr = np.array(self.data[key], dtype=dtype)
            arrays[key] = arr if max_size is None else arr[:max_size]
        out_file = h5py.File(fname, 'w')
        for key in arrays:
            out_file.create_dataset(key, data=arrays[key], compression=compression)
        out_file.close()
class TestDataset(torch.utils.data.Dataset):
    """Evaluation dataset for video inpainting: one sample per video,
    containing every frame, a dilated binary mask per frame and (optionally)
    precomputed forward/backward optical flows between consecutive frames.
    """

    def __init__(self, args):
        # args: dict of paths/options; the keys read below define its schema.
        self.args = args
        self.size = (self.w, self.h) = args['size']
        self.video_root = args['video_root']
        self.mask_root = args['mask_root']
        self.flow_root = args['flow_root']
        self.load_flow = args['load_flow']
        if self.load_flow:
            assert os.path.exists(self.flow_root)
        # Videos are enumerated from the mask folder; frames from video_root.
        self.video_names = sorted(os.listdir(self.mask_root))
        self.video_dict = {}   # video name -> frame count
        self.frame_dict = {}   # video name -> sorted frame file names
        for v in self.video_names:
            frame_list = sorted(os.listdir(os.path.join(self.video_root, v)))
            v_len = len(frame_list)
            self.video_dict[v] = v_len
            self.frame_dict[v] = frame_list
        self._to_tensors = transforms.Compose([Stack(), ToTorchFormatTensor()])
        self.file_client = FileClient('disk')

    def __len__(self):
        return len(self.video_names)

    def __getitem__(self, index):
        """Load all frames, masks (and flow pairs) of the index-th video."""
        video_name = self.video_names[index]
        selected_index = list(range(self.video_dict[video_name]))
        frames = []
        masks = []
        (flows_f, flows_b) = ([], [])
        for idx in selected_index:
            # Read, convert to RGB and resize each frame.
            frame_list = self.frame_dict[video_name]
            frame_path = os.path.join(self.video_root, video_name, frame_list[idx])
            img_bytes = self.file_client.get(frame_path, 'input')
            img = imfrombytes(img_bytes, float32=False)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, self.size, interpolation=cv2.INTER_LINEAR)
            img = Image.fromarray(img)
            frames.append(img)
            # Masks are named 00000.png, 00001.png, ...; binarise then dilate.
            mask_path = os.path.join(self.mask_root, video_name, (str(idx).zfill(5) + '.png'))
            mask = Image.open(mask_path).resize(self.size, Image.NEAREST).convert('L')
            mask = np.asarray(mask)
            m = np.array((mask > 0)).astype(np.uint8)
            m = cv2.dilate(m, cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3)), iterations=4)
            mask = Image.fromarray((m * 255))
            masks.append(mask)
            # Flows only exist between consecutive frames: skip the last one.
            if ((len(frames) <= (len(selected_index) - 1)) and self.load_flow):
                current_n = frame_list[idx][:(- 4)]
                next_n = frame_list[(idx + 1)][:(- 4)]
                flow_f_path = os.path.join(self.flow_root, video_name, f'{current_n}_{next_n}_f.flo')
                flow_b_path = os.path.join(self.flow_root, video_name, f'{next_n}_{current_n}_b.flo')
                flow_f = flowread(flow_f_path, quantize=False)
                flow_b = flowread(flow_b_path, quantize=False)
                flow_f = resize_flow(flow_f, self.h, self.w)
                flow_b = resize_flow(flow_b, self.h, self.w)
                flows_f.append(flow_f)
                flows_b.append(flow_b)
        frames_PIL = [np.array(f).astype(np.uint8) for f in frames]
        # Frames are normalised to [-1, 1]; masks keep the tensor transform's range.
        frame_tensors = ((self._to_tensors(frames) * 2.0) - 1.0)
        mask_tensors = self._to_tensors(masks)
        if self.load_flow:
            flows_f = np.stack(flows_f, axis=(- 1))
            flows_b = np.stack(flows_b, axis=(- 1))
            flows_f = torch.from_numpy(flows_f).permute(3, 2, 0, 1).contiguous().float()
            flows_b = torch.from_numpy(flows_b).permute(3, 2, 0, 1).contiguous().float()
        if self.load_flow:
            return (frame_tensors, mask_tensors, flows_f, flows_b, video_name, frames_PIL)
        else:
            # Placeholder strings keep the tuple arity stable for collation.
            return (frame_tensors, mask_tensors, 'None', 'None', video_name)
class Bottleneck(nn.Module):
    """3-D ResNet bottleneck: 1x1x1 reduce, 3x3x3 (with dilation), 1x1x1
    expand by `expansion`, plus an identity/downsample skip connection."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck, self).__init__()
        # Attribute names/order kept identical for state_dict compatibility.
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        # Padding equals dilation so the spatial size is preserved (stride=1).
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, (planes * 4), kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d((planes * 4))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            # Project the skip path when channels/stride change.
            identity = self.downsample(x)
        out = self.relu(out + identity)
        return out
def requires_gloo():
    """unittest decorator that skips a test unless the c10d Gloo backend is available."""
    return unittest.skipUnless(c10d.is_gloo_available(), 'c10d was not compiled with the Gloo backend')
class IterableDataset(Dataset[T_co], metaclass=_DataPipeMeta):
    """Iterable-style dataset: subclasses provide __iter__; `+` chains two
    datasets, registered functional transforms are resolved via __getattr__,
    and pickling can be customised through a class-level reduce_ex hook."""

    # Registry of functional-form transforms resolvable through __getattr__.
    functions: Dict[(str, Callable)] = {}
    # Optional global hook customising __reduce_ex__ for all instances.
    reduce_ex_hook: Optional[Callable] = None

    def __iter__(self) -> Iterator[T_co]:
        raise NotImplementedError

    def __add__(self, other: Dataset[T_co]):
        # `ds1 + ds2` chains the two datasets end to end.
        return ChainDataset([self, other])

    def __getattr__(self, attribute_name):
        # Resolve registered functional transforms, bound to this instance.
        if (attribute_name in IterableDataset.functions):
            function = functools.partial(IterableDataset.functions[attribute_name], self)
            return function
        else:
            raise AttributeError

    def __reduce_ex__(self, *args, **kwargs):
        if (IterableDataset.reduce_ex_hook is not None):
            try:
                return IterableDataset.reduce_ex_hook(self)
            except NotImplementedError:
                # Hook declined this instance; fall back to the default protocol.
                pass
        return super().__reduce_ex__(*args, **kwargs)

    # Fixed: this method takes `cls` and is meant to be invoked on the class,
    # but the @classmethod decorator had been dropped.
    @classmethod
    def set_reduce_ex_hook(cls, hook_fn):
        """Install (or clear, when hook_fn is None) the global reduce_ex hook.

        Raises:
            Exception: if a non-None hook is already installed.
        """
        if ((IterableDataset.reduce_ex_hook is not None) and (hook_fn is not None)):
            raise Exception('Attempt to override existing reduce_ex_hook')
        IterableDataset.reduce_ex_hook = hook_fn
# NOTE(review): bare name below is almost certainly a mangled decorator
# (presumably '@make_properties' from dace.properties) — confirm upstream.
_properties
class Memlet(object):
    """Describes data movement along an SDFG edge: which data container,
    which subset of its elements, how many accesses, and an optional
    write-conflict resolution (wcr) lambda.

    NOTE(review): several methods below take no ``self``/``cls`` or look like
    property setters (``_accesses.setter``, ``_subset.setter``) — decorators
    (@staticmethod, @property, @x.setter) appear stripped in this dump.
    """
    volume = SymbolicProperty(default=0, desc='The exact number of elements moved using this memlet, or the maximum number if dynamic=True (with 0 as unbounded)')
    dynamic = Property(default=False, dtype=bool, desc='Is the number of elements moved determined at runtime (e.g., data dependent)')
    subset = SubsetProperty(allow_none=True, desc='Subset of elements to move from the data attached to this edge.')
    other_subset = SubsetProperty(allow_none=True, desc='Subset of elements after reindexing to the data not attached to this edge (e.g., for offsets and reshaping).')
    data = DataProperty(desc='Data descriptor attached to this memlet')
    wcr = LambdaProperty(allow_none=True, desc='If set, defines a write-conflict resolution lambda function. The syntax of the lambda function receives two elements: `current` value and `new` value, and returns the value after resolution')
    debuginfo = DebugInfoProperty(desc='Line information to track source and generated code')
    wcr_nonatomic = Property(dtype=bool, default=False, desc='If True, always generates non-conflicting (non-atomic) writes in resulting code')
    allow_oob = Property(dtype=bool, default=False, desc='Bypass out-of-bounds validation')
    def __init__(self, expr: Optional[str]=None, data: Optional[str]=None, subset: Union[(str, subsets.Subset, None)]=None, other_subset: Union[(str, subsets.Subset, None)]=None, volume: Union[(int, str, symbolic.SymbolicType, None)]=None, dynamic: bool=False, wcr: Union[(str, ast.AST, None)]=None, debuginfo: Optional[dtypes.DebugInfo]=None, wcr_nonatomic: bool=False, allow_oob: bool=False):
        """Create a memlet, either from a string expression (``expr``) or
        from explicit keyword fields; explicit fields only fill in what the
        expression did not set."""
        # Lazily-initialized context (set by try_initialize).
        self._sdfg = None
        self._state = None
        self._edge = None
        # Tri-state: True/False once the src/dst orientation is known.
        self._is_data_src = None
        self.data = None
        self.subset = None
        self.other_subset = None
        if (expr is not None):
            # May populate data/subset/other_subset.
            self._parse_memlet_from_str(expr)
        # Keyword arguments act as fallbacks, not overrides.
        self.data = (self.data or data)
        self.subset = (self.subset or subset)
        self.other_subset = (self.other_subset or other_subset)
        if (volume is not None):
            self.volume = volume
        elif (self.subset is not None):
            # Default volume: number of elements in the subset.
            self.volume = self.subset.num_elements()
        elif (self.other_subset is not None):
            self.volume = self.other_subset.num_elements()
        else:
            self.volume = 1
        self.dynamic = dynamic
        self.wcr = wcr
        self.wcr_nonatomic = wcr_nonatomic
        self.debuginfo = debuginfo
        self.allow_oob = allow_oob
    # NOTE(review): no 'self' — likely a stripped @staticmethod.
    def from_memlet(memlet: 'Memlet') -> 'Memlet':
        """Return a deep-ish copy of ``memlet`` with freshly-constructed
        subset ranges and no SDFG/state/edge context."""
        sbs = (subsets.Range(memlet.subset.ndrange()) if (memlet.subset is not None) else None)
        osbs = (subsets.Range(memlet.other_subset.ndrange()) if (memlet.other_subset is not None) else None)
        result = Memlet(data=memlet.data, subset=sbs, other_subset=osbs, volume=memlet.volume, dynamic=memlet.dynamic, wcr=memlet.wcr, debuginfo=copy(memlet.debuginfo), wcr_nonatomic=memlet.wcr_nonatomic, allow_oob=memlet.allow_oob)
        result._is_data_src = memlet._is_data_src
        return result
    def to_json(self):
        """Serialize this memlet (including derived src/dst subsets) to a
        JSON-compatible dict."""
        attrs = dace.serialize.all_properties_to_json(self)
        if (self.src_subset is not None):
            attrs['src_subset'] = self.src_subset.to_json()
        else:
            attrs['src_subset'] = None
        if (self.dst_subset is not None):
            attrs['dst_subset'] = self.dst_subset.to_json()
        else:
            attrs['dst_subset'] = None
        attrs['is_data_src'] = self._is_data_src
        # Legacy field: -1 signals a dynamic access count.
        attrs['num_accesses'] = (str(self.volume) if (not self.dynamic) else (- 1))
        return {'type': 'Memlet', 'attributes': attrs}
    # NOTE(review): no 'self' — likely a stripped @staticmethod.
    def from_json(json_obj, context=None):
        """Deserialize a memlet from its JSON dict; ``context`` (if given)
        restores the owning SDFG/state."""
        ret = Memlet()
        dace.serialize.set_properties_from_json(ret, json_obj, context=context, ignore_properties={'src_subset', 'dst_subset', 'num_accesses', 'is_data_src'})
        if ('is_data_src' in json_obj['attributes']):
            ret._is_data_src = json_obj['attributes']['is_data_src']
        if context:
            ret._sdfg = context['sdfg']
            ret._state = context['sdfg_state']
        return ret
    def __deepcopy__(self, memo):
        """Deep-copy all properties but deliberately drop the SDFG/state/edge
        context (the copy is not attached anywhere yet)."""
        node = object.__new__(Memlet)
        node._volume = dcpy(self._volume, memo=memo)
        node._dynamic = self._dynamic
        node._subset = dcpy(self._subset, memo=memo)
        node._other_subset = dcpy(self._other_subset, memo=memo)
        node._data = dcpy(self._data, memo=memo)
        node._wcr = dcpy(self._wcr, memo=memo)
        node._wcr_nonatomic = dcpy(self._wcr_nonatomic, memo=memo)
        node._debuginfo = dcpy(self._debuginfo, memo=memo)
        # NOTE(review): redundant — _wcr_nonatomic was already copied above.
        node._wcr_nonatomic = self._wcr_nonatomic
        node._allow_oob = self._allow_oob
        node._is_data_src = self._is_data_src
        node._sdfg = None
        node._state = None
        node._edge = None
        return node
    def is_empty(self) -> bool:
        """True if this memlet carries no data at all (no data, no subsets)."""
        return ((self.data is None) and (self.src_subset is None) and (self.dst_subset is None))
    # NOTE(review): likely a stripped @property (legacy alias of volume).
    def num_accesses(self):
        return self.volume
    # NOTE(review): mangled decorator line — presumably '@num_accesses.setter'.
    _accesses.setter
    def num_accesses(self, value):
        self.volume = value
    # NOTE(review): no 'self' — likely a stripped @staticmethod.
    def simple(data, subset_str, wcr_str=None, other_subset_str=None, wcr_conflict=True, num_accesses=None, debuginfo=None, dynamic=False):
        """Legacy constructor from a data name/node plus subset strings
        (strings or pre-built Subset/AST objects are both accepted)."""
        result = Memlet()
        if isinstance(subset_str, subsets.Subset):
            result.subset = subset_str
        else:
            result.subset = SubsetProperty.from_string(subset_str)
        result.dynamic = dynamic
        if (num_accesses is not None):
            # Legacy convention: -1 means dynamic/unbounded.
            if (num_accesses == (- 1)):
                result.dynamic = True
                result.volume = 0
            else:
                result.volume = num_accesses
        else:
            result.volume = result._subset.num_elements()
        if (wcr_str is not None):
            if isinstance(wcr_str, ast.AST):
                result.wcr = wcr_str
            else:
                result.wcr = LambdaProperty.from_string(wcr_str)
        if (other_subset_str is not None):
            if isinstance(other_subset_str, subsets.Subset):
                result.other_subset = other_subset_str
            else:
                result.other_subset = SubsetProperty.from_string(other_subset_str)
        else:
            result.other_subset = None
        # Accept either a data name or a node with a '.data' attribute.
        if hasattr(data, 'data'):
            result.data = data.data
        else:
            result.data = data
        result.wcr_nonatomic = (not wcr_conflict)
        return result
    def _parse_from_subexpr(self, expr: str):
        """Parse 'name' or 'name[subset]' into (name, subset-or-None).

        Raises:
            SyntaxError: on an invalid name or malformed expression.
        """
        if (expr[(- 1)] != ']'):
            # Plain name, no subset.
            if (not dtypes.validate_name(expr)):
                raise SyntaxError(('Invalid memlet syntax "%s"' % expr))
            return (expr, None)
        (arrname, subset_str) = expr[:(- 1)].split('[')
        if (not dtypes.validate_name(arrname)):
            raise SyntaxError(('Invalid array name "%s" in memlet' % arrname))
        return (arrname, SubsetProperty.from_string(subset_str))
    def _parse_memlet_from_str(self, expr: str):
        """Parse 'A[s]' or 'A[s] -> osub' into data/subset/other_subset."""
        expr = expr.strip()
        if ('->' not in expr):
            (self.data, self.subset) = self._parse_from_subexpr(expr)
            return
        (src_expr, dst_expr) = expr.split('->')
        src_expr = src_expr.strip()
        dst_expr = dst_expr.strip()
        if (('[' not in src_expr) and (not dtypes.validate_name(src_expr))):
            raise SyntaxError('Expression without data name not yet allowed')
        (self.data, self.subset) = self._parse_from_subexpr(src_expr)
        self.other_subset = SubsetProperty.from_string(dst_expr)
    def try_initialize(self, sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', edge: 'dace.sdfg.graph.MultiConnectorEdge'):
        """Attach SDFG context and derive the src/dst orientation
        (``_is_data_src``) from the memlet path endpoints; best-effort."""
        from dace.sdfg.nodes import AccessNode, CodeNode
        self._sdfg = sdfg
        self._state = state
        self._edge = edge
        # A code->code edge always moves at least one element.
        if (isinstance(edge.src, CodeNode) and isinstance(edge.dst, CodeNode) and (self.volume == 0)):
            self.volume = 1
        try:
            path = state.memlet_path(edge)
        except (ValueError, StopIteration):
            # Path not resolvable yet (e.g., dangling edge) — leave as-is.
            return
        is_data_src = False
        is_data_dst = False
        if isinstance(path[0].src, AccessNode):
            if (path[0].src.data == self._data):
                is_data_src = True
        if isinstance(path[(- 1)].dst, AccessNode):
            if (path[(- 1)].dst.data == self._data):
                is_data_dst = True
        if (is_data_src and is_data_dst):
            # Ambiguous (same container on both ends): default to src.
            if (self._is_data_src is None):
                self._is_data_src = True
        else:
            self._is_data_src = is_data_src
        # Fall back to the full array range when no subset was given.
        if ((self.data is not None) and (self.subset is None)):
            self.subset = subsets.Range.from_array(sdfg.arrays[self.data])
    def get_src_subset(self, edge: 'dace.sdfg.graph.MultiConnectorEdge', state: 'dace.sdfg.SDFGState'):
        """Initialize from context (if needed) and return the source subset."""
        self.try_initialize(state.parent, state, edge)
        return self.src_subset
    def get_dst_subset(self, edge: 'dace.sdfg.graph.MultiConnectorEdge', state: 'dace.sdfg.SDFGState'):
        """Initialize from context (if needed) and return the destination subset."""
        self.try_initialize(state.parent, state, edge)
        return self.dst_subset
    # NOTE(review): no 'self' — likely a stripped @staticmethod.
    def from_array(dataname, datadesc, wcr=None):
        """Create a memlet covering the entire array described by ``datadesc``."""
        rng = subsets.Range.from_array(datadesc)
        return Memlet.simple(dataname, rng, wcr_str=wcr)
    def __hash__(self):
        return hash((self.data, self.volume, self.src_subset, self.dst_subset, str(self.wcr)))
    def __eq__(self, other):
        # Compared field-by-field; context (_sdfg/_state/_edge) is ignored.
        return all(((self.data == other.data), (self.volume == other.volume), (self.src_subset == other.src_subset), (self.dst_subset == other.dst_subset), (self.wcr == other.wcr)))
    def replace(self, repl_dict):
        """Substitute symbols in volume and subsets.

        Uses a two-phase substitution (symbol -> unique intermediate ->
        final) so that swaps like {i: j, j: i} do not collide.
        """
        repl_to_intermediate = {}
        repl_to_final = {}
        for symbol in repl_dict:
            if (str(symbol) != str(repl_dict[symbol])):
                intermediate = symbolic.symbol(('__dacesym_' + str(symbol)))
                repl_to_intermediate[symbolic.symbol(symbol)] = intermediate
                repl_to_final[intermediate] = repl_dict[symbol]
        if (len(repl_to_intermediate) > 0):
            if ((self.volume is not None) and symbolic.issymbolic(self.volume)):
                self.volume = self.volume.subs(repl_to_intermediate)
                self.volume = self.volume.subs(repl_to_final)
            if (self.subset is not None):
                self.subset.replace(repl_to_intermediate)
                self.subset.replace(repl_to_final)
            if (self.other_subset is not None):
                self.other_subset.replace(repl_to_intermediate)
                self.other_subset.replace(repl_to_final)
    def num_elements(self):
        """Element count of the primary subset (0 if no subset is set)."""
        if self.subset:
            return self.subset.num_elements()
        elif self.other_subset:
            return self.other_subset.num_elements()
        return 0
    def bounding_box_size(self):
        """Bounding-box extents of the source (or destination) subset."""
        if self.src_subset:
            return self.src_subset.bounding_box_size()
        elif self.dst_subset:
            return self.dst_subset.bounding_box_size()
        return []
    # NOTE(review): likely a stripped @property.
    def src_subset(self):
        # Orientation-aware view: which stored subset is the *source*.
        if (self._is_data_src is not None):
            return (self.subset if self._is_data_src else self.other_subset)
        return self.subset
    # NOTE(review): mangled decorator line — presumably '@src_subset.setter'.
    _subset.setter
    def src_subset(self, new_src_subset):
        if (self._is_data_src is not None):
            if self._is_data_src:
                self.subset = new_src_subset
            else:
                self.other_subset = new_src_subset
        else:
            self.subset = new_src_subset
    # NOTE(review): likely a stripped @property.
    def dst_subset(self):
        # Orientation-aware view: which stored subset is the *destination*.
        if (self._is_data_src is not None):
            return (self.other_subset if self._is_data_src else self.subset)
        return self.other_subset
    # NOTE(review): mangled decorator line — presumably '@dst_subset.setter'.
    _subset.setter
    def dst_subset(self, new_dst_subset):
        if (self._is_data_src is not None):
            if self._is_data_src:
                self.other_subset = new_dst_subset
            else:
                self.subset = new_dst_subset
        else:
            self.other_subset = new_dst_subset
    def validate(self, sdfg, state):
        """Raise KeyError if the referenced data is not in the SDFG."""
        if ((self.data is not None) and (self.data not in sdfg.arrays)):
            raise KeyError(('Array "%s" not found in SDFG' % self.data))
    def used_symbols(self, all_symbols: bool, edge=None) -> Set[str]:
        """Return symbol names used by this memlet.

        With ``all_symbols`` True, include volume and all subset symbols;
        otherwise, for non-view edges collect full subset symbols, and for
        view edges only the symbols in the range begins.
        """
        result = set()
        view_edge = False
        if all_symbols:
            result |= set(map(str, self.volume.free_symbols))
        elif (edge is not None):
            view_edge = False
            from dace.sdfg import nodes
            # An edge counts as a "view edge" if it touches code nodes or a
            # 'views' connector of an access node.
            if (isinstance(edge.dst, nodes.CodeNode) or isinstance(edge.src, nodes.CodeNode)):
                view_edge = True
            elif ((edge.dst_conn == 'views') and isinstance(edge.dst, nodes.AccessNode)):
                view_edge = True
            elif ((edge.src_conn == 'views') and isinstance(edge.src, nodes.AccessNode)):
                view_edge = True
        if (not view_edge):
            if self.src_subset:
                result |= self.src_subset.free_symbols
            if self.dst_subset:
                result |= self.dst_subset.free_symbols
        else:
            # Views only need the range-begin symbols.
            if self.src_subset:
                for (rb, _, _) in self.src_subset.ndrange():
                    if symbolic.issymbolic(rb):
                        result |= set(map(str, rb.free_symbols))
            if self.dst_subset:
                for (rb, _, _) in self.dst_subset.ndrange():
                    if symbolic.issymbolic(rb):
                        result |= set(map(str, rb.free_symbols))
        return result
    # NOTE(review): likely a stripped @property.
    def free_symbols(self) -> Set[str]:
        """All symbols used by this memlet."""
        return self.used_symbols(all_symbols=True)
    def get_free_symbols_by_indices(self, indices_src: List[int], indices_dst: List[int]) -> Set[str]:
        """Symbols used by volume plus the selected src/dst subset dimensions."""
        result = set()
        result |= set(map(str, self.volume.free_symbols))
        if self.src_subset:
            result |= self.src_subset.get_free_symbols_by_indices(indices_src)
        if self.dst_subset:
            result |= self.dst_subset.get_free_symbols_by_indices(indices_dst)
        return result
    def get_stride(self, sdfg: 'dace.sdfg.SDFG', map: 'dace.sdfg.nodes.Map', dim: int=(- 1)) -> 'dace.symbolic.SymExpr':
        """Return the memory stride seen when the given map parameter
        advances by one step along dimension ``dim``."""
        if (self.data is None):
            return symbolic.pystr_to_symbolic('0')
        param = symbolic.symbol(map.params[dim])
        array = sdfg.arrays[self.data]
        # Address difference between consecutive map iterations.
        curr = self.subset.at(([0] * len(array.strides)), array.strides)
        next = curr.subs(param, (param + map.range[dim][2]))
        return (next - curr).simplify()
    def __label__(self, sdfg, state):
        """Rendered label, using the array shape for compact notation."""
        if (self.data is None):
            return self._label(None)
        return self._label(sdfg.arrays[self.data].shape)
    def __str__(self):
        return self._label(None)
    def _label(self, shape):
        """Build the human-readable label, e.g. 'A(dyn) [0:N] (CR: sum)'."""
        result = ''
        if (self.data is not None):
            result = self.data
        if (self.subset is None):
            return result
        num_elements = self.subset.num_elements()
        if self.dynamic:
            result += '(dyn) '
        elif (self.volume != num_elements):
            result += ('(%s) ' % SymbolicProperty.to_string(self.volume))
        arrayNotation = True
        try:
            # Scalars accessed at the origin need no bracket notation.
            if ((shape is not None) and (reduce(operator.mul, shape, 1) == 1)):
                if all(((s == 0) for s in self.subset.min_element())):
                    arrayNotation = False
        except TypeError:
            # Symbolic min_element cannot be compared — keep brackets.
            pass
        if arrayNotation:
            result += ('[%s]' % str(self.subset))
        if ((self.wcr is not None) and (str(self.wcr) != '')):
            redtype = detect_reduction_type(self.wcr)
            if (redtype == dtypes.ReductionType.Custom):
                wcrstr = unparse(ast.parse(self.wcr).body[0].value.body)
            else:
                wcrstr = str(redtype)
                wcrstr = wcrstr[(wcrstr.find('.') + 1):]
            result += (' (CR: %s)' % wcrstr)
        if (self.other_subset is not None):
            # Arrow direction reflects the data orientation.
            if (self._is_data_src is False):
                result += (' <- [%s]' % str(self.other_subset))
            else:
                result += (' -> [%s]' % str(self.other_subset))
        return result
    def __repr__(self):
        return (('Memlet (' + self.__str__()) + ')')
class SymplecticDerivationLieAlgebra(InfinitelyGeneratedLieAlgebra, IndexedGenerators):
    """The symplectic derivation Lie algebra of rank ``g`` over ``R``.

    Basis elements are indexed by partitions of length >= 2 with parts at
    most ``2g``; part ``i <= g`` denotes the generator ``a_i`` and part
    ``i > g`` denotes ``b_{i-g}``.
    """
    def __init__(self, R, g):
        # The construction is only defined for genus >= 4.
        if (g < 4):
            raise ValueError('g must be at least 4')
        cat = LieAlgebras(R).WithBasis().Graded()
        self._g = g
        # Index set: for each n, partitions of n with >= 2 parts, each <= 2g.
        d = Family(NonNegativeIntegers(), (lambda n: Partitions(n, min_length=2, max_part=(2 * g))))
        indices = DisjointUnionEnumeratedSets(d)
        InfinitelyGeneratedLieAlgebra.__init__(self, R, index_set=indices, category=cat)
        IndexedGenerators.__init__(self, indices, sorting_key=self._basis_key)
    def _basis_key(self, x):
        # Sort basis partitions by length first, then lexicographically.
        return (len(x), x)
    def _repr_term(self, m):
        """Plain-text representation of a basis monomial, e.g. 'a2*a1'."""
        g = self._g
        def label(i):
            return ('a{}'.format(i) if (i <= g) else 'b{}'.format((i - g)))
        return '*'.join((label(i) for i in reversed(m)))
    def _latex_term(self, m):
        """LaTeX representation of a basis monomial."""
        g = self._g
        def label(i):
            return ('a_{{{}}}'.format(i) if (i <= g) else 'b_{{{}}}'.format((i - g)))
        return ' '.join((label(i) for i in reversed(m)))
    def _unicode_art_term(self, m):
        """Unicode-art representation with subscripted indices."""
        from sage.typeset.unicode_art import unicode_art, unicode_subscript
        g = self._g
        def label(i):
            return ('a{}'.format(unicode_subscript(i)) if (i <= g) else 'b{}'.format(unicode_subscript((i - g))))
        return unicode_art(''.join((label(i) for i in reversed(m))))
    def _repr_(self):
        return 'Symplectic derivation Lie algebra of rank {} over {}'.format(self._g, self.base_ring())
    def degree_on_basis(self, x):
        # Grading: a partition of weight n has degree n - 2.
        return (len(x) - 2)
    def bracket_on_basis(self, x, y):
        """Bracket of two basis monomials via symplectic contraction:
        pair each a_i in one factor with the matching b_i in the other;
        a/b pairings contribute +1, b/a pairings contribute -1."""
        g = self._g
        ret = {}
        one = self.base_ring().one()
        for (i, xi) in enumerate(x):
            for (j, yj) in enumerate(y):
                # Same-type generators (a,a) or (b,b) do not pair.
                if (((xi <= g) and (yj <= g)) or ((xi > g) and (yj > g))):
                    continue
                if ((xi <= g) and (yj > g)):
                    # a_{xi} pairs with b_{yj-g} only when indices match.
                    if (xi != (yj - g)):
                        continue
                    # Contract: remove the paired parts, merge the rest.
                    m = _Partitions(sorted((((x[:i] + x[(i + 1):]) + y[:j]) + y[(j + 1):]), reverse=True))
                    if (m in ret):
                        ret[m] += one
                    else:
                        ret[m] = one
                else:
                    if ((xi - g) != yj):
                        continue
                    m = _Partitions(sorted((((x[:i] + x[(i + 1):]) + y[:j]) + y[(j + 1):]), reverse=True))
                    if (m in ret):
                        ret[m] -= one
                    else:
                        ret[m] = (- one)
        return self._from_dict(ret, remove_zeros=True)
    def _an_element_(self):
        """A representative element for doctests and coercion."""
        d = self.monomial
        return ((d(_Partitions([2, 1])) - (self.base_ring().an_element() * d(_Partitions([5, 2, 2, 1])))) + d(_Partitions([((2 * self._g) - 1), (self._g + 1), 2, 1, 1])))
    def some_elements(self):
        """A small sample of elements spanning several degrees."""
        d = self.monomial
        g = self._g
        return [d(_Partitions([2, 1])), d(_Partitions([(g + 3), (g + 1)])), d(_Partitions([2, 1, 1])), d(_Partitions([((2 * g) - 1), ((2 * g) - 2)])), d(_Partitions([((2 * g) - 2), (g - 1), 1])), self.an_element()]
    class Element(LieAlgebraElement):
        # Elements need no behavior beyond the generic Lie algebra element.
        pass
class StrideSupport(enum.Enum):
    """Kinds of stride support a kernel/layout can provide."""
    Strided = enum_auto()   # arbitrary strides supported
    Unity = enum_auto()     # stride of one (contiguous) required
    Fixed = enum_auto()     # a specific compile-time stride required
def get_AA_golden_ratio():
    """Return the golden ratio as a lazily-constructed, cached AlgebraicReal.

    The value is built once from the number field Q(phi) with minimal
    polynomial x^2 - x - 1 and the root isolated in [1.618, 1.6181], then
    memoized in the module-level ``AA_golden_ratio``.
    """
    global AA_golden_ratio
    if (AA_golden_ratio is None):
        AA_golden_ratio_nf = NumberField((((ZZX_x ** 2) - ZZX_x) - 1), 'phi')
        # Select the positive root (~1.61803) of x^2 - x - 1.
        AA_golden_ratio_generator = AlgebraicGenerator(AA_golden_ratio_nf, ANRoot((((AAPoly.gen() ** 2) - AAPoly.gen()) - 1), RIF(1.618, 1.6181)))
        AA_golden_ratio = AlgebraicReal(ANExtensionElement(AA_golden_ratio_generator, AA_golden_ratio_nf.gen()))
    return AA_golden_ratio
def load_pretrained_feature_extractor(feature_extractor_name, device):
    """Load a pretrained PreActResNet18 checkpoint and return it in eval mode.

    Args:
        feature_extractor_name: checkpoint file name under 'checkpoint/'.
        device: target device (e.g. 'cuda' or 'cpu').

    Returns:
        The model moved to ``device`` with weights loaded, in eval mode.
    """
    net_test = PreActResNet18()
    net_test = net_test.to(device)
    # map_location is required so checkpoints saved on GPU also load on
    # CPU-only machines (the original call crashed in that case).
    state_dict = torch.load(('checkpoint/' + feature_extractor_name), map_location=device)
    net_test.load_state_dict(state_dict)
    net_test.eval()
    return net_test
def rand_augment_transform(config_str, hparams):
    """Build a RandAugment transform from a dash-separated config string.

    The string starts with 'rand' followed by key/value sections, e.g.
    'rand-m9-n3-mstd0.5': m = magnitude, n = layers, w = weight index,
    mstd = magnitude std (stored in hparams), inc = use increasing transforms.
    """
    magnitude = _MAX_LEVEL
    num_layers = 2
    weight_idx = None
    transforms = _RAND_TRANSFORMS
    sections = config_str.split('-')
    assert (sections[0] == 'rand')
    for section in sections[1:]:
        # Split a section like 'm9' into key 'm' and value '9'.
        pieces = re.split('(\\d.*)', section)
        if (len(pieces) < 2):
            continue
        key = pieces[0]
        val = pieces[1]
        if (key == 'mstd'):
            hparams.setdefault('magnitude_std', float(val))
        elif (key == 'inc'):
            if bool(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif (key == 'm'):
            magnitude = int(val)
        elif (key == 'n'):
            num_layers = int(val)
        elif (key == 'w'):
            weight_idx = int(val)
        else:
            assert False, 'Unknown RandAugment config section'
    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
    if (weight_idx is None):
        choice_weights = None
    else:
        choice_weights = _select_rand_weights(weight_idx)
    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
    """Build a segmentor from config.

    ``train_cfg``/``test_cfg`` as separate arguments are deprecated; they
    should live inside the model config. Passing them both here and in the
    model config is an error.
    """
    uses_legacy_args = (train_cfg is not None) or (test_cfg is not None)
    if uses_legacy_args:
        warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
    assert (cfg.get('train_cfg') is None) or (train_cfg is None), 'train_cfg specified in both outer field and model field '
    assert (cfg.get('test_cfg') is None) or (test_cfg is None), 'test_cfg specified in both outer field and model field '
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return SEGMENTORS.build(cfg, default_args=default_args)
def evaluate(args, model, tokenizer, mode, prefix=''):
    """Run evaluation of ``model`` on the dataset selected by ``mode``.

    Computes the average loss and (for this task) a Pearson correlation via
    ``eval_sts``, writes the results to a text file under args.output_dir,
    and returns ``(result_dict, predictions)``.
    """
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, mode)
    # Only the main process (rank -1 or 0) creates the output directory.
    if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
        os.makedirs(eval_output_dir)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Wrap in DataParallel for multi-GPU, unless already wrapped.
    if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info('  Num examples = %d', len(eval_dataset))
    logger.info('  Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        model.eval()
        batch = tuple((t.to(args.device) for t in batch))
        with torch.no_grad():
            # Batch layout: input_ids, attention_mask, token_type_ids, labels.
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            if (args.model_type != 'distilbert'):
                # Only these model types consume token_type_ids.
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            outputs = model(**inputs)
            (tmp_eval_loss, logits) = outputs[:2]
            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        # Accumulate predictions and gold labels across batches.
        if (preds is None):
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs['labels'].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
    eval_loss = (eval_loss / nb_eval_steps)
    if (args.output_mode == 'classification'):
        preds = np.argmax(preds, axis=1)
    elif (args.output_mode == 'regression'):
        preds = np.squeeze(preds)
    result = {'pearson': eval_sts(out_label_ids, preds), 'num': len(eval_dataset)}
    result['loss'] = eval_loss
    # 'dev' results are written as 'eval'; other modes keep their name.
    output_eval_file = os.path.join(args.output_dir, (args.result_prefix + '{}_results.txt'.format((mode if (mode != 'dev') else 'eval'))))
    with open(output_eval_file, 'w') as writer:
        logger.info('***** Eval results {} *****'.format(prefix))
        for key in result.keys():
            logger.info('  %s = %s', key, str(result[key]))
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    return (result, preds)
class OpenExecutor(ActionExecutor):
    """Executes OPEN (or CLOSE, when ``close`` is True) script actions by
    toggling the OPEN/CLOSED state of the target object."""
    def __init__(self, close: bool):
        # True -> this executor performs CLOSE; False -> OPEN.
        self.close = close
    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo):
        """Yield the successor state with the object's open/closed state
        flipped, or yield nothing and record an error in ``info``."""
        current_line = script[0]
        info.set_current_line(current_line)
        node = state.get_state_node(current_line.object())
        if (node is None):
            info.object_found_error()
        elif self.check_openable(state, node, info):
            new_node = node.copy()
            # Swap the state pair: remove the current one, add the target.
            new_node.states.discard((State.OPEN if self.close else State.CLOSED))
            new_node.states.add((State.CLOSED if self.close else State.OPEN))
            (yield state.change_state([ChangeNode(new_node)]))
    def check_openable(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo):
        """Validate all preconditions for opening/closing ``node``;
        records the first failure in ``info`` and returns False."""
        # desk/window are whitelisted even without the CAN_OPEN property.
        if ((Property.CAN_OPEN not in node.properties) and (node.class_name not in ['desk', 'window'])):
            info.error('{} can not be opened', node)
            return False
        if (not _is_character_close_to(state, node)):
            char_node = _get_character_node(state)
            info.error('{} is not close to {}', char_node, node)
            return False
        # Opening requires a free hand; closing does not.
        if ((not self.close) and (_find_free_hand(state) is None)):
            char_node = _get_character_node(state)
            info.error('{} does not have a free hand', char_node)
            return False
        # The object must currently be in the opposite state.
        s = (State.OPEN if self.close else State.CLOSED)
        if (s not in node.states):
            info.error('{} is not {}', node, s.name.lower())
            return False
        # A device that is switched on cannot be opened.
        if ((not self.close) and (State.ON in node.states)):
            info.error('{} is still on'.format(node))
            return False
        return True
class KerasFakeQuantExporterBaseTest(ABC):
    """Base harness for Keras fake-quant export tests: quantize a model with
    MCT PTQ, export it fakely-quantized, reload it, and verify the exported
    model's outputs match the exportable model exactly."""
    def run_test(self):
        # Quantize the model under test.
        self.model = self.get_model()
        (self.exportable_model, _) = mct.ptq.keras_post_training_quantization_experimental(in_model=self.model, core_config=mct.core.CoreConfig(quantization_config=self.get_quantization_config()), representative_data_gen=self.__get_repr_dataset, target_platform_capabilities=self.get_tpc(), new_experimental_exporter=True)
        # Export to a temporary file in fakely-quantized format.
        (_, self.fq_model_file_path) = tempfile.mkstemp(DEFAULT_KERAS_EXPORT_EXTENTION)
        mct.exporter.keras_export_model(model=self.exportable_model, save_model_path=self.fq_model_file_path, quantization_format=mct.exporter.QuantizationFormat.FAKELY_QUANT)
        self.loaded_model = keras.models.load_model(self.fq_model_file_path)
        # Compare loaded vs. exportable model on one representative batch.
        inputs = next(self.__get_repr_dataset())
        loaded_model_outputs = self.loaded_model(inputs)
        exportable_model_outputs = self.exportable_model(inputs)
        if (not isinstance(loaded_model_outputs, list)):
            loaded_model_outputs = [loaded_model_outputs]
        if (not isinstance(exportable_model_outputs, list)):
            exportable_model_outputs = [exportable_model_outputs]
        for (loaded_out, exportable_out) in zip(loaded_model_outputs, exportable_model_outputs):
            # Outputs must be bit-identical, not merely close.
            diff = np.sum(np.abs((loaded_out - exportable_out)))
            assert (diff == 0), f'Expected exportable model and exported model to have identical outputs but sum abs diff is {diff}'
        self.run_checks()
        os.remove(self.fq_model_file_path)
    def get_input_shape(self):
        # Default input shape (H, W, C); override per test.
        return [(16, 16, 3)]
    def get_tpc(self):
        # Default target-platform capabilities; override per test.
        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
    def get_quantization_config(self):
        return QuantizationConfig()
    def __get_repr_dataset(self):
        # Single random batch (batch size 1) per declared input shape.
        (yield [np.random.randn(*((1,) + shape)) for shape in self.get_input_shape()])
    def get_model(self):
        # Subclasses must provide the Keras model to quantize.
        raise Exception(f'Exporter test must implement get_model method')
    def run_checks(self):
        # Subclasses must provide test-specific assertions on the export.
        raise Exception(f'Exporter test must implement run_checks method')
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue, cluster_shape, threadblock_shape, stages, opclass, persistent=False):
    """Generate a GEMM test method for the given tile/layout configuration
    and attach it to test class ``cls`` under a descriptive name.

    Returns the generated test function (also bound to ``cls``).
    """
    def run(self):
        # A/B operands are fixed to fp16; output/accumulator vary per config.
        element_A = cutlass.float16
        element_B = cutlass.float16
        # SIMT opclass needs explicit instruction shape and warp count.
        inst_shape = ([1, 1, 1] if (opclass == cutlass.OpClass.Simt) else None)
        warp_count = ([2, 2, 1] if (opclass == cutlass.OpClass.Simt) else None)
        math_inst = MathInstruction(instruction_shape=inst_shape, element_a=element_A, element_b=element_B, element_accumulator=element_accumulator, opcode_class=opclass, math_operation=MathOperation.multiply_add)
        tile_description = TileDescription(threadblock_shape=threadblock_shape, cluster_shape=cluster_shape, stages=stages, warp_count=warp_count, math_instruction=math_inst, persistent=persistent)
        A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
        B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
        C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
        swizzling_functor = cutlass.IdentitySwizzle1
        # arch=90: targets SM90 (Hopper) GEMM kernels.
        operation = GemmOperationUniversal(arch=90, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
        self.assertTrue(test_all_gemm(operation, 'universal'))
    if persistent:
        suffix = '_persistent'
    else:
        suffix = ''
    name = name_fn(layouts, alignments, element_output, element_accumulator, element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
    setattr(cls, name, run)
    return run
def uniform_init(module: nn.Module, a: float=0, b: float=1, bias: float=0) -> None:
    """Initialize ``module.weight`` from U(a, b) and set ``module.bias`` to a
    constant, skipping whichever parameter is absent or None."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.uniform_(weight, a, b)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def add_generation_args(parser):
    """Register sequence-generation CLI options (beam search, sampling,
    length control, scoring) on ``parser`` and return the argument group."""
    group = parser.add_argument_group('Generation')
    # Shared evaluation options (checkpoint path, etc.).
    add_common_eval_args(group)
    group.add_argument('--beam', default=5, type=int, metavar='N', help='beam size')
    group.add_argument('--nbest', default=1, type=int, metavar='N', help='number of hypotheses to output')
    group.add_argument('--max-len-a', default=0, type=float, metavar='N', help='generate sequences of maximum length ax + b, where x is the source length')
    group.add_argument('--max-len-b', default=200, type=int, metavar='N', help='generate sequences of maximum length ax + b, where x is the source length')
    group.add_argument('--min-len', default=1, type=float, metavar='N', help='minimum generation length')
    group.add_argument('--match-source-len', default=False, action='store_true', help='generations should match the source length')
    group.add_argument('--no-early-stop', action='store_true', help='deprecated')
    group.add_argument('--unnormalized', action='store_true', help='compare unnormalized hypothesis scores')
    group.add_argument('--no-beamable-mm', action='store_true', help="don't use BeamableMM in attention layers")
    group.add_argument('--lenpen', default=1, type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
    group.add_argument('--unkpen', default=0, type=float, help='unknown word penalty: <0 produces more unks, >0 produces fewer')
    group.add_argument('--replace-unk', nargs='?', const=True, default=None, help='perform unknown replacement (optionally with alignment dictionary)')
    group.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu')
    group.add_argument('--score-reference', action='store_true', help='just score the reference translation')
    group.add_argument('--prefix-size', default=0, type=int, metavar='PS', help='initialize generation by target prefix of given length')
    group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N', help='ngram blocking such that this size ngram cannot be repeated in the generation')
    # Sampling-based decoding (mutually exclusive in spirit with beam search).
    group.add_argument('--sampling', action='store_true', help='sample hypotheses instead of using beam search')
    group.add_argument('--sampling-topk', default=(- 1), type=int, metavar='PS', help='sample from top K likely next words instead of all words')
    group.add_argument('--sampling-topp', default=(- 1.0), type=float, metavar='PS', help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
    group.add_argument('--temperature', default=1.0, type=float, metavar='N', help='temperature for generation')
    group.add_argument('--diverse-beam-groups', default=(- 1), type=int, metavar='N', help='number of groups for Diverse Beam Search')
    group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N', help='strength of diversity penalty for Diverse Beam Search')
    group.add_argument('--print-alignment', action='store_true', help='if set, uses attention feedback to compute and print alignment to source tokens')
    return group
def test_f():
    """Function symbols are equal iff name and argument sequence (including
    order) are identical."""
    x = Symbol('x')
    y = Symbol('y')
    z = Symbol('z')
    # Different names -> different functions.
    f = function_symbol('f', x)
    g = function_symbol('g', x)
    assert (f != g)
    # Same name, same arguments -> equal.
    f = function_symbol('f', x)
    g = function_symbol('f', x)
    assert (f == g)
    # Same name, swapped argument order -> not equal.
    f = function_symbol('f', x, y)
    g = function_symbol('f', y, x)
    assert (f != g)
    f = function_symbol('f', x, y)
    g = function_symbol('f', x, y)
    assert (f == g)
class CudaTensorHolder(pycuda_driver.PointerHolderBase):
    """Adapter exposing a torch CUDA tensor's device pointer to PyCUDA.

    NOTE(review): holds only the raw pointer, not a reference to the tensor —
    the caller must keep the tensor alive while this holder is in use.
    """
    def __init__(self, t):
        super().__init__()
        # Raw device address of the tensor's storage.
        self.gpudata = t.data_ptr()
def test_attribute_in_ranged_loop():
    """Compiled doubly-nested loop over attributes must match a*5.

    NOTE(review): doublefor_jit presumably scales `a` in place by 5 —
    confirm against its definition; the assertion compares the mutated
    array to the precomputed regression value.
    """
    a = np.random.rand(20, 20)
    regression = (a * 5)  # expected result, computed before mutation
    doublefor_jit(a)
    assert np.allclose(a, regression)
def val_seg(model, dataset_loader, criterion=None, num_classes=21, device='cuda'):
    """Validate a segmentation model over ``dataset_loader``.

    Returns:
        (mean_iou_percent, avg_loss) — avg_loss is 0 when no criterion given.
    """
    model.eval()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()
    batch_time = AverageMeter()
    end = time.time()
    miou_class = MIOU(num_classes=num_classes)
    if criterion:
        losses = AverageMeter()
    with torch.no_grad():
        for (i, (inputs, target)) in enumerate(dataset_loader):
            inputs = inputs.to(device=device)
            target = target.to(device=device)
            outputs = model(inputs)
            if criterion:
                if (device == 'cuda'):
                    loss = criterion(outputs, target).mean()
                    # Data-parallel models return per-GPU output lists;
                    # gather them onto one device before computing IoU.
                    if isinstance(outputs, (list, tuple)):
                        target_dev = outputs[0].device
                        outputs = gather(outputs, target_device=target_dev)
                else:
                    loss = criterion(outputs, target)
                losses.update(loss.item(), inputs.size(0))
            # Accumulate per-class intersection/union across batches.
            (inter, union) = miou_class.get_iou(outputs, target)
            inter_meter.update(inter)
            union_meter.update(union)
            batch_time.update((time.time() - end))
            end = time.time()
            if ((i % 10) == 0):
                # Periodic progress log; epsilon avoids division by zero.
                iou = (inter_meter.sum / (union_meter.sum + 1e-10))
                miou = (iou.mean() * 100)
                loss_ = (losses.avg if (criterion is not None) else 0)
                print_log_message(('[%d/%d]\t\tBatch Time:%.4f\t\tLoss:%.4f\t\tmiou:%.4f' % (i, len(dataset_loader), batch_time.avg, loss_, miou)))
    iou = (inter_meter.sum / (union_meter.sum + 1e-10))
    miou = (iou.mean() * 100)
    print_info_message('Mean IoU: {0:.2f}'.format(miou))
    if criterion:
        return (miou, losses.avg)
    else:
        return (miou, 0)
def FibonacciTree(n):
    """Return the Fibonacci tree of order ``n`` as a Sage Graph.

    The tree is built recursively: the order-n tree joins the order-(n-1)
    and order-(n-2) trees; vertex positions are assigned so the drawing has
    golden-ratio-scaled vertical spacing.
    """
    T = Graph(name=('Fibonacci-Tree-%d' % n))
    if (n == 1):
        T.add_vertex(0)
    if (n < 2):
        return T
    from sage.combinat.combinat import fibonacci_sequence
    # F[0..n+2] — Fibonacci numbers used for vertex offsets and count.
    F = list(fibonacci_sequence((n + 2)))
    # Vertical level spacing, scaled by powers of the golden ratio.
    s = (1.618 ** ((n / 1.618) - 1.618))
    pos = {}
    def fib(level, node, y):
        # Place 'node' at depth y, then recurse into its two subtrees whose
        # roots are offset by the Fibonacci number of the current level.
        pos[node] = (node, y)
        if (level < 2):
            return
        level -= 1
        y -= s
        diff = F[level]
        T.add_edge(node, (node - diff))
        if (level == 1):
            # Leaf child: place directly below and stop.
            pos[(node - diff)] = (node, y)
            return
        T.add_edge(node, (node + diff))
        fib(level, (node - diff), y)
        fib((level - 1), (node + diff), y)
    # Total vertex count is sum of F[0..n]; root is vertex F[n+1]-1.
    T.add_vertices(range(sum(F[:(- 1)])))
    fib(n, (F[(n + 1)] - 1), 0)
    T.set_pos(pos)
    return T
def _dataset_info(txt_labels):
with open(txt_labels, 'r') as f:
images_list = f.readlines()
file_names = []
labels = []
for row in images_list:
row = row.split(' ')
file_names.append(row[0])
labels.append(int(row[1]))
return (file_names, labels) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.