code stringlengths 101 5.91M |
|---|
class AfterExecution(CurrentOperationMixin, ExecutionEvent):
    """Event emitted after a single API operation's test run has finished.

    NOTE(review): the fields use dataclass-style ``field(default_factory=...)``
    and ``from_result`` takes ``cls`` — a ``@dataclass`` decorator on the class
    and a ``@classmethod`` decorator on ``from_result`` were presumably lost in
    extraction; confirm against the original source.
    """

    # Identification of the API operation that was executed.
    method: str
    path: str
    relative_path: str
    verbose_name: str
    # Outcome of the run.
    status: Status
    data_generation_method: list[DataGenerationMethod]
    result: SerializedTestResult
    elapsed_time: float
    correlation_id: str
    # Captured automatically at event-construction time.
    thread_id: int = field(default_factory=threading.get_ident)
    hypothesis_output: list[str] = field(default_factory=list)

    def from_result(cls, result: TestResult, status: Status, elapsed_time: float, hypothesis_output: list[str], operation: APIOperation, data_generation_method: list[DataGenerationMethod], correlation_id: str) -> AfterExecution:
        """Build an AfterExecution event from a raw TestResult and its operation."""
        return cls(method=operation.method.upper(), path=operation.full_path, relative_path=operation.path, verbose_name=operation.verbose_name, result=SerializedTestResult.from_test_result(result), status=status, elapsed_time=elapsed_time, hypothesis_output=hypothesis_output, data_generation_method=data_generation_method, correlation_id=correlation_id)
class BaseRCA(BaseModel):
    """Abstract base for root-cause-analysis (RCA) models.

    Provides a per-subclass logger; concrete models implement ``train`` and
    ``find_root_causes``.
    """

    def __init__(self):
        # Logger named after the concrete subclass so log lines are traceable.
        self.logger = get_logger(self.__class__.__name__)

    def train(self, **kwargs):
        """Fit the RCA model.

        NOTE(review): the body appears stripped in extraction — presumably an
        abstract method; confirm against the original source.
        """

    def find_root_causes(self, **kwargs) -> RCAResults:
        """Return detected root causes as an :class:`RCAResults`.

        NOTE(review): the body appears stripped in extraction — presumably an
        abstract method; confirm against the original source.
        """
def to_format(arg, size=None):
    """Coerce *arg* into a FormatObject.

    Existing FormatObject instances pass through untouched; anything else is
    stringified and wrapped in a StringFormatObject, optionally tagged with
    the given *size*.
    """
    if isinstance(arg, FormatObject):
        return arg
    wrapped = StringFormatObject(str(arg))
    if size is not None:
        wrapped.size = size
    return wrapped
class TestUtils(unittest.TestCase):
    """Tests for ``arcline_path_utils``: discretization, lane length,
    pose projection and curvature, using three hand-built arcline paths."""

    def setUp(self) -> None:
        # Poses are (x, y, heading). A very large radius (999.999) with an
        # L*/R* shape makes the path effectively straight.
        self.straight_path = {'start_pose': [421., 1087., 2.], 'end_pose': [391., 1100., 2.], 'shape': 'LSR', 'radius': 999.999, 'segment_length': [0., 28., 3.]}
        self.left_path = {'start_pose': [391., 1100., 2.], 'end_pose': [372., 1093., (- 2.)], 'shape': 'LSL', 'radius': 14., 'segment_length': [22., 0., 0.]}
        self.right_path = {'start_pose': [367., 1097., 1.], 'end_pose': [392., 1112., (- 0.)], 'shape': 'RSR', 'radius': 16., 'segment_length': [4.e-05, 6., 26.]}
        # Lanes are lists of arcline paths.
        self.straight_lane = [self.straight_path]
        self.curved_lane = [self.straight_path, self.left_path]
        self.right_lane = [self.right_path]

    def test_discretize_straight_path(self):
        # 10 m resolution over a ~31 m straight path yields 5 evenly spaced poses.
        discrete_path = arcline_path_utils.discretize(self.straight_path, 10)
        answer = np.array([(421., 1087., 2.), (413., 1091., 2.), (406., 1094., 2.), (399., 1097., 2.), (391., 1100., 2.)])
        np.testing.assert_allclose(answer, discrete_path)

    def test_discretize_curved_path(self):
        # 2 m resolution over the left-turning path; heading wraps through +/-pi.
        discrete_path = arcline_path_utils.discretize(self.left_path, 2)
        answer = np.array([(391., 1100., 2.), (389., 1101., 2.), (388., 1101., 2.), (386., 1101., 3.), (384., 1101., (- 3.)), (382., 1101., (- 2.)), (380., 1100., (- 2.)), (379., 1099., (- 2.)), (377., 1098., (- 2.)), (375., 1097., (- 2.)), (374., 1096., (- 2.)), (373., 1094., (- 2.)), (372., 1093., (- 2.))])
        np.testing.assert_allclose(answer, discrete_path)

    def test_discretize_curved_lane(self):
        # Lane discretization concatenates per-path discretizations, so the
        # shared endpoint (391, 1100, 2) appears twice.
        discrete_path = arcline_path_utils.discretize_lane(self.curved_lane, 5)
        answer = np.array([(421., 1087., 2.), (417., 1089., 2.), (412., 1091., 2.), (408., 1093., 2.), (404., 1095., 2.), (400., 1096., 2.), (395., 1098., 2.), (391., 1100., 2.), (391., 1100., 2.), (387., 1101., 3.), (382., 1101., (- 2.)), (378., 1099., (- 2.)), (375., 1096., (- 2.)), (372., 1093., (- 2.))])
        np.testing.assert_allclose(answer, discrete_path)

    def test_length_of_lane(self):
        # Lane length equals the sum of each member path's segment lengths.
        self.assertEqual(arcline_path_utils.length_of_lane(self.straight_lane), sum(self.straight_path['segment_length']))
        self.assertEqual(arcline_path_utils.length_of_lane(self.right_lane), sum(self.right_path['segment_length']))
        self.assertEqual(arcline_path_utils.length_of_lane(self.curved_lane), (sum(self.straight_path['segment_length']) + sum(self.left_path['segment_length'])))

    def test_project_pose_to_straight_lane(self):
        # A pose 10 m along the lane heading projects ~10 m in (arc length s).
        theta = 2.
        end_pose = ((421. + (10 * math.cos(theta))), (1087. + (10 * math.sin(theta))), theta)
        (pose, s) = arcline_path_utils.project_pose_to_lane(end_pose, self.straight_lane)
        np.testing.assert_allclose(np.array(pose).astype('int'), np.array(end_pose).astype('int'))
        self.assertTrue((abs((s - 10)) <= 0.5))

    def test_project_pose_not_close_to_lane(self):
        # A pose before the lane start clamps to the start pose with s == 0.
        pose = (362, 1092, 1.15)
        (pose_on_lane, s) = arcline_path_utils.project_pose_to_lane(pose, self.right_lane)
        self.assertListEqual(list(pose_on_lane), self.right_path['start_pose'])
        self.assertEqual(s, 0)

    def test_project_pose_to_curved_lane(self):
        # Projection works both in the straight portion (~10 m in) and in the
        # curved portion (~44 m in) of the composite lane.
        theta = 2.
        end_pose_1 = ((421. + (10 * math.cos(theta))), (1087. + (10 * math.sin(theta))), theta)
        end_pose_2 = (381, 1100, (- 2.76))
        (pose, s) = arcline_path_utils.project_pose_to_lane(end_pose_1, self.curved_lane)
        np.testing.assert_allclose(np.array(pose).astype('int'), np.array(end_pose_1).astype('int'))
        self.assertTrue((abs((s - 10)) <= 0.5))
        (pose_2, s_2) = arcline_path_utils.project_pose_to_lane(end_pose_2, self.curved_lane)
        np.testing.assert_allclose(np.array(pose_2[:2]).astype('int'), np.array([380, 1100]))
        self.assertTrue((abs((s_2 - 44)) <= 0.5))

    def test_get_curvature_straight_lane(self):
        # Straight segments have zero curvature.
        curvature = arcline_path_utils.get_curvature_at_distance_along_lane(15, self.straight_lane)
        self.assertEqual(curvature, 0)

    def test_curvature_curved_lane(self):
        # At 53 m we are inside the left turn: curvature = 1 / radius.
        curvature = arcline_path_utils.get_curvature_at_distance_along_lane(53, self.curved_lane)
        self.assertEqual(curvature, (1 / self.left_path['radius']))
class RPNModule(torch.nn.Module):
    """Region Proposal Network module.

    Bundles anchor generation, the RPN head, the train/test proposal
    selectors and the loss evaluator behind a single ``forward``.
    """

    def __init__(self, cfg, in_channels):
        super(RPNModule, self).__init__()
        self.cfg = cfg.clone()
        self.anchor_generator = make_anchor_generator(cfg)
        # The head class is looked up by name from the registry.
        head_cls = registry.RPN_HEADS[cfg.MODEL.RPN.RPN_HEAD]
        self.head = head_cls(cfg, in_channels, self.anchor_generator.num_anchors_per_location()[0])
        box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))
        self.box_selector_train = make_rpn_postprocessor(cfg, box_coder, is_train=True)
        self.box_selector_test = make_rpn_postprocessor(cfg, box_coder, is_train=False)
        self.loss_evaluator = make_rpn_loss_evaluator(cfg, box_coder)

    def forward(self, images, features, targets=None):
        """Predict objectness/regression over anchors and branch on mode."""
        objectness, rpn_box_regression = self.head(features)
        anchors = self.anchor_generator(images, features)
        if not self.training:
            return self._forward_test(anchors, objectness, rpn_box_regression)
        return self._forward_train(anchors, objectness, rpn_box_regression, targets)

    def _forward_train(self, anchors, objectness, rpn_box_regression, targets):
        """Training path: select proposals (unless RPN-only) and compute losses."""
        if self.cfg.MODEL.RPN_ONLY:
            # In RPN-only training the proposals themselves are not consumed,
            # so skip the (expensive) selection step.
            boxes = anchors
        else:
            # Proposal selection needs no gradients — only the loss does.
            with torch.no_grad():
                boxes = self.box_selector_train(anchors, objectness, rpn_box_regression, targets)
        loss_objectness, loss_rpn_box_reg = self.loss_evaluator(anchors, objectness, rpn_box_regression, targets)
        return boxes, {'loss_objectness': loss_objectness, 'loss_rpn_box_reg': loss_rpn_box_reg}

    def _forward_test(self, anchors, objectness, rpn_box_regression):
        """Inference path: select proposals; in RPN-only mode sort them by score."""
        boxes = self.box_selector_test(anchors, objectness, rpn_box_regression)
        if self.cfg.MODEL.RPN_ONLY:
            # Downstream RPN-only evaluation expects proposals ordered by
            # descending objectness score.
            inds = [box.get_field('objectness').sort(descending=True)[1] for box in boxes]
            boxes = [box[ind] for box, ind in zip(boxes, inds)]
        return boxes, {}
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad):
    """Create a LearnedPositionalEmbedding with weights drawn from N(0, 0.1)."""
    embedding = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad)
    embedding.weight.data.normal_(0, 0.1)
    return embedding
def get_membership(labels: np.ndarray, dtype=bool, n_labels: Optional[int]=None) -> sparse.csr_matrix:
    """Build the sparse one-hot membership matrix of a label assignment.

    Row ``i`` has a single nonzero in column ``labels[i]``; entries with a
    negative label (e.g. "unassigned") get an all-zero row. The number of
    columns is ``n_labels`` when given, otherwise ``max(labels) + 1``.
    """
    n_samples: int = len(labels)
    n_cols = (max(labels) + 1) if n_labels is None else n_labels
    valid = labels >= 0
    rows = np.flatnonzero(valid)
    cols = labels[valid]
    values = np.ones(valid.sum())
    return sparse.csr_matrix((values, (rows, cols)), shape=(n_samples, n_cols), dtype=dtype)
_properties
class ArrayElimination(ppl.Pass):
    """Simplification pass that merges/removes redundant access nodes, views
    and copies per state, then deletes transient arrays that are no longer
    accessed anywhere in the SDFG.

    NOTE(review): the line above this class appears truncated to
    ``_properties`` in extraction — likely a ``@properties.make_properties``
    decorator; confirm against the original source.
    """
    CATEGORY: str = 'Simplification'

    def modifies(self) -> ppl.Modifies:
        # This pass deletes data descriptors and rewires access nodes.
        return (ppl.Modifies.Descriptors | ppl.Modifies.AccessNodes)

    def should_reapply(self, modified: ppl.Modifies) -> bool:
        # Rerun whenever another pass changed access nodes.
        return (modified & ppl.Modifies.AccessNodes)

    def depends_on(self):
        # Requires reachability and access-state analyses from the pipeline.
        return {ap.StateReachability, ap.FindAccessStates}

    def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[(str, Any)]) -> Optional[Set[str]]:
        """Run elimination over every state (in reverse topological order).

        Returns the set of eliminated data container names, or None if nothing
        was removed or a topological state order could not be computed.
        """
        result: Set[str] = set()
        # Analysis results produced by the depends_on() passes.
        reachable: Dict[(SDFGState, Set[SDFGState])] = pipeline_results[ap.StateReachability.__name__][sdfg.sdfg_id]
        access_sets: Dict[(str, Set[SDFGState])] = pipeline_results[ap.FindAccessStates.__name__][sdfg.sdfg_id]
        try:
            state_order = list(cfg.stateorder_topological_sort(sdfg))
        except KeyError:
            # No topological order available (e.g. irreducible control flow).
            return None
        for state in reversed(state_order):
            # Data that is accessed in this state and in no state reachable
            # from it (other than itself) — safe to consider for removal here.
            removable_data: Set[str] = set((s for s in access_sets if ((state in access_sets[s]) and (not ((access_sets[s] & reachable[state]) - {state})))))
            # Group this state's access nodes by the data they refer to.
            access_nodes: Dict[(str, List[nodes.AccessNode])] = defaultdict(list)
            for node in state.nodes():
                if isinstance(node, nodes.AccessNode):
                    access_nodes[node.data].append(node)
            # Merge duplicate source-only nodes, then duplicate sink-only nodes.
            removed_nodes = self.merge_access_nodes(state, access_nodes, (lambda n: (state.in_degree(n) == 0)))
            removed_nodes |= self.merge_access_nodes(state, access_nodes, (lambda n: (state.out_degree(n) == 0)))
            # Refresh the grouping to exclude already-removed nodes.
            access_nodes = {k: [n for n in v if (n not in removed_nodes)] for (k, v) in access_nodes.items()}
            removed_nodes |= self.remove_redundant_views(sdfg, state, access_nodes)
            removed_nodes |= self.remove_redundant_copies(sdfg, state, removable_data, access_nodes)
            # Keep the access-state analysis consistent with the removals.
            for (aname, anodes) in access_nodes.items():
                if (len((set(anodes) - removed_nodes)) == 0):
                    access_sets[aname].remove(state)
            if removed_nodes:
                result.update({n.data for n in removed_nodes})
        # Finally, drop transient (non-scalar) arrays that no state accesses.
        for (aname, desc) in list(sdfg.arrays.items()):
            if ((not desc.transient) or isinstance(desc, data.Scalar)):
                continue
            if ((aname not in access_sets) or (not access_sets[aname])):
                sdfg.remove_data(aname, validate=False)
                result.add(aname)
        return (result or None)

    def report(self, pass_retval: Set[str]) -> str:
        return f'Eliminated {len(pass_retval)} arrays: {pass_retval}.'

    def merge_access_nodes(self, state: SDFGState, access_nodes: Dict[(str, List[nodes.AccessNode])], condition: Callable[([nodes.AccessNode], bool)]):
        """Merge all access nodes of the same data that satisfy *condition*
        into the first such node, redirecting their edges; returns the set of
        removed nodes."""
        removed_nodes: Set[nodes.AccessNode] = set()
        for nodeset in access_nodes.values():
            if (len(nodeset) > 1):
                first_node = nodeset[0]
                if (not condition(first_node)):
                    continue
                for node in nodeset[1:]:
                    if (not condition(node)):
                        continue
                    # Reconnect every edge of `node` to `first_node`, keeping
                    # the original direction and connector names.
                    for edge in state.all_edges(node):
                        if (edge.dst is node):
                            state.add_edge(edge.src, edge.src_conn, first_node, edge.dst_conn, edge.data)
                        else:
                            state.add_edge(first_node, edge.src_conn, edge.dst, edge.dst_conn, edge.data)
                    state.remove_node(node)
                    removed_nodes.add(node)
        return removed_nodes

    def remove_redundant_views(self, sdfg: SDFG, state: SDFGState, access_nodes: Dict[(str, List[nodes.AccessNode])]):
        """Apply RemoveSliceView wherever it matches; returns removed nodes."""
        removed_nodes: Set[nodes.AccessNode] = set()
        xforms = [RemoveSliceView()]
        state_id = sdfg.node_id(state)
        for nodeset in access_nodes.values():
            # Iterate over a copy: nodeset is mutated when a node is removed.
            for anode in list(nodeset):
                for xform in xforms:
                    candidate = {type(xform).view: anode}
                    xform.setup_match(sdfg, sdfg.sdfg_id, state_id, candidate, 0, override=True)
                    if xform.can_be_applied(state, 0, sdfg):
                        xform.apply(state, sdfg)
                        removed_nodes.add(anode)
                        nodeset.remove(anode)
        return removed_nodes

    def remove_redundant_copies(self, sdfg: SDFG, state: SDFGState, removable_data: Set[str], access_nodes: Dict[(str, List[nodes.AccessNode])]):
        """Iteratively apply redundant-copy transformations to removable data
        until a fixpoint is reached; returns the set of removed nodes."""
        removed_nodes: Set[nodes.AccessNode] = set()
        state_id = sdfg.node_id(state)
        # First group matches (anode -> successor), second (predecessor -> anode).
        xforms_first: List[SingleStateTransformation] = [RedundantWriteSlice(), UnsqueezeViewRemove(), RedundantArray()]
        xforms_second: List[SingleStateTransformation] = [RedundantReadSlice(), SqueezeViewRemove(), RedundantSecondArray()]
        # Seed with a dummy element so the fixpoint loop runs at least once.
        removed = {1}
        while removed:
            removed = set()
            for aname in removable_data:
                if (aname not in access_nodes):
                    continue
                for anode in access_nodes[aname]:
                    if (anode in removed_nodes):
                        continue
                    if (anode not in state.nodes()):
                        # Node was removed as a side effect of an earlier apply.
                        removed_nodes.add(anode)
                        continue
                    if (state.out_degree(anode) == 1):
                        succ = state.successors(anode)[0]
                        if isinstance(succ, nodes.AccessNode):
                            for xform in xforms_first:
                                candidate = {type(xform).in_array: anode, type(xform).out_array: succ}
                                xform.setup_match(sdfg, sdfg.sdfg_id, state_id, candidate, 0, override=True)
                                if xform.can_be_applied(state, 0, sdfg):
                                    ret = xform.apply(state, sdfg)
                                    # A non-None return means the node survived.
                                    if (ret is not None):
                                        continue
                                    removed_nodes.add(anode)
                                    removed.add(anode)
                                    break
                    if (anode in removed_nodes):
                        continue
                    if (state.in_degree(anode) == 1):
                        pred = state.predecessors(anode)[0]
                        if isinstance(pred, nodes.AccessNode):
                            for xform in xforms_second:
                                candidate = {type(xform).in_array: pred, type(xform).out_array: anode}
                                xform.setup_match(sdfg, sdfg.sdfg_id, state_id, candidate, 0, override=True)
                                if xform.can_be_applied(state, 0, sdfg):
                                    ret = xform.apply(state, sdfg)
                                    if (ret is not None):
                                        continue
                                    removed_nodes.add(anode)
                                    removed.add(anode)
                                    break
        return removed_nodes
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register Python-binding methods for the ns-3 class
    ``SimpleRefCount<CallbackImplBase, empty, DefaultDeleter<CallbackImplBase>>``:
    default constructor, copy constructor, and the static ``Cleanup`` method.
    (Generated pybindgen registration code — keep byte-stable.)
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def _count_answers(qa_pairs, min_occurence):
    """Map each whitespace-normalized answer string to the set of qids it
    appears with, keeping only answers seen with >= min_occurence qids."""
    occurence = {}
    for _, row in qa_pairs.iterrows():
        # Collapse runs of whitespace so e.g. 'a  b' and 'a b' count together.
        gtruth = ' '.join(str(row['answer']).split())
        occurence.setdefault(gtruth, set()).add(row['qid'])
    return {answer: qids for answer, qids in occurence.items() if len(qids) >= min_occurence}

def filter_answers_open_close(train_qa_pairs, val_qa_pairs, min_occurence):
    """Count answer occurrences over the combined train+val QA pairs, split by
    answer type.

    Returns ``(occurence_open, occurence_close)``: dicts mapping each
    normalized answer string to the set of qids it occurs with, restricted to
    answers appearing with at least *min_occurence* distinct qids, for
    ``answer_type == 'OPEN'`` and ``'CLOSED'`` rows respectively.
    """
    import pandas as pd  # local import: the file's import block is not visible here

    # Fix: DataFrame.append was deprecated and removed in pandas 2.0 — use concat.
    qa_pairs = pd.concat([train_qa_pairs, val_qa_pairs])
    qa_pairs['answer'] = qa_pairs['answer'].apply(str)
    occurence_open = _count_answers(qa_pairs[qa_pairs['answer_type'] == 'OPEN'], min_occurence)
    print(('Num of open answers that appear >= %d times: %d' % (min_occurence, len(occurence_open))))
    occurence_close = _count_answers(qa_pairs[qa_pairs['answer_type'] == 'CLOSED'], min_occurence)
    print(('Num of close answers that appear >= %d times: %d' % (min_occurence, len(occurence_close))))
    return (occurence_open, occurence_close)
class MM(ReidBaseDataModule):
    """Person re-identification data module for a dataset in the
    'ReID_format' directory layout (Market-1501-style file naming)."""
    # Relative dataset directory; joined with cfg.DATASETS.ROOT_DIR in __init__.
    dataset_dir = 'ReID_format'

    def __init__(self, cfg, **kwargs):
        super().__init__(cfg, **kwargs)
        self.dataset_dir = osp.join(cfg.DATASETS.ROOT_DIR, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
        # Query and gallery deliberately share the same test directory here.
        self.query_dir = osp.join(self.dataset_dir, 'bounding_box_test')
        self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')

    def setup(self):
        """Scan the directories and build the train dataset (relabeled pids)
        and the validation dataset (query + gallery, original pids)."""
        self._check_before_run()
        transforms_base = ReidTransforms(self.cfg)
        (train, train_dict) = self._process_dir(self.train_dir, relabel=True)
        self.train_dict = train_dict
        self.train_list = train
        self.train = BaseDatasetLabelledPerPid(train_dict, transforms_base.build_transforms(is_train=True), self.num_instances, self.cfg.DATALOADER.USE_RESAMPLING)
        # Query/gallery keep their original pids (relabel=False) for evaluation.
        (query, query_dict) = self._process_dir(self.query_dir, relabel=False)
        (gallery, gallery_dict) = self._process_dir(self.gallery_dir, relabel=False)
        self.query_list = query
        self.gallery_list = gallery
        self.val = BaseDatasetLabelled((query + gallery), transforms_base.build_transforms(is_train=False))
        self._print_dataset_statistics(train, query, gallery)
        (num_query_pids, num_query_imgs, num_query_cams) = self._get_imagedata_info(query)
        (num_train_pids, num_train_imgs, num_train_cams) = self._get_imagedata_info(train)
        # num_query marks where query entries end inside the concatenated val set.
        self.num_query = len(query)
        self.num_classes = num_train_pids

    def _process_dir(self, dir_path, relabel=False):
        """Collect all '*.jpg' files named like '<pid>_c<camid>...'.

        Returns ``(dataset, dataset_dict)`` where ``dataset`` is a list of
        ``(img_path, pid, camid, idx)`` tuples and ``dataset_dict`` groups the
        same tuples by pid. With ``relabel=True`` pids are remapped to dense
        0..N-1 labels. Images with pid == -1 (junk/distractors) are skipped.
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        # Captures the person id and the camera id from the filename.
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        # First pass: collect the set of valid pids to build a dense relabeling.
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue
            pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        dataset_dict = defaultdict(list)
        dataset = []
        # Second pass: build the dataset entries.
        for (idx, img_path) in enumerate(img_paths):
            (pid, camid) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue
            camid -= 1  # camera ids in filenames are 1-based
            if relabel:
                pid = pid2label[pid]
            dataset.append((img_path, pid, camid, idx))
            dataset_dict[pid].append((img_path, pid, camid, idx))
        return (dataset, dataset_dict)
def script_call(function_name_at_script_name: str, script_handle_or_type: int, ints=(), floats=(), strings=(), bytes='') -> Tuple[(List[int], List[float], List[str], str)]:
    """Invoke a simulator script function, normalizing the argument tuples to
    lists, and return its (ints, floats, strings, bytes) result."""
    return sim.simExtCallScriptFunction(
        function_name_at_script_name,
        script_handle_or_type,
        list(ints),
        list(floats),
        list(strings),
        bytes,
    )
_function
def file_hash(filename):
    """Return the md5 hex digest of ``'<len(path)>:<path>'`` followed by the
    file's contents, where path is the normalized *filename*.

    The length-prefixed path makes the digest unambiguous (path and content
    cannot be confused), and the file is read in 65000-byte chunks so large
    files never sit fully in memory.
    """
    path = os.path.normpath(filename)
    digest = hashlib.md5(('%d:%s' % (len(path), path)).encode('UTF-8'))
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(65000)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
_file_in_work_dir(['script_name'])
_low_level_step
def undo_edit_script(script_name, work_dir='.', **kwargs):
    """Undo the most recent edit of *script_name* inside *work_dir*.

    Restores the newest file matching ``backup/<script_name>_*`` over the
    script, deletes that backup, and returns the restored content prefixed
    with a short header. Raises EnvException when there is no backup to
    restore or when the restore itself fails.
    """
    backup_files = glob.glob(os.path.join(work_dir, 'backup', f'{script_name}_*'))
    if (len(backup_files) == 0):
        raise EnvException('There is no change to undo.')
    try:
        # Backup names end in a sortable suffix, so the lexicographic maximum
        # is the most recent backup.
        backup_files.sort()
        backup_file = backup_files[(- 1)]
        shutil.copyfile(backup_file, os.path.join(work_dir, script_name))
        os.remove(backup_file)
        # Fix: the original used open(...).read() and leaked the file handle.
        with open(os.path.join(work_dir, script_name)) as f:
            new_content = f.read()
        observation = (f'''Content of {script_name} after undo the most recent edit:
''' + new_content)
        return observation
    except Exception as e:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt/
        # SystemExit and hid the original error; narrow and chain the cause.
        raise EnvException(f'Cannot undo the edit of file name {script_name}. Check the file name again.') from e
class Relation(BratBase):
    """A typed BRAT relation over entity arguments within one document."""

    def __init__(self, id_, doc_name, rela_type, args):
        super(Relation, self).__init__(id_, rela_type, doc_name)
        # ``args`` holds raw argument ids until init_args() resolves them.
        self.args = args
        self.abs_char_start = 0
        self.abs_char_end = 0

    def init_args(self, entity_map):
        """Resolve argument ids to entity objects via *entity_map* and cache
        the relation's absolute character span.

        Arguments are sorted by ``type_`` descending (``reverse=1`` is truthy,
        i.e. equivalent to ``reverse=True``).
        """
        self.args = sorted([entity_map[arg_id] for arg_id in self.args], key=(lambda x: x.type_), reverse=1)
        self.abs_char_start = self.args[0].abs_char_start
        self.abs_char_end = self.args[(- 1)].abs_char_end

    def get_stable_id(self):
        """Order-independent identity: (type, doc, sorted argument stable ids)."""
        args = tuple(sorted([arg.get_stable_id() for arg in self.args]))
        return (self.type_, self.doc_name, args)

    def clone(self, ignore_attributes=False):
        """Deep-copy this relation. NOTE: assumes exactly two arguments."""
        arg1 = self.args[0].clone(ignore_attributes)
        arg2 = self.args[1].clone(ignore_attributes)
        c = Relation(self.id_, self.doc_name, self.type_, [arg1, arg2])
        c.abs_char_start = self.args[0].abs_char_start
        c.abs_char_end = self.args[(- 1)].abs_char_end
        return c

    def __getitem__(self, key):
        """Look up an argument entity by its type name.

        NOTE(review): this reads ``entity.type`` while the rest of the class
        uses ``type_`` — confirm the entity class exposes both attributes,
        otherwise this is a latent bug.
        """
        arg_types = [entity.type for entity in self.args]
        # NOTE(review): assert is stripped under `python -O`; consider raising.
        assert (key in arg_types)
        return self.args[arg_types.index(key)]

    def __hash__(self):
        # NOTE(review): __eq__ is not defined here — presumably inherited from
        # BratBase; verify hash/eq stay consistent.
        v = (self.type_, self.doc_name, tuple(self.args))
        return hash(v)

    def __str__(self):
        args = [str(arg) for arg in self.args]
        return '{}( {} )'.format(self.type_, ', '.join(args))
def get_caption(path_to_image):
    """Send an image to the Azure Computer Vision 'analyze' endpoint and
    return the first generated caption text.

    Relies on module-level API_KEY and ANALYZE_URL. Raises KeyError/IndexError
    if the response contains no caption.
    """
    headers = {'Ocp-Apim-Subscription-Key': API_KEY, 'Content-Type': 'application/octet-stream'}
    params = {'visualFeatures': 'Description', 'language': 'en'}
    # Fix: the original used open(...).read() and leaked the file handle.
    with open(path_to_image, 'rb') as f:
        payload = f.read()
    response = requests.post(ANALYZE_URL, headers=headers, params=params, data=payload)
    results = json.loads(response.content)
    caption = results['description']['captions'][0]['text']
    return caption
class DistributedDataParallelCPU(Module):
    """CPU data-parallel wrapper: broadcasts parameters from rank 0 at
    construction and averages gradients across processes after each backward
    pass via an autograd-queued callback."""

    def __init__(self, module):
        super(DistributedDataParallelCPU, self).__init__()
        self.module = module
        self.sync_parameters()

        def allreduce_params():
            # Runs at most once per backward (guarded by needs_reduction, which
            # forward() re-arms): bucket grads by tensor type, flatten each
            # bucket, all-reduce, average by world size, and copy back in place.
            if self.needs_reduction:
                self.needs_reduction = False
                buckets = defaultdict(list)
                for param in self.module.parameters():
                    if (param.requires_grad and (param.grad is not None)):
                        tp = type(param.data)
                        buckets[tp].append(param)
                for bucket in buckets.values():
                    grads = [param.grad.data for param in bucket]
                    coalesced = _flatten_dense_tensors(grads)
                    dist.all_reduce(coalesced)
                    coalesced /= dist.get_world_size()
                    for (buf, synced) in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
                        buf.copy_(synced)

        for param in list(self.module.parameters()):
            def allreduce_hook(*unused):
                # Defer the reduction until autograd finishes the whole
                # backward pass; every hook queues the same shared callback.
                Variable._execution_engine.queue_callback(allreduce_params)
            if param.requires_grad:
                param.register_hook(allreduce_hook)

    def sync_parameters(self):
        """Broadcast all parameters from rank 0 so every process starts equal."""
        for param in self.module.parameters():
            dist.broadcast(param.data, 0)

    def forward(self, *inputs, **kwargs):
        # Arm the one-shot gradient reduction for the upcoming backward.
        self.needs_reduction = True
        return self.module(*inputs, **kwargs)
class Data():
    """Container pairing a survey with observed data (dobs) and uncertainty
    metadata (relative_error, noise_floor, standard_deviation).

    NOTE(review): several ``@property`` / ``@<name>.setter`` decorators appear
    stripped in extraction — duplicate method names below (``survey``,
    ``dobs``, ...) and stray ``_error.setter`` / ``_floor.setter`` /
    ``_deviation.setter`` lines are the residue. Restore the decorators from
    the original source before running this file; comments below assume the
    property/getter-setter pairing.
    """

    def __init__(self, survey, dobs=None, relative_error=None, noise_floor=None, standard_deviation=None, **kwargs):
        super().__init__(**kwargs)
        self.survey = survey
        if (dobs is None):
            # Default to NaN placeholders sized by the survey's data count.
            dobs = np.full(survey.nD, np.nan)
        self.dobs = dobs
        self.relative_error = relative_error
        self.noise_floor = noise_floor
        if (standard_deviation is not None):
            if ((relative_error is not None) or (noise_floor is not None)):
                warnings.warn('Setting the standard_deviation overwrites the relative_error and noise_floor', stacklevel=2)
            self.standard_deviation = standard_deviation
        if ((standard_deviation is None) and (relative_error is None) and (noise_floor is None)):
            self.standard_deviation = 0.0

    # Property getter for the survey (decorator presumably stripped).
    def survey(self):
        return self._survey

    # Survey setter: validates the type before storing.
    def survey(self, value):
        self._survey = validate_type('survey', value, BaseSurvey, cast=False)

    # Property getter for the observed data vector.
    def dobs(self):
        return self._dobs

    # dobs setter: enforces shape (survey.nD,) and float/complex dtype.
    def dobs(self, value):
        self._dobs = validate_ndarray_with_shape('dobs', value, shape=(self.survey.nD,), dtype=(float, complex))

    # Property getter for the relative (multiplicative) error.
    def relative_error(self):
        return self._relative_error

    # NOTE(review): residue of '@relative_error.setter' (decorator truncated).
    _error.setter
    def relative_error(self, value):
        if (value is not None):
            try:
                # A scalar is broadcast to one value per datum.
                value = validate_float('relative_error', value)
                value = np.full(self.survey.nD, value)
            except TypeError:
                pass  # not a scalar — validate as an array below
            value = validate_ndarray_with_shape('relative_error', value, shape=(self.survey.nD,))
            if np.any((value < 0.0)):
                raise ValueError('relative_error must be positive.')
        self._relative_error = value

    # Property getter for the additive noise floor.
    def noise_floor(self):
        return self._noise_floor

    # NOTE(review): residue of '@noise_floor.setter' (decorator truncated).
    _floor.setter
    def noise_floor(self, value):
        if (value is not None):
            try:
                # A scalar is broadcast to one value per datum.
                value = validate_float('noise_floor', value)
                value = np.full(self.survey.nD, value)
            except TypeError:
                pass  # not a scalar — validate as an array below
            value = validate_ndarray_with_shape('noise_floor', value, shape=(self.survey.nD,))
            if np.any((value < 0.0)):
                raise ValueError('noise_floor must be positive.')
        self._noise_floor = value

    # Derived uncertainty: sqrt((relative_error * |dobs|)^2 + noise_floor^2).
    def standard_deviation(self):
        if ((self.relative_error is None) and (self.noise_floor is None)):
            raise TypeError('The relative_error and / or noise_floor must be set before asking for uncertainties. Alternatively, the standard_deviation can be set directly')
        uncert = np.zeros(self.nD)
        if (self.relative_error is not None):
            uncert += ((self.relative_error * np.absolute(self.dobs)) ** 2)
        if (self.noise_floor is not None):
            uncert += (self.noise_floor ** 2)
        return np.sqrt(uncert)

    # NOTE(review): residue of '@standard_deviation.setter' (decorator truncated).
    _deviation.setter
    def standard_deviation(self, value):
        # Setting a standard deviation zeroes the relative error and stores the
        # value as a pure noise floor.
        self.relative_error = np.zeros(self.nD)
        self.noise_floor = value

    # Number of data values (property decorator presumably stripped).
    def nD(self):
        return len(self.dobs)

    # Shape of the data vector (property decorator presumably stripped).
    def shape(self):
        return self.dobs.shape

    # Lazily-built mapping: source -> receiver -> index array into dobs.
    def index_dictionary(self):
        if (getattr(self, '_index_dictionary', None) is None):
            if (self.survey is None):
                raise Exception('To set or get values by source-receiver pairs, a survey must first be set. `data.survey = survey`')
            self._index_dictionary = {}
            for src in self.survey.source_list:
                self._index_dictionary[src] = {}
            # Indices follow the survey's source/receiver ordering.
            (indBot, indTop) = (0, 0)
            for src in self.survey.source_list:
                for rx in src.receiver_list:
                    indTop += rx.nD
                    self._index_dictionary[src][rx] = np.arange(indBot, indTop)
                    indBot += rx.nD
        return self._index_dictionary

    def __setitem__(self, key, value):
        # key is a (source, receiver) pair.
        index = self.index_dictionary[key[0]][key[1]]
        self.dobs[index] = mkvc(value)

    def __getitem__(self, key):
        # key is a (source, receiver) pair.
        index = self.index_dictionary[key[0]][key[1]]
        return self.dobs[index]

    def tovec(self):
        """Return the data as a flat vector (the dobs array itself)."""
        return self.dobs

    def fromvec(self, v):
        """Set the data from a vector, flattening it first."""
        v = mkvc(v)
        self.dobs = v
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3FdReader_methods(root_module, root_module['ns3::DefaultDeleter< ns3::FdReader >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
register_Ns3DefaultDeleter__Ns3SystemThread_methods(root_module, root_module['ns3::DefaultDeleter< ns3::SystemThread >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TapBridgeHelper_methods(root_module, root_module['ns3::TapBridgeHelper'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker'])
register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TapBridge_methods(root_module, root_module['ns3::TapBridge'])
register_Ns3TapBridgeFdReader_methods(root_module, root_module['ns3::TapBridgeFdReader'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Unsigned_char___star___Long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned char *, long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return |
def main(in_filepath, out_dir, threshold=10.0, clip_duration_threshold=None, clip_duration=10.0, force_duration=True, num_clips=3, force_num_clips=True, anneal_factor=1.2, sampling='random', cut_random_clips=None, calc_diversity_with_sum=False, **kwargs):
    """Extract up to ``num_clips`` clips from a video via shot-boundary detection.

    Runs shot-boundary detection (SBD) with an annealed ``threshold`` until
    enough valid clips are found, selects clips according to ``sampling``
    ('random', 'diversity', 'diversity_greedy', 'random_then_diversity',
    'random1_then_diversity') and writes them as .mp4 files into ``out_dir``.

    Args:
        in_filepath: path to the input video file.
        out_dir: directory receiving the extracted clips (created if missing).
        threshold: initial SBD threshold; multiplied by ``anneal_factor`` per
            retry, capped at 100.
        clip_duration_threshold: ascending duration constraints (seconds) used
            to reduce ``num_clips`` for short videos; defaults to [60.0].
        clip_duration: target clip length in seconds.
        force_duration: centre/trim clips to exactly ``clip_duration``.
        num_clips: desired number of clips.
        force_num_clips: keep annealing / subsampling until ``num_clips`` met.
        anneal_factor: multiplicative threshold increase per SBD retry.
        sampling: clip-selection strategy (see above).
        cut_random_clips: for 'diversity', size of the randomly pre-cut pool.
        calc_diversity_with_sum: forwarded to ``calc_diversity``.

    Returns:
        (saved_clips, out_filepaths): selected [start, end] pairs and the paths
        of the written clip files.
    """
    # Bug fix: avoid a shared mutable default argument.
    if clip_duration_threshold is None:
        clip_duration_threshold = [60.0]
    if not os.path.isfile(in_filepath):
        sys.exit('No such file: {}'.format(in_filepath))
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    orig_clip_duration = get_clip_duration(in_filepath)
    # Shorter videos get fewer clips.
    # NOTE(review): the divisor uses len(clip_duration_threshold) - 1 for every
    # matching constraint rather than the constraint's index — confirm intended.
    for constraint in sorted(clip_duration_threshold):
        if orig_clip_duration <= constraint:
            num_clips = math.ceil(num_clips / (2 ** (len(clip_duration_threshold) - 1)))
            break
    if num_clips < 1:
        num_clips = 1
    threshold = float(threshold)
    # Anneal the SBD threshold until enough valid clips exist (or cap reached).
    done = False
    valid_clips = []
    while not done:
        (sb, sb_score) = run_sbd(in_filepath, threshold)
        if len(sb) != 0:
            valid_clips = get_valid_clips(sb, clip_duration, force_duration)
        done = (len(valid_clips) >= num_clips) or (not force_num_clips) or (threshold >= 100.0)
        if not done:
            threshold = min(float(anneal_factor) * threshold, 100.0)
    if len(valid_clips) == 0:
        # Fall back to one clip spanning (the centre of) the whole video.
        sb = [0, orig_clip_duration]
        if force_duration:
            delta = 0.5 * ((sb[1] - sb[0]) - clip_duration)
            sb = [sb[0] + delta, (sb[0] + delta) + clip_duration]
        valid_clips = [sb]
        num_clips = len(valid_clips)

    def save_clip(clip, idx=None):
        """Extract `clip` into out_dir (skipping existing files); return its path."""
        if idx is not None:
            out_filepath = os.path.join(out_dir, '{}_{:02d}.mp4'.format(get_filename(in_filepath), idx))
        else:
            out_filepath = os.path.join(out_dir, '{}_{:03d}.mp4'.format(get_filename(in_filepath), int(clip[0])))
        if not os.path.isfile(out_filepath):
            extract_clip(clip, in_filepath, out_filepath)
        return out_filepath

    def compute_save_delete(path_as, clip_b):
        """Temporarily save clip_b, return its summed similarity to path_as."""
        path_b = save_clip(clip_b)
        sim = 0
        for path_a in path_as:
            sim += compute_perceptual_similarity(path_a, path_b)
        os.remove(path_b)
        return (sim, path_b)

    if force_num_clips and (len(valid_clips) > num_clips) and (sampling == 'random'):
        valid_clips = sorted(random.sample(valid_clips, num_clips))
    if (sampling == 'diversity') and (cut_random_clips is not None):
        assert cut_random_clips >= num_clips, 'cut_random clips should be larger than num_clips'
        # Bug fix: sample a pool of cut_random_clips candidates. The previous
        # code sampled num_clips and then applied a no-op [:cut_random_clips]
        # slice, leaving the diversity selection below with no spare candidates.
        valid_clips = sorted(random.sample(valid_clips, min(cut_random_clips, len(valid_clips))))
    if sampling == 'diversity_greedy':
        random.shuffle(valid_clips)
        out_filepaths = []
        if len(valid_clips) <= num_clips:
            for clip in valid_clips:
                out_filepaths.append(save_clip(clip))
            num_clips = len(valid_clips)
            saved_clips = valid_clips
        else:
            # Greedy farthest-point selection: repeatedly add the clip least
            # similar to the clips picked so far.
            current_clips = [valid_clips[0]]
            other_clips = valid_clips[1:]
            out_filepaths = [save_clip(current_clips[-1])]
            for _ in range(num_clips - 1):
                # Bug fix: start from +inf (was 0.0); otherwise `sim < min_sim`
                # could never fire and clip_candidate stayed unbound.
                min_sim = float('inf')
                clip_candidate = 0
                for cand_idx, other_clip in enumerate(other_clips):
                    (sim, _) = compute_save_delete(out_filepaths, other_clip)
                    if sim == 0:
                        clip_candidate = cand_idx
                        break
                    if sim < min_sim:
                        clip_candidate = cand_idx
                        min_sim = sim
                current_clip = other_clips[clip_candidate]
                current_clips = [*current_clips, current_clip]
                out_filepaths.append(save_clip(current_clips[-1]))
                other_clips.pop(clip_candidate)
            saved_clips = current_clips
        return (saved_clips, out_filepaths)
    out_filepaths = [save_clip(clip) for clip in valid_clips]
    keep_idx = list(range(len(valid_clips)))
    if force_num_clips and (len(valid_clips) > num_clips):
        if sampling == 'diversity':
            # Keep the num_clips most mutually dissimilar clips.
            # NOTE(review): out_filepaths is shuffled independently of
            # valid_clips, so saved_clips[i] may not correspond to
            # out_filepaths[i] afterwards — confirm this is intended.
            n = len(valid_clips)
            sim = np.zeros((n, n))
            random.shuffle(out_filepaths)
            for i in range(n - 1):
                for j in range(i + 1, n):
                    sim[i, j] = compute_perceptual_similarity(out_filepaths[i], out_filepaths[j])
            sim = sim + sim.T
            keep_idx = calc_diversity(sim, num_clips, calc_diversity_with_sum)
            for i in range(n):
                if i not in keep_idx:
                    os.remove(out_filepaths[i])
        elif sampling in ['random_then_diversity', 'random1_then_diversity']:
            # Keep some clips at random, then fill with the least-similar rest.
            random.shuffle(out_filepaths)
            random_clips = math.ceil(num_clips / 2)
            if sampling == 'random1_then_diversity':
                random_clips = 1
            diversity_clips = num_clips - random_clips
            keep_idx = list(range(random_clips))
            n = len(valid_clips)
            sim = np.zeros((random_clips, n - random_clips))
            for i in range(random_clips):
                for j in range(n - random_clips):
                    sim[i, j] = compute_perceptual_similarity(out_filepaths[i], out_filepaths[j + random_clips])
            diversity_keep_idx = np.argsort(np.sum(sim, axis=0))[:diversity_clips] + random_clips
            keep_idx = [*keep_idx, *list(diversity_keep_idx)]
            for i in range(n):
                if i not in keep_idx:
                    os.remove(out_filepaths[i])
    saved_clips = [valid_clips[idx] for idx in keep_idx]
    out_filepaths = [out_filepaths[idx] for idx in keep_idx]
    return (saved_clips, out_filepaths)
class GradualStyleEncoder(Module):
    """IR/IR-SE backbone whose multi-scale features condition three
    transformer-decoder stages (coarse, medium, fine) to produce style codes."""

    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert (num_layers in [50, 100, 152]), 'num_layers should be 50,100, or 152'
        assert (mode in ['ir', 'ir_se']), 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        # Pick the residual unit variant; the assert above guarantees mode is one of the two.
        unit_module = bottleneck_IR if mode == 'ir' else bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(opts.input_nc, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64))
        body_units = [
            unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride)
            for block in blocks
            for bottleneck in block
        ]
        self.body = Sequential(*body_units)
        # 1x1 lateral convs lift the shallower feature maps to 512 channels.
        self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
        self.transformerlayer_coarse = TransformerDecoderLayer(d_model=512, nhead=4, dim_feedforward=1024)
        self.transformerlayer_medium = TransformerDecoderLayer(d_model=512, nhead=4, dim_feedforward=1024)
        self.transformerlayer_fine = TransformerDecoderLayer(d_model=512, nhead=4, dim_feedforward=1024)
        # Learnable query bank (not referenced in this forward pass).
        self.z = nn.Parameter(torch.randn(1, 18, 512))

    def _upsample_add(self, x, y):
        """Bilinearly upsample x to y's spatial size and add them."""
        (_, _, H, W) = y.size()
        return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y

    def forward(self, x, query):
        """Return style codes of shape (batch, queries, 512) for input image x."""
        x = self.input_layer(x)
        query = query.permute(1, 0, 2)
        # Tap the backbone at three depths for a small feature pyramid.
        shallow = medium = deep = None
        for idx, layer in enumerate(self.body._modules.values()):
            x = layer(x)
            if idx == 6:
                shallow = x
            elif idx == 20:
                medium = x
            elif idx == 23:
                deep = x
        p2 = self._upsample_add(deep, self.latlayer1(medium))
        p1 = self._upsample_add(p2, self.latlayer2(shallow))
        # Flatten spatial maps into (positions, batch, channels) token sequences.
        p1 = p1.flatten(2).permute(2, 0, 1)
        p2 = p2.flatten(2).permute(2, 0, 1)
        deep = deep.flatten(2).permute(2, 0, 1)
        # Coarse-to-fine refinement of the queries against the pyramid levels.
        query_coarse = self.transformerlayer_coarse(query, deep)
        query_medium = self.transformerlayer_medium(query_coarse, p2)
        query_fine = self.transformerlayer_fine(query_medium, p1)
        return query_fine.permute(1, 0, 2)
def pesq_score(utts_r, utts_g, h):
    """Mean PESQ score of generated utterances against their references.

    Each (reference, generated) tensor pair is squeezed, moved to CPU and
    evaluated with eval_pesq at h.sampling_rate, in parallel over 30 jobs.
    """
    scores = Parallel(n_jobs=30)(
        delayed(eval_pesq)(
            utts_r[i].squeeze().cpu().numpy(),
            utts_g[i].squeeze().cpu().numpy(),
            h.sampling_rate,
        )
        for i in range(len(utts_r))
    )
    return np.mean(scores)
class WikiEnv(gym.Env):
    """Gym environment exposing Wikipedia to an agent through text actions:
    search[entity], lookup[keyword], finish[answer] and think[...]."""

    def __init__(self):
        """Initialise episode state and the text observation/action spaces."""
        super().__init__()
        self.page = None  # text of the currently loaded Wikipedia page
        self.obs = None  # current observation string
        self.lookup_keyword = None  # keyword of the active lookup[] session
        self.lookup_list = None  # sentences matching the lookup keyword
        self.lookup_cnt = None  # index of the next lookup result to show
        self.steps = 0
        self.answer = None  # set by finish[]; non-None ends the episode
        self.observation_space = self.action_space = textSpace()
        self.search_time = 0  # cumulative seconds spent on HTTP searches
        self.num_searches = 0

    def _get_obs(self):
        return self.obs

    def _get_info(self):
        return {'steps': self.steps, 'answer': self.answer}

    def reset(self, seed=None, return_info=False, options=None):
        """Reset episode state; return the initial observation (and info if asked)."""
        self.obs = 'Interact with Wikipedia using search[], lookup[], and finish[].\n'
        self.page = None
        self.lookup_keyword = None
        self.lookup_list = None
        self.lookup_cnt = None
        self.steps = 0
        self.answer = None
        observation = self._get_obs()
        info = self._get_info()
        return (observation, info) if return_info else observation

    def construct_lookup_list(self, keyword):
        """Return every sentence of the current page containing `keyword`
        (case-insensitive); empty list when no page is loaded."""
        if self.page is None:
            return []
        paragraphs = self.page.split('\n')
        paragraphs = [p.strip() for p in paragraphs if p.strip()]
        sentences = []
        for p in paragraphs:
            sentences += p.split('. ')
        sentences = [(s.strip() + '.') for s in sentences if s.strip()]
        parts = sentences
        parts = [p for p in parts if (keyword.lower() in p.lower())]
        return parts

    @staticmethod
    def get_page_obs(page):
        """Return the first five sentences of `page` joined into one string.

        Bug fix: this helper takes only the page text, yet is invoked as
        ``self.get_page_obs(self.page)``; without @staticmethod that call
        raised TypeError (two positional arguments for a one-argument function).
        """
        paragraphs = page.split('\n')
        paragraphs = [p.strip() for p in paragraphs if p.strip()]
        sentences = []
        for p in paragraphs:
            sentences += p.split('. ')
        sentences = [(s.strip() + '.') for s in sentences if s.strip()]
        return ' '.join(sentences[:5])

    def search_step(self, entity):
        """Search Wikipedia for `entity`; load the page or list similar titles."""
        entity_ = entity.replace(' ', '+')
        # Bug fix: the URL literal was truncated in this file; restored to the
        # standard MediaWiki search endpoint consistent with the parsing below.
        search_url = f'https://en.wikipedia.org/w/index.php?search={entity_}'
        old_time = time.time()
        response_text = requests.get(search_url).text
        self.search_time += (time.time() - old_time)
        self.num_searches += 1
        soup = BeautifulSoup(response_text, features='html.parser')
        result_divs = soup.find_all('div', {'class': 'mw-search-result-heading'})
        if result_divs:
            # No exact page: report the closest search-result titles instead.
            self.result_titles = [clean_str(div.get_text().strip()) for div in result_divs]
            self.obs = f'Could not find {entity}. Similar: {self.result_titles[:5]}.'
        else:
            page = [p.get_text().strip() for p in (soup.find_all('p') + soup.find_all('ul'))]
            if any(('may refer to:' in p) for p in page):
                # Disambiguation page: retry with a bracketed query.
                self.search_step('[' + entity + ']')
            else:
                self.page = ''
                for p in page:
                    if len(p.split(' ')) > 2:
                        self.page += clean_str(p)
                        if not p.endswith('\n'):
                            self.page += '\n'
                self.obs = self.get_page_obs(self.page)
                self.lookup_keyword = self.lookup_list = self.lookup_cnt = None

    def step(self, action):
        """Apply one text action; return (observation, reward, done, info)."""
        reward = 0
        done = False
        action = action.strip()
        if self.answer is not None:
            # Episode already finished.
            done = True
            return (self.obs, reward, done, self._get_info())
        if action.startswith('search[') and action.endswith(']'):
            entity = action[len('search['):-1]
            self.search_step(entity)
        elif action.startswith('lookup[') and action.endswith(']'):
            keyword = action[len('lookup['):-1]
            if self.lookup_keyword != keyword:
                # New keyword: (re)build the lookup result list.
                self.lookup_keyword = keyword
                self.lookup_list = self.construct_lookup_list(keyword)
                self.lookup_cnt = 0
            if self.lookup_cnt >= len(self.lookup_list):
                self.obs = 'No more results.\n'
            else:
                self.obs = (f'(Result {self.lookup_cnt + 1} / {len(self.lookup_list)}) ' + self.lookup_list[self.lookup_cnt])
                self.lookup_cnt += 1
        elif action.startswith('finish[') and action.endswith(']'):
            answer = action[len('finish['):-1]
            self.answer = answer
            done = True
            self.obs = f'Episode finished, reward = {reward}\n'
        elif action.startswith('think[') and action.endswith(']'):
            self.obs = 'Nice thought.'
        else:
            self.obs = 'Invalid action: {}'.format(action)
        self.steps += 1
        return (self.obs, reward, done, self._get_info())

    def get_time_info(self):
        """Return aggregate statistics about the HTTP searches performed."""
        speed = (self.search_time / self.num_searches) if self.num_searches else 0
        return {'call_speed': speed, 'call_time': self.search_time, 'num_calls': self.num_searches}
class MultiDomainBasicAuth(AuthBase):
    """Requests auth helper that resolves HTTP basic credentials per netloc.

    Credentials are looked up, in order, from the request URL itself, any
    configured index URLs, netrc, and the keyring; on a 401 the user may be
    prompted interactively and the result optionally saved to the keyring.
    """
    def __init__(self, prompting=True, index_urls=None):
        # Whether to prompt the user for credentials on a 401 response.
        self.prompting = prompting
        # Known index URLs whose embedded credentials may be reused.
        self.index_urls = index_urls
        # Cache: netloc -> (username, password).
        self.passwords = {}
        # (netloc, username, password) pending a successful response, or None.
        self._credentials_to_save = None
    def _get_index_url(self, url):
        """Return the configured index URL that `url` falls under, if any."""
        if ((not url) or (not self.index_urls)):
            return None
        for u in self.index_urls:
            # Compare against the index URL with auth stripped and a trailing slash.
            prefix = (remove_auth_from_url(u).rstrip('/') + '/')
            if url.startswith(prefix):
                return u
        return None
    def _get_new_credentials(self, original_url, allow_netrc=True, allow_keyring=True):
        """Find credentials for `original_url`.

        Search order: the URL itself, a matching index URL, netrc (if allowed),
        then the keyring (if allowed). Returns a (username, password) pair;
        either element may be None when nothing complete was found.
        """
        (url, netloc, url_user_password) = split_auth_netloc_from_url(original_url)
        (username, password) = url_user_password
        # 1. Credentials embedded directly in the URL win.
        if ((username is not None) and (password is not None)):
            logger.debug('Found credentials in url for %s', netloc)
            return url_user_password
        # 2. Try credentials embedded in a matching configured index URL.
        index_url = self._get_index_url(url)
        if index_url:
            index_info = split_auth_netloc_from_url(index_url)
            if index_info:
                (index_url, _, index_url_user_password) = index_info
                logger.debug('Found index url %s', index_url)
        if (index_url and (index_url_user_password[0] is not None)):
            (username, password) = index_url_user_password
            if ((username is not None) and (password is not None)):
                logger.debug('Found credentials in index url for %s', netloc)
                return index_url_user_password
        # 3. netrc lookup for the original URL.
        if allow_netrc:
            netrc_auth = get_netrc_auth(original_url)
            if netrc_auth:
                logger.debug('Found credentials in netrc for %s', netloc)
                return netrc_auth
        # 4. Keyring lookup, first by index URL then by bare netloc.
        if allow_keyring:
            kr_auth = (get_keyring_auth(index_url, username) or get_keyring_auth(netloc, username))
            if kr_auth:
                logger.debug('Found credentials in keyring for %s', netloc)
                return kr_auth
        # May still be partial (e.g. username only) or (None, None).
        return (username, password)
    def _get_url_and_credentials(self, original_url):
        """Strip auth from `original_url`; return (clean_url, username, password).

        Uses the per-netloc cache first, and caches any complete pair found.
        """
        (url, netloc, _) = split_auth_netloc_from_url(original_url)
        (username, password) = self.passwords.get(netloc, (None, None))
        if ((username is None) and (password is None)):
            (username, password) = self._get_new_credentials(original_url)
        if ((username is not None) or (password is not None)):
            # Normalise partial credentials to empty strings and cache them.
            username = (username or '')
            password = (password or '')
            self.passwords[netloc] = (username, password)
        # Invariant: both set, or both None — never a half-set pair.
        assert (((username is not None) and (password is not None)) or ((username is None) and (password is None))), 'Could not load credentials from url: {}'.format(original_url)
        return (url, username, password)
    def __call__(self, req):
        """requests auth hook: attach basic auth (when known) and the 401 handler."""
        (url, username, password) = self._get_url_and_credentials(req.url)
        # Always rewrite the request URL with any embedded auth removed.
        req.url = url
        if ((username is not None) and (password is not None)):
            req = HTTPBasicAuth(username, password)(req)
        # Retry with prompted credentials if the server rejects these.
        req.register_hook('response', self.handle_401)
        return req
    def _prompt_for_password(self, netloc):
        """Interactively ask for credentials for `netloc`.

        Returns (username, password, save?) where save? indicates the password
        was typed (rather than recovered from the keyring) and may be stored.
        """
        username = ask_input('User for {}: '.format(netloc))
        if (not username):
            return (None, None, False)
        # A keyring hit for the typed username avoids asking for the password.
        auth = get_keyring_auth(netloc, username)
        if (auth and (auth[0] is not None) and (auth[1] is not None)):
            return (auth[0], auth[1], False)
        password = ask_password('Password: ')
        return (username, password, True)
    def _should_save_password_to_keyring(self):
        """Ask the user whether to persist the credentials (requires keyring)."""
        if (not keyring):
            return False
        return (ask('Save credentials to keyring [y/N]: ', ['y', 'n']) == 'y')
    def handle_401(self, resp, **kwargs):
        """Response hook: on 401, prompt for credentials and replay the request."""
        if (resp.status_code != 401):
            return resp
        # Only prompt in interactive mode.
        if (not self.prompting):
            return resp
        parsed = urllib_parse.urlparse(resp.url)
        (username, password, save) = self._prompt_for_password(parsed.netloc)
        self._credentials_to_save = None
        if ((username is not None) and (password is not None)):
            self.passwords[parsed.netloc] = (username, password)
            if (save and self._should_save_password_to_keyring()):
                # Persist only after the retried request succeeds (save_credentials).
                self._credentials_to_save = (parsed.netloc, username, password)
        # Consume the body and release the connection so it can be reused.
        resp.content
        resp.raw.release_conn()
        # Replay the original request with the prompted credentials attached.
        req = HTTPBasicAuth((username or ''), (password or ''))(resp.request)
        req.register_hook('response', self.warn_on_401)
        if self._credentials_to_save:
            req.register_hook('response', self.save_credentials)
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)
        return new_resp
    def warn_on_401(self, resp, **kwargs):
        """Response hook: warn when the retried request still gets a 401."""
        if (resp.status_code == 401):
            logger.warning('401 Error, Credentials not correct for %s', resp.request.url)
    def save_credentials(self, resp, **kwargs):
        """Response hook: persist pending credentials after a successful reply."""
        assert (keyring is not None), 'should never reach here without keyring'
        if (not keyring):
            return
        creds = self._credentials_to_save
        self._credentials_to_save = None
        if (creds and (resp.status_code < 400)):
            try:
                logger.info('Saving credentials to keyring')
                keyring.set_password(*creds)
            except Exception:
                logger.exception('Failed to save credentials')
def get_numeric_features(df, target_column):
    """Return the names of df's numeric columns, excluding `target_column`.

    `df.dtypes` is expected to yield (column_name, column_type) pairs, with
    column types given as strings (as a Spark DataFrame provides).
    """
    numeric_dtypes = ('int', 'bigint', 'long', 'float', 'double', 'decimal')
    selected = []
    for column_name, column_type in df.dtypes:
        if column_type in numeric_dtypes and column_name != target_column:
            selected.append(column_name)
    return selected
def find_token(sentence, start_pos):
    """Return the first token in `sentence` whose `.idx` equals `start_pos`,
    or None when no token starts at that character offset."""
    return next((tok for tok in sentence if tok.idx == start_pos), None)
def main():
    """Preprocess ONMT train/valid corpora and serialise them with torch.

    Reads paths and options from the module-level `opt`; writes
    `<save_data>.vocab.pt`, `<save_data>.train.pt` and `<save_data>.valid.pt`.
    """
    print('Preparing training ...')
    with codecs.open(opt.train_src, 'r', 'utf-8') as src_file:
        src_line = src_file.readline().strip().split()
    (_, _, nFeatures) = onmt.IO.extract_features(src_line)
    fields = onmt.IO.ONMTDataset.get_fields(nFeatures)
    print('Building Training...')
    train = onmt.IO.ONMTDataset(opt.train_src, opt.train_tgt, fields, opt)
    print('Building Vocab...')
    onmt.IO.ONMTDataset.build_vocab(train, opt)
    print('Building Valid...')
    valid = onmt.IO.ONMTDataset(opt.valid_src, opt.valid_tgt, fields, opt)
    print('Saving train/valid/fields')
    # Bug fix: the original handed bare open(...) handles to torch.save and
    # never closed them; context managers guarantee the files are flushed.
    with open(opt.save_data + '.vocab.pt', 'wb') as f:
        torch.save(onmt.IO.ONMTDataset.save_vocab(fields), f)
    # Fields are cleared before serialising the datasets — presumably because
    # they are not serialisable and are restored at load time; confirm upstream.
    train.fields = []
    valid.fields = []
    with open(opt.save_data + '.train.pt', 'wb') as f:
        torch.save(train, f)
    with open(opt.save_data + '.valid.pt', 'wb') as f:
        torch.save(valid, f)
def _read_json(path, encoding='utf-8', fields=None, dropna=True):
if fields:
fields = set(fields)
with open(path, 'r', encoding=encoding) as f:
for (line_idx, line) in enumerate(f):
data = json.loads(line)
if (fields is None):
(yield (line_idx, data))
continue
_res = {}
for (k, v) in data.items():
if (k in fields):
_res[k] = v
if (len(_res) < len(fields)):
if dropna:
continue
else:
raise ValueError('invalid instance at line: {}'.format(line_idx))
(yield (line_idx, _res)) |
def get_reporter(mode, *args, **kwargs):
    """Instantiate the hpopt reporter registered for `mode`.

    Falls back to the generic (DEFAULT_REPORTER) implementation when the mode
    is unknown, or when the chosen reporter reports itself unavailable.
    """
    chosen_cls = _hpopt_modes.get(mode)
    if chosen_cls is None:
        logger.warning(f'hpopt_mode {mode} is not supported, reverting to generic')
        chosen_cls = _hpopt_modes[DEFAULT_REPORTER]
    candidate = chosen_cls(*args, **kwargs)
    if candidate.is_available:
        return candidate
    logger.warning('Reverting to a generic reporter')
    fallback_cls = _hpopt_modes[DEFAULT_REPORTER]
    return fallback_cls(*args, **kwargs)
class ConcatGenerator(NetworkBase):
    """Two-branch generator: a background ResNet generator plus a transfer
    ResUnet generator, whose outputs are returned side by side."""

    def __init__(self, bg_dim, src_dim, tsf_dim, conv_dim=64, repeat_num=6):
        super(ConcatGenerator, self).__init__()
        self._name = 'concat_generator'
        self.n_down = 3
        self.repeat_num = repeat_num
        # Background branch over bg_dim channels.
        self.bg_model = ResNetGenerator(
            conv_dim=conv_dim, c_dim=bg_dim, repeat_num=repeat_num,
            k_size=3, n_down=self.n_down)
        # Transfer branch over RGB (3) + source + transfer channels.
        self.tsf_model = ResUnetGenerator(
            conv_dim=conv_dim, c_dim=3 + src_dim + tsf_dim, repeat_num=repeat_num,
            k_size=3, n_down=self.n_down)

    def forward(self, bg_inputs, inputs):
        """Return (background image, transferred image, transfer mask)."""
        img_bg = self.bg_model(bg_inputs)
        tsf_img, tsf_mask = self.tsf_model(inputs)
        return img_bg, tsf_img, tsf_mask

    def inference(self, inputs):
        """Run only the transfer branch; return (transferred image, mask)."""
        tsf_img, tsf_mask = self.tsf_model(inputs)
        return tsf_img, tsf_mask
def test_byte():
    """A uint8 array tagged with __array__='byte' should behave like bytes."""
    text = 'hey there'
    codes = [ord(ch) for ch in text]
    arr = ak.highlevel.Array(np.array(codes, dtype=np.uint8), check_valid=True)
    arr = ak.with_parameter(arr, '__array__', 'byte')
    assert bytes(arr) == b'hey there'
    assert str(arr) == str(codes)
    assert ak.to_list(arr) == b'hey there'
# NOTE(review): this was a bare call whose return value (a decorator) was
# discarded — almost certainly a stripped '@'. Restored as a decorator so the
# numpy-comparison wrapper actually applies to test_powr; confirm the original
# decorator name against upstream (possibly 'compare_numpy_output').
@_numpy_output(positive=True, casting=np.float64)
def test_powr(A: dace.int64[1], B: dace.int64[(5, 5)]):
    """Elementwise power A ** B on positive int64 inputs, checked against numpy."""
    return (A ** B)
class CosineScheduler(BaseLearningRateScheduler):
    """Cosine-annealing schedule: decays init_lr smoothly to 0 over max_iter."""

    def __init__(self, init_lr, max_iter):
        self.init_lr = init_lr  # starting learning rate
        self.max_iter = max_iter  # iteration at which the rate reaches 0

    def get_learning_rate(self, iter):
        """Return the learning rate for iteration `iter`."""
        progress = (iter * 1.0) / self.max_iter
        # Half-cosine from init_lr (progress=0) down to 0 (progress=1).
        return self.init_lr * ((math.cos(progress * math.pi) + 1.0) * 0.5)
def main(instanc_size=511, num_threads=24):
    """Crop every GOT-10k video into instance crops of size `instanc_size`,
    processing each video set with a pool of `num_threads` worker processes."""
    crop_path = './crop{:d}'.format(instanc_size)
    if not exists(crop_path):
        makedirs(crop_path)
    for sub_set in sub_sets:
        sub_set_base_path = join(got10k_base_path, sub_set)
        for video_set in sorted(listdir(sub_set_base_path)):
            videos = sorted(listdir(join(sub_set_base_path, video_set)))
            n_videos = len(videos)
            # One process pool per video set; progress is printed as jobs finish.
            with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
                pending = [
                    executor.submit(crop_video, sub_set, video_set, video, crop_path, instanc_size)
                    for video in videos
                ]
                for done_count, _ in enumerate(futures.as_completed(pending)):
                    printProgress(done_count, n_videos, prefix=video_set, suffix='Done ', barLength=40)
def test__setup_logging_single_verbose_without_log_file():
    """_setup_logging(1, False) should install exactly one handler on the root
    logger and set its level to INFO when no log file is requested."""
    # Reset the logging module to a pristine state; reloading is a heavy-handed
    # way to drop any handlers/levels configured by earlier tests. The
    # shutdown/reload order is deliberate — do not reorder these statements.
    logging.shutdown()
    importlib.reload(logging)
    _setup_logging(1, False)
    logger = logging.getLogger('')
    assert (len(logger.handlers) == 1)
    assert (logger.level == logging.INFO)
    # Leave logging pristine for subsequent tests.
    logging.shutdown()
    importlib.reload(logging)
class ResNet(nn.Module):
    """CIFAR-style ResNet: conv3x3 stem, four residual stages, linear head.

    Besides the standard forward pass it exposes helpers returning
    intermediate activations for feature analysis.
    """

    def __init__(self, block, num_blocks, num_classes=10, drop_rate=0.0):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.droprate = drop_rate
        self.conv1 = conv3x3(3, 64)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` residual blocks; only the first uses `stride`."""
        units = []
        for unit_stride in [stride] + [1] * (num_blocks - 1):
            units.append(block(self.in_planes, planes, unit_stride, self.droprate))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*units)

    def _stem(self, x):
        """Initial conv + batch-norm + ReLU shared by every forward variant."""
        return F.relu(self.bn1(self.conv1(x)))

    def _stages(self):
        """The four residual stages, in order."""
        return (self.layer1, self.layer2, self.layer3, self.layer4)

    def forward(self, x):
        out = self._stem(x)
        for stage in self._stages():
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)

    def feature_list(self, x):
        """Return (logits, [stem output followed by each stage's output])."""
        out = self._stem(x)
        features = [out]
        for stage in self._stages():
            out = stage(out)
            features.append(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out), features

    def intermediate_forward(self, x, layer_index):
        """Return the activation after stage `layer_index` (stem output when
        `layer_index` is not in 1..4, matching the original elif chain)."""
        out = self._stem(x)
        if layer_index in (1, 2, 3, 4):
            for stage in self._stages()[:layer_index]:
                out = stage(out)
        return out

    def penultimate_forward(self, x):
        """Return (logits, activation just before global average pooling)."""
        out = self._stem(x)
        for stage in (self.layer1, self.layer2, self.layer3):
            out = stage(out)
        penultimate = self.layer4(out)
        out = F.avg_pool2d(penultimate, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out), penultimate
def tidy_ylabel(metric):
    """Map a logged metric key to its human-readable y-axis label."""
    labels = {
        'val_mi': 'MI',
        'val_au': 'AU',
        'train_unweighted_reg_loss': 'KL',
        'val_ppl': 'Validation PPL',
    }
    # The placeholder flags metrics that still need a proper label.
    return labels.get(metric, 'FIXFIXFIXFIXFIX')
class VermaModuleHomset(Homset):
    """Homset between two Verma modules over the same Lie algebra.

    Such a homset is at most one-dimensional: it is spanned by the morphism
    sending the domain's highest-weight vector to the singular vector (when
    one exists) of the codomain.
    """

    def __call__(self, x, **options):
        """Coerce ``x`` into a morphism in this homset.

        Accepts an existing VermaModuleMorphism (re-parenting, or composing
        with natural maps so domain/codomain match) or a base-ring scalar,
        which scales the natural map (the zero map if no singular vector).
        """
        if isinstance(x, VermaModuleMorphism):
            if (x.parent() is self):
                return x
            if (x.parent() == self):
                x._set_parent(self)
                return x
            # Compose with natural maps until domain and codomain agree.
            if (x.domain() != self.domain()):
                x = (x * Hom(self.domain(), x.domain()).natural_map())
            if (x.codomain() != self.codomain()):
                x = (Hom(x.codomain(), self.codomain()).natural_map() * x)
            return x
        if (x in self.base_ring()):
            if (self.singular_vector() is None):
                return self.zero()
            return self.element_class(self, self.base_ring()(x))
        return super().__call__(x, **options)

    def _an_element_(self):
        return self.natural_map()

    # NOTE(review): the source contained stray bare `_method` tokens before
    # singular_vector, natural_map and zero — almost certainly `@cached_method`
    # decorators stripped during extraction. As bare expressions they raised
    # NameError at class creation, so they are removed here; restore the
    # decorators (and their import) once confirmed against upstream.
    def singular_vector(self):
        """Return the image of the domain's highest-weight vector under the
        natural map, or ``None`` when the homset is zero."""
        if self.is_endomorphism_set():
            return self.codomain().highest_weight_vector()
        if (self.domain()._dominant_data[0] != self.codomain()._dominant_data[0]):
            return None
        from sage.combinat.root_system.coxeter_group import CoxeterGroup
        W = CoxeterGroup(self.domain()._g._cartan_type)
        wp = W.from_reduced_word(self.domain()._dominant_data[1])
        w = W.from_reduced_word(self.codomain()._dominant_data[1])
        # A nonzero morphism exists only when w <= wp in Bruhat order.
        if (not w.bruhat_le(wp)):
            return None
        C = self.codomain()
        pbw = C._pbw
        f = C._g.f()
        F = {i: pbw(f[i]) for i in f.keys()}
        red_word = (wp * (~ w)).reduced_word()
        rho = C._weight.parent().rho()
        ac = C._weight.parent().simple_coroots()
        elt = pbw.one()
        wt = C._weight
        # Build the product of powers of the f_i along the reduced word;
        # each exponent must be a nonnegative integer or no map exists.
        for i in reversed(red_word):
            exp = (wt + rho).scalar(ac[i])
            if ((exp not in ZZ) or (exp < 0)):
                return None
            elt = ((F[i] ** ZZ(exp)) * elt)
            wt = wt.dot_action([i])
        return C.highest_weight_vector()._acted_upon_(elt, False)

    def natural_map(self):
        """Return the canonical generator of this homset (zero if none exists)."""
        if (self.singular_vector() is None):
            return self.zero()
        return self.element_class(self, self.base_ring().one())

    def zero(self):
        """Return the zero morphism of this homset."""
        return self.element_class(self, self.base_ring().zero())

    def dimension(self):
        """Return the dimension of this homset (0 or 1)."""
        if (self.singular_vector() is None):
            return ZZ.zero()
        return ZZ.one()

    def basis(self):
        """Return a basis: empty, or the single natural map."""
        if (self.singular_vector() is None):
            return Family([])
        return Family([self.natural_map()])

    Element = VermaModuleMorphism
def __eval(run_args):
    """Run SyMux evaluation on the dataset/types configured in `run_args`."""
    SyMuxTrainer(run_args).eval(
        dataset_path=run_args.dataset_path,
        types_path=run_args.types_path,
        input_reader_cls=input_reader.JsonInputReader,
    )
class LayerModelHelper(model_helper.ModelHelper):
    def __init__(self, name, input_feature_schema, trainer_extra_schema, keep_blobs=False, use_attribution=True):
        """Model helper tracking layers, parameters and schemas for layered models.

        Args:
            name: model name forwarded to ModelHelper.
            input_feature_schema: schema of the input features; instantiated as
                fresh net blobs unless keep_blobs is True.
            trainer_extra_schema: extra trainer-side schema.
            keep_blobs: if True, clone the given schemas (reusing their blobs)
                instead of creating new net blobs.
            use_attribution: flag stored for downstream attribution support.
        """
        super(LayerModelHelper, self).__init__(name=name)
        # Layer bookkeeping.
        self._layer_names = set()
        self._layers = []
        self._param_to_shape = {}
        # Seeding state for parameter initialization.
        self._seed = None
        self._sequence_seed = True
        # Per-parameter optimizer / regularizer assignments.
        self.param_to_optim = {}
        self.param_to_reg = {}
        self._default_optimizer = None
        self._loss = None
        self._prediction = []
        self._output_schema = None
        # Net modifiers applied after gradients / at finalization.
        self._post_grad_net_modifiers = []
        self._final_net_modifiers = []
        self._breakdown_map = None
        self._input_feature_schema = (schema.NewRecord(self.net, input_feature_schema) if (not keep_blobs) else input_feature_schema.clone())
        self._trainer_extra_schema = (schema.NewRecord(self.net, trainer_extra_schema) if (not keep_blobs) else trainer_extra_schema.clone())
        self._metrics_schema = schema.Struct()
        self._preproc_output_schema = None
        self._init_global_constants()
        self.param_init_net = self.create_init_net('param_init_net')
        self._initialize_params = True
        self._transfer_learning_blob_name_mappings = None
        # Ad-hoc diagnostics and plotting registrations.
        self.ad_hoc_diagnose_blobs_and_operations = []
        self.ad_hoc_plot_blobs = []
        self.use_attribution = use_attribution
    def clear_output_schema(self):
        """Reset the model's output schema so it can be redefined."""
        self._output_schema = None
    def set_initialize_params(self, initialize_params):
        """Enable or disable parameter initialization for subsequently added layers."""
        self._initialize_params = initialize_params
    def add_metric_field(self, name, value):
        """Append a named field to the metrics schema; duplicates are rejected."""
        assert (name not in self._metrics_schema.fields), 'Try to add metric field twice: {}'.format(name)
        self._metrics_schema = (self._metrics_schema + schema.Struct((name, value)))
def filter_metrics_schema(self, white_set):
logger.info('Filter metric schema with white_set {}'.format(white_set))
field_names = self._metrics_schema.field_names()
for name in field_names:
if (name not in white_set):
self._metrics_schema = (self._metrics_schema - schema.Struct((name, schema.Scalar())))
def add_ad_hoc_plot_blob(self, blob, dtype=None):
assert isinstance(blob, (six.string_types, core.BlobReference)), 'expect type str or BlobReference, but got {}'.format(type(blob))
dtype = (dtype or (np.float, (1,)))
self.add_metric_field(str(blob), schema.Scalar(dtype, blob))
self.ad_hoc_plot_blobs.append(blob)
def _get_global_constant_initializer_op(blob_name, array=None, dtype=None, initializer=None):
    """Build the init-net operator that fills `blob_name` with a constant.

    Exactly one of `array` (a constant value, optionally cast to `dtype`) or
    `initializer` (a callable taking the blob name and returning an operator)
    must be given.

    NOTE(review): invoked via the class
    (``LayerModelHelper._get_global_constant_initializer_op``), so it behaves
    statically; consider adding ``@staticmethod`` explicitly.
    """
    if array is not None:
        assert initializer is None, 'Only one from array and initializer should be specified'
        array = np.array(array) if dtype is None else np.array(array, dtype=dtype)
        # Map the array dtype to the matching GivenTensor*Fill operator.
        # Bug fix: `np.str` and `np.bool` were removed NumPy aliases; use
        # dtype.kind for strings and `np.bool_` for booleans instead.
        if array.dtype == np.int32:
            op_name = 'GivenTensorIntFill'
        elif array.dtype == np.int64:
            op_name = 'GivenTensorInt64Fill'
        elif array.dtype.kind in ('S', 'U'):
            op_name = 'GivenTensorStringFill'
        elif array.dtype == np.bool_:
            op_name = 'GivenTensorBoolFill'
        else:
            op_name = 'GivenTensorFill'

        def initializer(blob_name):
            return core.CreateOperator(op_name, [], blob_name, shape=array.shape, values=array.flatten().tolist())
    else:
        assert initializer is not None
    return initializer(blob_name)
def add_global_constant(self, name, array=None, dtype=None, initializer=None):
    """Register a named global constant blob and its initializer op; returns
    the blob name."""
    assert isinstance(name, six.string_types), 'name should be a string as we are using it as map key'
    assert name not in self.global_constants, '%s already added in global_constants' % name
    blob_name = self.net.NextBlob(name)
    self.global_constants[name] = blob_name
    init_op = LayerModelHelper._get_global_constant_initializer_op(blob_name, array, dtype, initializer)
    assert blob_name not in self.global_constant_initializers, 'there is already a initializer op associated with blob %s' % blob_name
    self.global_constant_initializers[blob_name] = init_op
    return blob_name
def maybe_add_global_constant(self, name, *args, **kwargs):
    """Return the existing global constant `name` (validating that a repeated
    registration uses an equivalent initializer), or register it anew."""
    if name in self.global_constants:
        blob_name = self.global_constants[name]
        initializer_op = LayerModelHelper._get_global_constant_initializer_op(blob_name, *args, **kwargs)
        # Bug fix: the message previously printed the *new* op as "previous"
        # and the stored one as "now"; report them in the right order.
        assert utils.OpAlmostEqual(initializer_op, self.global_constant_initializers[blob_name], 'debug_info'), ('conflict initializers for global constant %s, previous %s, now %s' % (blob_name, str(self.global_constant_initializers[blob_name]), str(initializer_op)))
        return blob_name
    return self.add_global_constant(name, *args, **kwargs)
def _init_global_constants(self):
    """Create the constant bookkeeping maps and register the built-ins."""
    self.global_constants = {}
    self.global_constant_initializers = {}
    self.add_global_constant('ONE', 1.0)
    self.add_global_constant('NAN', float('NaN'))
    self.add_global_constant('ZERO', 0.0)
    self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')
def _add_global_constants(self, init_net):
    """Append every registered constant-initializer op to `init_net`."""
    for init_op in viewvalues(self.global_constant_initializers):
        init_net._net.op.extend([init_op])
def create_init_net(self, name):
    """Create a new net pre-populated with the model's global constants."""
    init_net = core.Net(name)
    self._add_global_constants(init_net)
    return init_net
def _validate_param_shape(self, param_name, shape):
    """Raise ValueError if `shape` disagrees with the shape previously
    registered for the same (shared) parameter name."""
    if param_name not in self._param_to_shape:
        return
    ref_shape = self._param_to_shape[param_name]
    if shape == ref_shape:
        return
    raise ValueError('Got inconsistent shapes between shared parameters when trying to map a blob in scope {0} to {1}. ref_shape : {2}, shape : {3}'.format(scope.CurrentNameScope(), param_name, ref_shape, shape))
def _validate_param_optim(self, param_name, optim):
    """Validate that a re-registered (shared) parameter uses a compatible
    optimizer: None only with the default optimizer, NoOptim only with
    NoOptim, and Optimizer instances must match in type and attributes."""
    # First registration of this parameter: nothing to validate against.
    if (param_name not in self.param_to_optim):
        return
    logger.info('{} shares the same parameter with another parameter. Validating if the same optimizer has been specified for them.'.format(param_name))
    ref_optim = self.param_to_optim[param_name]
    if (optim is None):
        # None means "use default"; the earlier registration must then have
        # used exactly the default optimizer.
        assert (ref_optim == self._default_optimizer), 'Optim for {} is None which will fall back to use default_optimizer. However, the optimizer that has been specified for this shared parameter is {} which is different from default_optimizer {}. Please check the optimizers specified for parameters shared with {} and the default_optimizer to ensure the consistency.'.format(param_name, ref_optim, self._default_optimizer, param_name)
    elif (optim == self.NoOptim):
        assert (ref_optim == self.NoOptim), 'Optim for {} is NoOptim. However, the optimizer for the parameters shared with {} is {} which is different from NoOptim. Please check the optimizer specified for other parameters in the shared group to ensure consistency.'.format(param_name, param_name, ref_optim)
    elif isinstance(optim, Optimizer):
        # NOTE(review): this format string has three placeholders but four
        # arguments; the trailing `optim` is silently ignored by str.format.
        assert isinstance(ref_optim, Optimizer), 'Optim for {} is an instance of Optimizer. However, the optimizer for the parameters shared with {} is {} which is not an instance of Optimizer. Please check the optimizer specified for other parameters in the shared group to ensure consistency.'.format(param_name, param_name, ref_optim, optim)
        assert ((type(optim) is type(ref_optim)) and (optim.attributes == ref_optim.attributes)), "Optim for {} is an instance of Optimizer. However, the optimizer for the parameters shared with {} is {}. This optimizer either doesn't have the same type as the current optimizer: {} vs {}, or its attributes such as learning rate are different from that of current optimizer which is {} vs {}. Please check the optimizer specified for other parameters in the shared group to ensure consistency.".format(param_name, param_name, ref_optim, type(optim), type(ref_optim), optim.attributes, ref_optim.attributes)
    else:
        raise ValueError('optim should be either None, NoOptim, or an instance of Optimizer, Got {} '.format(optim))
def create_param(self, param_name, shape, initializer, optimizer=None, ps_param=None, regularizer=None):
    """Create (or re-reference) a model parameter.

    `param_name` may be a BlobReference or a string; strings are resolved
    through the parameter-sharing context, so they may map onto a previously
    created (shared) parameter. `initializer` is a 1- or 2-tuple of
    (op_type[, op_kwargs]). Shared parameters are validated for consistent
    shape and optimizer before the shape is recorded.
    """
    if isinstance(param_name, core.BlobReference):
        param_name = str(param_name)
    elif isinstance(param_name, six.string_types):
        # Parameter name will be equal to current Namescope that got
        # resolved with the respect of parameter sharing of the scopes.
        param_name = parameter_sharing_context.get_parameter_name(param_name)
    else:
        raise ValueError('Unsupported type for param_name')
    param_blob = core.BlobReference(param_name)
    if (len(initializer) == 1):
        init_op_args = {}
    else:
        assert (len(initializer) == 2)
        # Deep-copy so callers can safely reuse their kwargs dict.
        init_op_args = copy.deepcopy(initializer[1])
    if (shape is not None):
        assert ('shape' not in init_op_args)
        init_op_args.update({'shape': shape})
    # Only emit an initializer op when parameter initialization is enabled
    # (e.g. disabled when loading pre-trained weights).
    initializer_op = None
    if self._initialize_params:
        initializer_op = core.CreateOperator(initializer[0], [], param_blob, **init_op_args)
    param = layers.LayerParameter(parameter=param_blob, initializer=initializer_op, optimizer=optimizer, ps_param=ps_param, regularizer=regularizer)
    self._validate_param_shape(param_name, shape)
    self._validate_param_optim(param_name, optimizer)
    self._param_to_shape[param_name] = shape
    return param
def next_layer_name(self, prefix):
    """Return a unique, scope-qualified layer name derived from `prefix`,
    recording it so later calls never reuse it."""
    base_name = core.ScopedName(prefix)
    candidate = base_name
    suffix = 0
    while candidate in self._layer_names:
        candidate = base_name + '_auto_' + str(suffix)
        suffix += 1
    self._layer_names.add(candidate)
    return candidate
def add_layer(self, layer):
    """Register `layer`, wire up its parameters (optimizer/regularizer
    bookkeeping) and add its operators; returns the layer's output schema."""
    self._layers.append(layer)
    for param in layer.get_parameters():
        assert isinstance(param.parameter, core.BlobReference)
        # Parameters without an explicit optimizer use the model default.
        self.param_to_optim[str(param.parameter)] = param.optimizer or self.default_optimizer
        self.params.append(param.parameter)
        if isinstance(param, layers.LayerParameter):
            logger.info('Add parameter regularizer {0}'.format(param.parameter))
            self.param_to_reg[param.parameter] = param.regularizer
        elif isinstance(param, ParameterInfo):
            logger.info('regularization is unsupported for ParameterInfo object')
        else:
            raise ValueError('unknown object type besides ParameterInfo and LayerParameter: {}'.format(param))
    layer.add_operators(self.net, self.param_init_net)
    return layer.output_schema
def get_parameter_blobs(self):
    """Collect the parameter blobs of every layer, in registration order."""
    return [param.parameter for layer in self._layers for param in layer.get_parameters()]
def add_post_grad_net_modifiers(self, modifier):
    """Register a NetModifier to run on the train net after gradient ops."""
    assert modifier not in self._post_grad_net_modifiers, '{0} is already in {1}'.format(modifier, self._post_grad_net_modifiers)
    assert isinstance(modifier, NetModifier), '{} has to be a NetModifier instance'.format(modifier)
    self._post_grad_net_modifiers.append(modifier)
def add_final_net_modifiers(self, modifier):
    """Register a NetModifier to run as the last step of net construction."""
    assert modifier not in self._final_net_modifiers, '{0} is already in {1}'.format(modifier, self._final_net_modifiers)
    assert isinstance(modifier, NetModifier), '{} has to be a NetModifier instance'.format(modifier)
    self._final_net_modifiers.append(modifier)
@property
def seed(self):
    """RNG seed stored via `store_seed`, or None if unset.

    Restored the stripped `@property` decorator: `apply_seed`/`store_seed`
    treat this as a read-only attribute, not a callable.
    """
    return self._seed
@property
def sequence_seed(self):
    """Whether nets should apply sequence seeding (see `store_seed`).

    Restored the stripped `@property` decorator, matching `seed`.
    """
    return self._sequence_seed
def store_seed(self, seed, sequence_seed=True):
    """Remember an RNG seed (and whether to use sequence seeding) so
    `apply_seed` can propagate it to nets later."""
    self._seed = seed
    self._sequence_seed = sequence_seed
def apply_seed(self, net):
    """Propagate the stored seed (if any truthy seed was stored) to `net`."""
    if self._seed:
        net.set_rand_seed(self._seed, self._sequence_seed)
@property
def default_optimizer(self):
    """Optimizer applied to parameters that don't specify their own.

    The stray `_optimizer.setter` line was the remnant of a stripped
    `@default_optimizer.setter` decorator; both decorators are restored
    (`add_layer` reads `self.default_optimizer` as a value, which requires
    property semantics).
    """
    return self._default_optimizer

@default_optimizer.setter
def default_optimizer(self, optimizer):
    self._default_optimizer = optimizer
@property
def input_feature_schema(self):
    """Schema of the model's input features (restored stripped `@property`)."""
    return self._input_feature_schema
@property
def trainer_extra_schema(self):
    """Schema of trainer-only extra inputs (restored stripped `@property`)."""
    return self._trainer_extra_schema
@property
def metrics_schema(self):
    """Schema of exported metric fields (restored stripped `@property`)."""
    return self._metrics_schema
@property
def output_schema(self):
    """Model output schema; must be assigned before reading.

    The stray `_schema.setter` line was the remnant of a stripped
    `@output_schema.setter` decorator; both decorators are restored.
    """
    assert self._output_schema is not None
    return self._output_schema

@output_schema.setter
def output_schema(self, schema):
    # Write-once: call clear_output_schema() to reset before reassigning.
    assert self._output_schema is None
    self._output_schema = schema
@property
def preproc_output_schema(self):
    """Schema of the preprocessing stage output; must be assigned once.

    The stray `_output_schema.setter` line was the remnant of a stripped
    `@preproc_output_schema.setter` decorator; both decorators are restored.
    """
    assert self._preproc_output_schema is not None
    return self._preproc_output_schema

@preproc_output_schema.setter
def preproc_output_schema(self, schema):
    # Write-once, like output_schema.
    assert self._preproc_output_schema is None
    self._preproc_output_schema = schema
@property
def prediction(self):
    """Non-empty list of (prediction, weight) pairs added via add_prediction
    (restored stripped `@property`)."""
    assert self._prediction, 'model prediction is empty'
    return self._prediction
def add_prediction(self, prediction, weight=1.0):
    """Append a (prediction, weight) pair to the model's prediction list."""
    assert prediction is not None, 'Added prediction should not be None'
    self._prediction.append((prediction, weight))
@property
def transfer_learning_blob_name_mappings(self):
    """Blob-name mapping used to load transfer-learning checkpoints.

    The stray `_learning_blob_name_mappings.setter` line was the remnant of a
    stripped `@transfer_learning_blob_name_mappings.setter` decorator; both
    decorators are restored.
    """
    return self._transfer_learning_blob_name_mappings

@transfer_learning_blob_name_mappings.setter
def transfer_learning_blob_name_mappings(self, blob_name_mappings):
    assert blob_name_mappings is not None, 'Transfer learning blob name mappings should not be None'
    self._transfer_learning_blob_name_mappings = blob_name_mappings
@property
def loss(self):
    """Model loss; must be assigned (via the setter or add_loss) before read.

    Two bare `def loss` definitions in a row only make sense as a stripped
    `@property` / `@loss.setter` pair; the decorators are restored.
    """
    assert self._loss is not None
    return self._loss

@loss.setter
def loss(self, loss):
    # Write-once; use add_loss to accumulate further loss terms.
    assert self._loss is None
    self._loss = loss
def has_loss(self):
    """True once any loss has been registered on the model."""
    return self._loss is not None
def add_loss(self, loss, name='unnamed'):
    """Accumulate `loss` into the model's loss struct under `name`,
    auto-suffixing with `_auto_<i>` on name collisions."""
    assert loss is not None, 'Added loss should not be None'
    assert isinstance(loss, (schema.Scalar, schema.Struct)), 'Added loss should be a scalar or a struct'
    if self._loss is None:
        self._loss = schema.Struct((name, loss))
        return
    # Promote a bare scalar loss into a struct so new terms can be appended.
    if isinstance(self._loss, schema.Scalar):
        self._loss = schema.Struct(('unnamed', self._loss))
    prefix = name
    suffix_idx = 0
    while prefix in self._loss:
        prefix = name + '_auto_' + str(suffix_idx)
        suffix_idx += 1
    self._loss = self._loss + schema.Struct((prefix, loss))
def add_output_schema(self, name, value):
    """Attach `value` to the output schema under the unique field `name`."""
    assert value is not None, 'Added output schema {} should not be None'.format(name)
    assert isinstance(value, (schema.Scalar, schema.Struct)), 'Added output schema {} should be a scalar or a struct.\n Now it is {}.'.format(name, type(value))
    if self._output_schema is None:
        self._output_schema = schema.Struct((name, value))
    else:
        assert name not in self._output_schema.fields, 'Output Schema Field {} already exists'.format(name)
        self._output_schema = self._output_schema + schema.Struct((name, value))
def add_trainer_extra_schema(self, trainer_extra_schema):
    """Materialize `trainer_extra_schema` as blobs on the net and merge it
    into the model's trainer-extra schema."""
    extra_record = schema.NewRecord(self.net, trainer_extra_schema)
    self._trainer_extra_schema += extra_record
def __getattr__(self, layer):
    """Resolve unknown attributes as layer constructors.

    Registered layer names return a wrapper that creates the layer and adds
    it to the model. Raw operator names (or names prefixed with
    'FunctionalLayer') return a wrapper around a 'Functional' layer that
    applies the operator record-to-record. Anything else raises
    AttributeError.
    """
    def is_functional_layer(layer):
        # A name is "functional" if it is a registered Caffe2 operator or
        # carries the explicit FunctionalLayer prefix.
        if core.IsOperator(layer):
            return True
        elif layer.startswith('FunctionalLayer'):
            return True
        else:
            return False

    def resolve_functional_layer(layer):
        # Strip the FunctionalLayer prefix to recover the operator name.
        if core.IsOperator(layer):
            return layer
        elif layer.startswith('FunctionalLayer'):
            return layer[len('FunctionalLayer'):]
        else:
            raise ValueError(('%s cannot be resolved as functional layer' % layer))
    # Never intercept dunder lookups (pickling, copy, etc. rely on this).
    if layer.startswith('__'):
        raise AttributeError(layer)
    if layers.layer_exists(layer):
        def wrapper(*args, **kwargs):
            new_layer = layers.create_layer(layer, self, *args, **kwargs)
            if kwargs.get('output_to_metrics', False):
                new_layer.export_output_for_metrics()
            if kwargs.get('params_to_metrics', False):
                new_layer.export_params_for_metrics()
            return self.add_layer(new_layer)
        return wrapper
    elif is_functional_layer(layer):
        # NOTE: Currently this branch is implemented via a Functional layer
        # that forwards the record field blobs straight to the operator.
        layer = resolve_functional_layer(layer)

        def wrapper(*args, **kwargs):
            def apply_operator(net, in_record, out_record, **kwargs):
                # TODO(amalevich): Switch to net.operator as soon as it gets
                # landed
                net.__getattr__(layer)(in_record.field_blobs(), out_record.field_blobs(), **kwargs)
            if ('name' not in kwargs):
                kwargs['name'] = layer
            new_layer = layers.create_layer('Functional', self, *args, function=apply_operator, **kwargs)
            if kwargs.get('output_to_metrics', False):
                new_layer.export_output_for_metrics()
            if kwargs.get('params_to_metrics', False):
                new_layer.export_params_for_metrics()
            return self.add_layer(new_layer)
        return wrapper
    else:
        raise AttributeError('Trying to create non-registered layer: {}'.format(layer))
@property
def layers(self):
    """List of layers added so far (restored stripped `@property`)."""
    return self._layers
def apply_regularizers_on_loss(self, train_net, train_init_net, blob_to_device=None):
    """Add each ON_LOSS regularization term as an extra named loss."""
    logger.info('apply regularizer on loss')
    for (param, regularizer) in viewitems(self.param_to_reg):
        if regularizer is None:
            continue
        logger.info('add regularizer {0} for param {1} to loss'.format(regularizer, param))
        assert isinstance(regularizer, Regularizer)
        added_loss_blob = regularizer(train_net, train_init_net, param, grad=None, by=RegularizationBy.ON_LOSS)
        logger.info(added_loss_blob)
        # Regularizers may opt out of the on-loss phase by returning None.
        if added_loss_blob is not None:
            self.add_loss(schema.Scalar(blob=added_loss_blob), str(added_loss_blob))
def apply_regularizers_after_optimizer(self, train_net, train_init_net, grad_map, blob_to_device=None):
    """Run AFTER_OPTIMIZER regularizers on each regularized parameter, placed
    on the parameter's device (CPU by default)."""
    logger.info('apply regularizer after optimizer')
    cpu_device = muji.OnCPU()
    blob_to_device = blob_to_device or {}
    for (param, regularizer) in viewitems(self.param_to_reg):
        if regularizer is None:
            continue
        assert isinstance(regularizer, Regularizer)
        logger.info('add regularizer {0} for param {1} to optimizer'.format(regularizer, param))
        device = get_param_device(param, grad_map.get(str(param)), param_to_device=blob_to_device, default_device=cpu_device)
        with core.DeviceScope(device):
            regularizer(train_net, train_init_net, param, grad=grad_map.get(str(param)), by=RegularizationBy.AFTER_OPTIMIZER)
def apply_post_grad_net_modifiers(self, trainer_net, trainer_init_net, grad_map, blob_to_device=None, modify_output_record=False):
    """Run registered post-gradient NetModifiers, passing only the gradients
    that belong to model parameters."""
    param_grad_map = {p: grad_map[p] for p in self.param_to_optim if p in grad_map}
    for modifier in self._post_grad_net_modifiers:
        modifier(trainer_net, trainer_init_net, param_grad_map, blob_to_device=blob_to_device, modify_output_record=modify_output_record)
def apply_final_net_modifiers(self, trainer_net, trainer_init_net, grad_map, blob_to_device=None, modify_output_record=False):
    """Run every registered final NetModifier with the full gradient map."""
    for modifier in self._final_net_modifiers:
        modifier(trainer_net, trainer_init_net, grad_map, blob_to_device=blob_to_device, modify_output_record=modify_output_record)
def apply_optimizers(self, train_net, train_init_net, grad_map, blob_to_device=None):
    """Apply each parameter's optimizer on that parameter's device."""
    cpu_device = muji.OnCPU()
    blob_to_device = blob_to_device or {}
    for (param, optimizer) in viewitems(self.param_to_optim):
        assert optimizer is not None, 'default optimizer must have been set in add_layer'
        device = get_param_device(param, grad_map.get(str(param)), param_to_device=blob_to_device, default_device=cpu_device)
        if device is not None:
            # NOTE(review): extra_info presumably carries placement hints that
            # don't apply during optimization — confirm before changing.
            del device.extra_info[:]
        with core.DeviceScope(device):
            optimizer(train_net, train_init_net, param, grad_map.get(str(param)))
def _GetOne(self):
    """Return the blob name of the registered global constant 'ONE'."""
    return self.global_constants['ONE']
def NoOptim(self, *args, **kwargs):
    """Sentinel 'optimizer' meaning: apply no optimization to the parameter."""
    return None
@property
def breakdown_map(self):
    """Mapping breakdown-name -> contiguous index (0..n-1), or None.

    The stray `_map.setter` line was the remnant of a stripped
    `@breakdown_map.setter` decorator; both decorators are restored.
    """
    return self._breakdown_map

@breakdown_map.setter
def breakdown_map(self, breakdown_map):
    # TODO(xlwang): provide more rich feature information in breakdown_map;
    # keys must be strings and values must be exactly 0..n-1.
    assert isinstance(breakdown_map, dict)
    assert all(isinstance(k, six.string_types) for k in breakdown_map)
    assert sorted(breakdown_map.values()) == list(range(len(breakdown_map)))
    self._breakdown_map = breakdown_map
@pytest.mark.parametrize('val1,val2,result', [(0, 1, 0), (1, 1, 1), ('b', 'b', inf), (Decimal(0.5), Decimal(0.3), 1.2)])
def test_lt(val1, val2, result):
    """`_lt` across mixed operand types (ints, equal strings, Decimals).

    Bug fix: the leading bare `.parametrize(...)` expression was the remnant
    of a stripped `@pytest.mark.parametrize` decorator; restored.
    """
    assert _lt(val1, val2) == result
def load_data_by_class(args, path):
    """Load a training set from `path` with model-specific preprocessing.

    Returns (x_train, nt, image_dim); all three are None when `path` is None
    or when `args.model` matches no known model.
    """
    if path is None:
        return (None, None, None)
    # Bug fix: x_train/nt/image_dim were unbound (NameError at the return)
    # when args.model matched no branch; default them to None.
    x_train = nt = image_dim = None
    if args.model == 'cvae':
        (x_train, nt, image_dim) = load_data_set(path, batch=args.batch_size)
    elif args.model == 'cvae-style':
        (x_train, nt, image_dim) = load_data_set(path, isArray=True)
        # Collapse the one-hot channel to class indices, then rescale to [-1, 1].
        x_train = np.expand_dims(np.argmax(x_train, axis=-1), axis=-1)
        x_train = (x_train * 2) - 1
    elif args.model in ('AlphaGAN', 'CycleGAN', 'GAN2D_AE', 'WGAN2D_AE'):
        (x_train, nt, image_dim) = load_data_set(path, isArray=True, isTanh=True)
    else:
        print("Don't load dataSet")
    return (x_train, nt, image_dim)
def pattern_subst(pattern: List[str], rule_symbols: List[str], substitute_dict: Dict[str, str]) -> List[str]:
    """Apply `subst` once per rule symbol, threading the result through."""
    result = pattern
    for sym in rule_symbols:
        result = subst(result, sym, substitute_dict[sym])
    return result
def make_setuptools_bdist_wheel_args(setup_py_path, global_options, build_options, destination_dir):
    """Compose the argv for a `setup.py bdist_wheel` invocation."""
    base = make_setuptools_shim_args(setup_py_path, global_options=global_options, unbuffered_output=True)
    return base + ['bdist_wheel', '-d', destination_dir] + build_options
def compare_slot_values(slot_values_ref, slot_values_hyp, service, use_fuzzy_match):
    """Score hypothesis slot values against the reference, per service slot.

    Returns three parallel lists over service['slots']:
      list_cor    - per-slot correctness score in [0, 1]
      slot_active - whether the slot appears in the reference
      slot_cat    - whether the slot is categorical
    """
    list_cor = []
    slot_active = []
    slot_cat = []
    for slot in service['slots']:
        slot_name = slot['name']
        is_categorical = slot['is_categorical']
        slot_cat.append(is_categorical)
        in_ref = slot_name in slot_values_ref
        in_hyp = slot_name in slot_values_hyp
        slot_active.append(in_ref)
        if in_ref and in_hyp:
            ref_values = slot_values_ref[slot_name]
            hyp_value = slot_values_hyp[slot_name][0]
            if is_categorical:
                score = float(ref_values[0] == hyp_value)
            else:
                score = noncat_slot_value_match(ref_values, hyp_value, use_fuzzy_match)
            list_cor.append(score)
        elif in_ref or in_hyp:
            # Present on only one side: wrong.
            list_cor.append(0.0)
        else:
            # Absent from both: trivially correct.
            list_cor.append(1.0)
    num_slots = len(service['slots'])
    assert len(list_cor) == num_slots
    assert len(slot_active) == num_slots
    assert len(slot_cat) == num_slots
    return (list_cor, slot_active, slot_cat)
def test_jim3():
    """Round-trip: a categorical array's type string must re-parse to itself."""
    arr = ak.to_categorical(ak.Array(['one', 'one', 'two', 'three', 'one', 'three']))
    type_string = str(arr.type)
    print(type_string)
    parsed = deduce_type(type_string, True)
    assert isinstance(parsed, ak.types.ArrayType)
    assert str(parsed) == type_string
def ispitch(x):
    """True if `x` is a two-character pitch token: a known pitch letter
    followed by 'O' or a digit."""
    if len(x) != 2:
        return False
    return x[0] in char2pit and (x[1] == 'O' or x[1].isdigit())
class FQEImpl(ContinuousQFunctionMixin, FQEBaseImpl):
    """FQE implementation for continuous action spaces.

    Bug fix: the forwarder annotations said `DiscreteEnsembleQFunctionForwarder`,
    which contradicts `ContinuousQFunctionMixin`; the continuous forwarder is
    the consistent type here (the discrete one belongs to the discrete FQE
    variant).
    """

    _q_func_forwarder: ContinuousEnsembleQFunctionForwarder
    _targ_q_func_forwarder: ContinuousEnsembleQFunctionForwarder
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Convert a TensorFlow MobileBERT checkpoint into a PyTorch state dict."""
    # Architecture comes from the JSON config; weights from the TF checkpoint.
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
class ParallelRunner():
    """Runs batches of environment episodes in parallel worker processes.

    Each of `batch_size_run` environments lives in its own Process, reached
    through a Pipe; the runner steps all non-terminated envs in lock-step and
    collects transitions into an EpisodeBatch.
    """
    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.batch_size = self.args.batch_size_run
        # One (parent, worker) pipe pair per parallel environment.
        (self.parent_conns, self.worker_conns) = zip(*[Pipe() for _ in range(self.batch_size)])
        env_fn = env_REGISTRY[self.args.env]
        if (('sc2' in self.args.env) or ('group_matching' in self.args.env)):
            # Each worker gets a distinct seed derived from the base seed.
            base_seed = self.args.env_args.pop('seed')
            self.ps = [Process(target=env_worker, args=(worker_conn, self.args.entity_scheme, CloudpickleWrapper(partial(env_fn, seed=(base_seed + rank), **self.args.env_args)))) for (rank, worker_conn) in enumerate(self.worker_conns)]
        else:
            self.ps = [Process(target=env_worker, args=(worker_conn, self.args.entity_scheme, CloudpickleWrapper(partial(env_fn, env_args=self.args.env_args, args=self.args)))) for worker_conn in self.worker_conns]
        for p in self.ps:
            # Daemonize so workers die together with the main process.
            p.daemon = True
            p.start()
        # Env metadata is identical across workers; ask the first one.
        self.parent_conns[0].send(('get_env_info', args))
        self.env_info = self.parent_conns[0].recv()
        self.episode_limit = self.env_info['episode_limit']
        self.t = 0
        self.t_env = 0
        self.train_returns = []
        self.test_returns = []
        self.train_stats = {}
        self.test_stats = {}
        self.log_train_stats_t = (- 100000)

    def setup(self, scheme, groups, preprocess, mac):
        """Bind the batch factory and the multi-agent controller."""
        # +1 timestep to hold the final (post-terminal) state.
        self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, (self.episode_limit + 1), preprocess=preprocess, device=self.args.device)
        self.mac = mac
        self.scheme = scheme
        self.groups = groups
        self.preprocess = preprocess

    def get_env_info(self):
        return self.env_info

    def save_replay(self):
        # Replay saving is not supported by the parallel runner.
        pass

    def close_env(self):
        # Ask every worker process to shut its environment down.
        for parent_conn in self.parent_conns:
            parent_conn.send(('close', None))

    def reset(self, **kwargs):
        """Start a fresh episode batch and reset every worker environment."""
        self.batch = self.new_batch()
        for parent_conn in self.parent_conns:
            parent_conn.send(('reset', kwargs))
        # Gather initial observations from all workers into timestep 0.
        pre_transition_data = {}
        for parent_conn in self.parent_conns:
            data = parent_conn.recv()
            for (k, v) in data.items():
                if (k in pre_transition_data):
                    pre_transition_data[k].append(data[k])
                else:
                    pre_transition_data[k] = [data[k]]
        self.batch.update(pre_transition_data, ts=0)
        self.t = 0
        self.env_steps_this_run = 0

    def run(self, test_mode=False, test_scen=None, index=None, vid_writer=None):
        """Roll out one batch of episodes and return the filled EpisodeBatch."""
        if (test_scen is None):
            test_scen = test_mode
        assert (vid_writer is None), 'Writing videos not supported for ParallelRunner'
        self.reset(test=test_scen, index=index)
        all_terminated = False
        episode_returns = [0 for _ in range(self.batch_size)]
        episode_lengths = [0 for _ in range(self.batch_size)]
        self.mac.init_hidden(batch_size=self.batch_size)
        self.mac.eval()
        terminated = [False for _ in range(self.batch_size)]
        envs_not_terminated = [b_idx for (b_idx, termed) in enumerate(terminated) if (not termed)]
        final_env_infos = []
        while True:
            # Select actions only for the still-running envs of the batch.
            actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
            cpu_actions = actions.to('cpu').numpy()
            actions_chosen = {'actions': actions.unsqueeze(1)}
            self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            # cpu_actions is densely indexed over the live envs only.
            action_idx = 0
            for (idx, parent_conn) in enumerate(self.parent_conns):
                if (idx in envs_not_terminated):
                    if (not terminated[idx]):
                        parent_conn.send(('step', cpu_actions[action_idx]))
                    action_idx += 1
            post_transition_data = {'reward': [], 'terminated': []}
            if self.args.entity_scheme:
                pre_transition_data = {'entities': [], 'obs_mask': [], 'entity_mask': [], 'avail_actions': []}
            else:
                pre_transition_data = {'state': [], 'avail_actions': [], 'obs': []}
            envs_not_terminated = [b_idx for (b_idx, termed) in enumerate(terminated) if (not termed)]
            all_terminated = all(terminated)
            if all_terminated:
                break
            # Receive the step result from every live env.
            for (idx, parent_conn) in enumerate(self.parent_conns):
                if (not terminated[idx]):
                    data = parent_conn.recv()
                    post_transition_data['reward'].append((data['reward'],))
                    episode_returns[idx] += data['reward']
                    episode_lengths[idx] += 1
                    if (not test_mode):
                        self.env_steps_this_run += 1
                    env_terminated = False
                    if data['terminated']:
                        final_env_infos.append(data['info'])
                    # An episode cut off by the time limit is not a "real"
                    # termination for bootstrapping purposes.
                    if (data['terminated'] and (not data['info'].get('episode_limit', False))):
                        env_terminated = True
                    terminated[idx] = data['terminated']
                    post_transition_data['terminated'].append((env_terminated,))
                    for k in pre_transition_data:
                        pre_transition_data[k].append(data[k])
            self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            self.t += 1
            self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
        if (not test_mode):
            self.t_env += self.env_steps_this_run
        # Collect per-env stats for logging.
        for parent_conn in self.parent_conns:
            parent_conn.send(('get_stats', None))
        env_stats = []
        for parent_conn in self.parent_conns:
            env_stat = parent_conn.recv()
            env_stats.append(env_stat)
        cur_stats = (self.test_stats if test_mode else self.train_stats)
        cur_returns = (self.test_returns if test_mode else self.train_returns)
        log_prefix = ('test_' if test_mode else '')
        # Sum every info key across this run's final infos into the stats.
        infos = ([cur_stats] + final_env_infos)
        cur_stats.update({k: sum((d.get(k, 0) for d in infos)) for k in set.union(*[set(d) for d in infos])})
        cur_stats['n_episodes'] = (self.batch_size + cur_stats.get('n_episodes', 0))
        cur_stats['ep_length'] = (sum(episode_lengths) + cur_stats.get('ep_length', 0))
        cur_returns.extend(episode_returns)
        n_test_runs = (max(1, (self.args.test_nepisode // self.batch_size)) * self.batch_size)
        if (test_mode and (len(self.test_returns) == n_test_runs)):
            self._log(cur_returns, cur_stats, log_prefix)
        elif ((not test_mode) and ((self.t_env - self.log_train_stats_t) >= self.args.runner_log_interval)):
            self._log(cur_returns, cur_stats, log_prefix)
            if hasattr(self.mac.action_selector, 'epsilon'):
                self.logger.log_stat('epsilon', self.mac.action_selector.epsilon, self.t_env)
            if ('sc2' in self.args.env):
                self.logger.log_stat('forced_restarts', sum((es['restarts'] for es in env_stats)), self.t_env)
            self.log_train_stats_t = self.t_env
        return self.batch

    def _log(self, returns, stats, prefix):
        # Mean/std of returns, then per-key means; consumes (clears) both
        # inputs so the next logging window starts fresh.
        self.logger.log_stat((prefix + 'return_mean'), np.mean(returns), self.t_env)
        self.logger.log_stat((prefix + 'return_std'), np.std(returns), self.t_env)
        returns.clear()
        for (k, v) in stats.items():
            if (k != 'n_episodes'):
                self.logger.log_stat(((prefix + k) + '_mean'), (v / stats['n_episodes']), self.t_env)
        stats.clear()
def _make_unique_name(seen, name, min_version=0):
    """Return `name` (or `name_<i>`, i >= max(1, min_version)) such that the
    result is not in `seen`, and record it in `seen`."""
    assert name is not None
    version = min_version
    # A zero min_version means the bare name is tried first.
    candidate = '%s_%d' % (name, version) if version else name
    while candidate in seen:
        version += 1
        candidate = '%s_%d' % (name, version)
    seen.add(candidate)
    return candidate
def create_hparams(flags):
    """Bundle every parsed command-line flag into an HParams object.

    Mostly a 1:1 copy of `flags`. Derived values: encoder/decoder layer
    counts fall back to `num_layers`, SOS/EOS fall back to the vocab_utils
    defaults when the flags are empty, and `epoch_step` starts at 0.
    """
    return tf.contrib.training.HParams(src=flags.src, tgt=flags.tgt, train_prefix=flags.train_prefix, dev_prefix=flags.dev_prefix, test_prefix=flags.test_prefix, vocab_prefix=flags.vocab_prefix, embed_prefix=flags.embed_prefix, out_dir=flags.out_dir, num_units=flags.num_units, num_layers=flags.num_layers, num_encoder_layers=(flags.num_encoder_layers or flags.num_layers), num_decoder_layers=(flags.num_decoder_layers or flags.num_layers), dropout=flags.dropout, unit_type=flags.unit_type, encoder_type=flags.encoder_type, residual=flags.residual, time_major=flags.time_major, num_embeddings_partitions=flags.num_embeddings_partitions, attention=flags.attention, attention_architecture=flags.attention_architecture, output_attention=flags.output_attention, pass_hidden_state=flags.pass_hidden_state, optimizer=flags.optimizer, num_train_steps=flags.num_train_steps, batch_size=flags.batch_size, init_op=flags.init_op, init_weight=flags.init_weight, max_gradient_norm=flags.max_gradient_norm, learning_rate=flags.learning_rate, warmup_steps=flags.warmup_steps, warmup_scheme=flags.warmup_scheme, decay_scheme=flags.decay_scheme, colocate_gradients_with_ops=flags.colocate_gradients_with_ops, num_buckets=flags.num_buckets, max_train=flags.max_train, src_max_len=flags.src_max_len, tgt_max_len=flags.tgt_max_len, src_max_len_infer=flags.src_max_len_infer, tgt_max_len_infer=flags.tgt_max_len_infer, infer_batch_size=flags.infer_batch_size, beam_width=flags.beam_width, length_penalty_weight=flags.length_penalty_weight, sampling_temperature=flags.sampling_temperature, num_translations_per_input=flags.num_translations_per_input, sos=(flags.sos if flags.sos else vocab_utils.SOS), eos=(flags.eos if flags.eos else vocab_utils.EOS), subword_option=flags.subword_option, check_special_token=flags.check_special_token, forget_bias=flags.forget_bias, num_gpus=flags.num_gpus, epoch_step=0, steps_per_stats=flags.steps_per_stats, steps_per_external_eval=flags.steps_per_external_eval, 
    share_vocab=flags.share_vocab, metrics=flags.metrics.split(','), log_device_placement=flags.log_device_placement, random_seed=flags.random_seed, override_loaded_hparams=flags.override_loaded_hparams, num_keep_ckpts=flags.num_keep_ckpts, avg_ckpts=flags.avg_ckpts, num_intra_threads=flags.num_intra_threads, num_inter_threads=flags.num_inter_threads)
class Decoder(DecoderBase):
    """Decodes raw BM1688 command buffers into TIU / DMA command objects."""

    tiu_head_length = 50
    dma_head_length = 39

    def __init__(self, context: 'BM1688Context') -> None:
        super().__init__()
        self.context = context

    def decode_tiu_cmd(self, reg_buf: memoryview, *, cmd_id, offset, subnet_id, core_id) -> TiuCmd:
        """Decode one TIU command located at `offset` in `reg_buf`."""
        assert cmd_id is not None, '1688 must assign cmd_id manully'
        # Probe each candidate head layout until one resolves in the index.
        for head_cls in TiuHeads:
            head = head_cls.from_buffer(reg_buf, offset)
            op_info = tiu_index.get(head, None)
            if op_info is not None:
                break
        assert op_info is not None, f'Unable to decode TIU code at offset {offset} out of {len(reg_buf)} total. Potential head identified as {head}'
        op_clazz = op_class_dic[op_info.name]
        reg = self.decode_reg(op_clazz, buf=reg_buf, offset=offset)
        # op_clazz.length is in bits; slice out the raw command bytes.
        buf = reg_buf[offset:offset + op_clazz.length // 8]
        param_fn = self.context.opparam_converter.get(reg.OP_NAME, None)
        cmd = op_info(reg, buf=buf, cmd_id=cmd_id, subnet_id=subnet_id, core_id=core_id, param_fn=param_fn)
        return cmd

    def decode_dma_cmd(self, reg_buf: memoryview, *, cmd_id, offset, subnet_id, core_id) -> DmaCmd:
        """Decode one DMA command located at `offset` in `reg_buf`."""
        assert cmd_id is not None, '1688 must assign cmd_id manully'
        head = DmaHead.from_buffer(reg_buf, offset)
        op_info = dma_index.get((head.cmd_short, head.cmd_type, head.cmd_sp_func), None)
        assert op_info is not None, f'Unable to decode DMA code at offset {offset} out of {len(reg_buf)} total. Potential head identified as {head}'
        op_clazz = op_class_dic[op_info.name]
        reg = self.decode_reg(op_clazz, reg_buf, offset=offset)
        buf = reg_buf[offset:offset + op_clazz.length // 8]
        param_fn = self.context.opparam_converter.get(reg.OP_NAME, None)
        cmd = op_info(reg, buf=buf, cmd_id=cmd_id, subnet_id=subnet_id, core_id=core_id, param_fn=param_fn)
        return cmd

    def decode_dma_cmds(self, reg_buf: memoryview, *, core_id=0, subnet_id=0, **_) -> List[atomic_reg]:
        """Decode consecutive DMA commands until a terminating sys command or
        an all-zero tail is reached."""
        offset = 0
        res = []
        cmd_id = 1
        while offset < len(reg_buf):
            cmd = self.decode_dma_cmd(reg_buf, offset=offset, core_id=core_id, cmd_id=cmd_id, subnet_id=subnet_id)
            cmd_id += 1
            offset += cmd.reg.length // 8
            res.append(cmd)
            if isinstance(cmd, dma_sys) and cmd.cmd_special_function == 0 and cmd.reserved0 == 1:
                break
            if self.buf_is_end(reg_buf[offset:], cmd, dma_sys):
                break
        return res

    def decode_tiu_cmds(self, reg_buf: memoryview, *, core_id=0, subnet_id=0, **_) -> List[atomic_reg]:
        """Decode consecutive TIU commands until a terminating sys command or
        an all-zero tail is reached."""
        offset = 0
        res = []
        cmd_id = 1
        while offset < len(reg_buf):
            cmd = self.decode_tiu_cmd(reg_buf, offset=offset, core_id=core_id, subnet_id=subnet_id, cmd_id=cmd_id)
            cmd_id += 1
            offset += cmd.reg.length // 8
            res.append(cmd)
            if isinstance(cmd.reg, tiu_sys) and cmd.reg.tsk_eu_typ == 31 and cmd.reg.rsvd1 == 1:
                break
            if self.buf_is_end(reg_buf[offset:], cmd, tiu_sys):
                break
        return res

    @staticmethod
    def buf_is_end(reg_buf, operation: BaseTpuCmd, end_op):
        """True when the last decoded command is a sys end-op and the short
        remaining tail of the buffer is all zeros.

        Bug fix: this helper takes no `self` yet is invoked as
        `self.buf_is_end(tail, cmd, sys_cls)` with three arguments; without
        `@staticmethod` the bound call would shift every argument by one.
        Restored the decorator.
        """
        is_sys = isinstance(operation.reg, end_op)
        is_less_1024 = (len(reg_buf) * 8) < 1025
        if is_sys and is_less_1024 and (not np.any(np.frombuffer(reg_buf, np.uint8))):
            return True
        return False
def test_set_progress_bar_enabled():
    """The global progress-bar switch must be reflected by the query fn."""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    disabled_after_enable = are_progress_bars_disabled()
    assert not disabled_after_enable
def register_Ns3RectangleValue_methods(root_module, cls):
    """Register constructors and methods of ns3::RectangleValue on `cls`."""
    add_ctor = cls.add_constructor
    add_method = cls.add_method
    # Default, value and copy constructors.
    add_ctor([])
    add_ctor([param('ns3::Rectangle const &', 'value')])
    add_ctor([param('ns3::RectangleValue const &', 'arg0')])
    add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    add_method('Get', 'ns3::Rectangle', [], is_const=True)
    add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    add_method('Set', 'void', [param('ns3::Rectangle const &', 'value')])
    return
@test_utils.test(arch=archs_support_ndarray_ad, require=ti.extension.adstack)
def test_multiple_ib_multiple_outermost():
    """Autodiff through multiple inner blocks inside several outermost loops:
    y accumulates x 24 times, so dy/dx == 24.

    Bug fix: the leading bare `_utils.test(...)` expression was the remnant of
    a stripped `@test_utils.test(...)` decorator; restored. `@ti.kernel` on
    `compute_y` is also restored — presumably required for ti.ad.Tape to
    record and differentiate the kernel (TODO confirm against the taichi
    test suite).
    """
    x = ti.ndarray(float, (), needs_grad=True)
    y = ti.ndarray(float, (), needs_grad=True)

    @ti.kernel
    def compute_y(x: ti.types.ndarray(), y: ti.types.ndarray()):
        for j in range(2):
            for i in range(3):
                y[None] += x[None]
            for i in range(3):
                y[None] += x[None]
        for j in range(2):
            for i in range(3):
                y[None] += x[None]
            for i in range(3):
                y[None] += x[None]
    x[None] = 1.0
    with ti.ad.Tape(y):
        compute_y(x, y)
    assert (y[None] == 24.0)
    assert (x.grad[None] == 24.0)
class MemoryChunkCCRetval(MemoryChunk):
    """Memory chunk for a ComplexNumber return value in generated Cython code.

    Emits code fragments (via the `je` template helper) that allocate a fresh
    element of the domain and pass its real component to the C routine.
    """
    def declare_class_members(self):
        # The return value needs no persistent class-level storage.
        return ''
    def declare_call_locals(self):
        # Allocate a fresh ComplexNumber local to receive the result.
        return je(ri(8, '\n        cdef ComplexNumber {{ myself.name }} = (self.domain_element._new())\n        '), myself=self)
    def declare_parameter(self):
        # The parameter is passed by C reference to the underlying storage type.
        return ('%s %s' % (self.storage_type.c_reference_type(), self.name))
    def pass_argument(self):
        # Hand the real-part field of the ComplexNumber to the C routine as mpc_t.
        return je('(<mpc_t>({{ myself.name }}.__re))', myself=self)
    def pass_call_c_argument(self):
        # The C-level call writes into the shared 'result' slot instead.
        return 'result'
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """Fast (HuggingFace *tokenizers*-backed) Blenderbot tokenizer.

    Byte-level BPE tokenizer; sequences are terminated with ``</s>`` only
    (see ``build_inputs_with_special_tokens``).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        # Rebuild the pre-tokenizer if its stored add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if (pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # Keep the post-processor consistent with add_prefix_space/trim_offsets.
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The JSON round-trip turns (token, id) tuples into lists; restore them.
            if ('sep' in state):
                state['sep'] = tuple(state['sep'])
            if ('cls' in state):
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if (state.get('add_prefix_space', add_prefix_space) != add_prefix_space):
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if (state.get('trim_offsets', trim_offsets) != trim_offsets):
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    # NOTE(review): decorators were mangled in this revision ("_token.setter"
    # residue, missing @property); restored so the class body is valid again.
    @property
    def mask_token(self) -> str:
        """The mask token as a string, or None (with a logged error) when unset."""
        if (self._mask_token is None):
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token absorbs the preceding space so "<mask>" behaves like " <mask>".
        value = (AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value)
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert (self.add_prefix_space or (not is_split_into_words)), f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Blenderbot does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None):
        """Append only the EOS token; token_ids_1 is intentionally ignored."""
        return (token_ids_0 + [self.eos_token_id])

    def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
        """Flatten a Conversation into ids, keeping at most model_max_length tokens."""
        inputs = []
        for (is_user, text) in conversation.iter_texts():
            if is_user:
                inputs.append((' ' + text))
            else:
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if (len(input_ids) > self.model_max_length):
            # Keep the most recent tokens of the conversation.
            input_ids = input_ids[(- self.model_max_length):]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids
class RandomIdentitySamplerGcn(Sampler):
    """Identity-balanced random sampler: each batch holds ``num_instances``
    samples for each of ``batch_size // num_instances`` randomly drawn ids.

    ``data_source`` yields ``(_, pid, _, _)`` tuples; indices are grouped by pid.
    The RNG call order below is part of the sampling behavior and must not change.
    """
    def __init__(self, data_source, batch_size, num_instances):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = (self.batch_size // self.num_instances)
        # pid -> list of dataset indices belonging to that identity.
        self.index_dic = defaultdict(list)
        for (index, (_, pid, _, _)) in enumerate(self.data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        # Estimated epoch length: for each pid, the largest multiple of
        # num_instances its (oversampled if needed) index list yields.
        self.length = 0
        for pid in self.pids:
            idxs = self.index_dic[pid]
            num = len(idxs)
            if (num < self.num_instances):
                num = self.num_instances
            self.length += (num - (num % self.num_instances))
    def __iter__(self):
        # Chunk each pid's shuffled indices into groups of num_instances.
        batch_idxs_dict = defaultdict(list)
        for pid in self.pids:
            idxs = copy.deepcopy(self.index_dic[pid])
            if (len(idxs) < self.num_instances):
                # Too few samples for this id: oversample with replacement.
                idxs = np.random.choice(idxs, size=self.num_instances, replace=True)
            random.shuffle(idxs)
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if (len(batch_idxs) == self.num_instances):
                    batch_idxs_dict[pid].append(batch_idxs)
                    batch_idxs = []
        # Draw pid groups until too few ids remain to fill a whole batch.
        avai_pids = copy.deepcopy(self.pids)
        final_idxs = []
        while (len(avai_pids) >= self.num_pids_per_batch):
            selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
            for pid in selected_pids:
                batch_idxs = batch_idxs_dict[pid].pop(0)
                final_idxs.extend(batch_idxs)
                if (len(batch_idxs_dict[pid]) == 0):
                    avai_pids.remove(pid)
        return iter(final_idxs)
    def __len__(self):
        return self.length
def load_config_from_file(file_path):
    """Return a deep copy of the global default config with *file_path* merged on top."""
    cfg = copy.deepcopy(g_cfg)
    cfg.merge_from_file(file_path)
    return cfg
def CalculateGutmanTopo(mol):
    """Compute the Gutman topological index of *mol* from simple vertex degrees.

    Delegates to the shared ``_Gutman`` helper, feeding it the molecule's
    topological distance matrix, per-atom degree vector, and atom count.
    """
    atom_count = mol.GetNumAtoms(onlyExplicit=True)
    degrees = np.array([atom.GetDegree() for atom in mol.GetAtoms()])
    dist_matrix = Chem.GetDistanceMatrix(mol)
    return _Gutman(dist_matrix, degrees, atom_count)
class Function(EntryBase):
    """A parsed 'function' entry: its parameters, optional return type, and
    whether it is flagged as a device command."""
    def __init__(self, j):
        super().__init__(j, 'function')
        self.return_value_type = None
        self.params = []
        self.is_device_command = False
        # A field with an empty name denotes the return value; every other
        # field is an ordinary parameter.
        for raw in j.get('parameters', []):
            parsed = Field(raw)
            if parsed.name.snake_case == '':
                self.return_value_type = parsed.type
            else:
                self.params.append(parsed)
        # Mere presence of the key marks a device command (its value is ignored).
        if 'is_device_command' in j:
            self.is_device_command = True
class BuildModel_M3_Full(object):
    """PicoScope 5000A + serial capture script for building the M3 full trace set.

    NOTE(review): the whole acquisition script sits inside an
    ``if __name__ == '__main__'`` block in the class body, so it runs at
    class-definition time when the module is executed as a script — unusual
    structure, kept as-is.
    """
    if (__name__ == '__main__'):
        # --- acquisition parameters ---
        port = '/dev/ttyUSB0'
        step = 1000                 # progress print interval, in traces
        repeat = 10                 # hardware captures averaged per stored trace
        N = (5000 * 125)            # total number of traces to record
        samples = 1000              # samples per capture
        timebase = 1
        post_trigger = True
        threshold = 2000            # trigger threshold in mV on channel B (see mV2adc below)
        posedge_trigger = True
        vertical_offset = 0
        delay = 500                 # trigger delay passed to ps5000aSetSimpleTrigger
        plain_len = 32
        cipher_len = 32
        random.seed()
        ser = serial.Serial(port)
        time.sleep(0.2)
        if post_trigger:
            preTriggerSamples = 0
            postTriggerSamples = samples
        else:
            preTriggerSamples = samples
            postTriggerSamples = 0
        # --- open the scope at 8-bit resolution ---
        chandle = ctypes.c_int16()
        status = ps.ps5000aOpenUnit(ctypes.byref(chandle), None, ps.PS5000A_DEVICE_RESOLUTION['PS5000A_DR_8BIT'])
        if (status != PICO_STATUS['PICO_OK']):
            raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
        # Channel A: measurement input (±1 V, DC); channel B: trigger input (±5 V, DC).
        status = ps.ps5000aSetChannel(chandle, ps.PICO_CHANNEL['A'], 1, ps.PICO_COUPLING['DC'], ps.PS5000A_RANGE['PS5000A_1V'], vertical_offset)
        if (status != PICO_STATUS['PICO_OK']):
            raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
        status = ps.ps5000aSetChannel(chandle, ps.PICO_CHANNEL['B'], 1, ps.PICO_COUPLING['DC'], ps.PS5000A_RANGE['PS5000A_5V'], 0)
        if (status != PICO_STATUS['PICO_OK']):
            raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
        # Simple edge trigger on channel B, rising or falling per posedge_trigger.
        if posedge_trigger:
            status = ps.ps5000aSetSimpleTrigger(chandle, 1, ps.PICO_CHANNEL['B'], mV2adc(threshold, ps.PS5000A_RANGE['PS5000A_5V'], ctypes.c_int16(32512)), ps.PS5000A_THRESHOLD_DIRECTION['PS5000A_RISING'], delay, 0)
        else:
            status = ps.ps5000aSetSimpleTrigger(chandle, 1, ps.PICO_CHANNEL['B'], mV2adc(threshold, ps.PS5000A_RANGE['PS5000A_5V'], ctypes.c_int16(32512)), ps.PS5000A_THRESHOLD_DIRECTION['PS5000A_FALLING'], delay, 0)
        if (status != PICO_STATUS['PICO_OK']):
            raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
        # Rapid-block mode: 'repeat' captures per run, one memory segment each.
        cmaxSamples = ctypes.c_int32()
        status = ps.ps5000aMemorySegments(chandle, repeat, ctypes.byref(cmaxSamples))
        if (status != PICO_STATUS['PICO_OK']):
            raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
        print('Max samples for each trace={0}'.format(cmaxSamples))
        status = ps.ps5000aSetNoOfCaptures(chandle, repeat)
        if (status != PICO_STATUS['PICO_OK']):
            raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
        # One ctypes buffer per capture segment, all on channel A.
        Databuffer = []
        for i in range(repeat):
            buffer = (ctypes.c_int16 * samples)()
            Databuffer.append(buffer)
            status = ps.ps5000aSetDataBuffers(chandle, ps.PICO_CHANNEL['A'], ctypes.byref(buffer), None, samples, i, 0)
            if (status != PICO_STATUS['PICO_OK']):
                raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
        trs = TRS_TraceSet.TRS_TraceSet('BuildModel_M3_Full_1000Samples_repeat10.trs')
        trs.write_header(N, samples, True, (32 + 32), 2e-09, (2 / 65536))
        for i in range(0, N):
            # Random plaintext; byte 28 cycles through 0..124 to sweep the modeled value.
            plaintext = bytearray([secrets.randbits(8) for j in range(0, plain_len)])
            plaintext[28] = (i % 125)
            status = ps.ps5000aRunBlock(chandle, preTriggerSamples, postTriggerSamples, timebase, None, 0, None, None)
            if (status != PICO_STATUS['PICO_OK']):
                raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
            # Send plaintext + repeat count to the target; read back the ciphertext.
            ser.write(plaintext)
            ser.write(repeat.to_bytes(1, byteorder='big'))
            ciphertext = bytearray(ser.read(cipher_len))
            # Busy-wait until the scope reports all captures complete.
            ready = ctypes.c_int16(0)
            check = ctypes.c_int16(0)
            while (ready.value == check.value):
                status = ps.ps5000aIsReady(chandle, ctypes.byref(ready))
            overflow = (ctypes.c_int16 * repeat)()
            cTotalSamples = ctypes.c_int32(samples)
            status = ps.ps5000aGetValuesBulk(chandle, ctypes.byref(cTotalSamples), 0, (repeat - 1), 0, 0, ctypes.byref(overflow))
            if (status != PICO_STATUS['PICO_OK']):
                raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
            for j in range(repeat):
                if (overflow[j] != 0):
                    print('overflow!')
            # Average the repeated captures to suppress noise, then store the trace.
            avg = np.round(np.mean(Databuffer, axis=0)).astype(np.int16)
            trs.write_trace(plaintext, ciphertext, np.array(avg), True)
            if (((i % step) == 0) and (i > 0)):
                print(('i=' + str(i)))
                print('plain=')
                PrintHexData(plaintext)
                print('cipher=')
                PrintHexData(ciphertext)
        trs.flush()
        trs.close()
def get_model_bicond_sepembed(batch_size, max_seq_length, input_size, hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout):
    """Bidirectional conditional LSTM model with separate embedding matrices.

    Encodes the first sequence, conditions a second encoder on its final state
    (in both directions), concatenates the two final outputs and projects them
    to ``target_size`` through tanh or softmax.

    Written against a legacy TensorFlow API: ``tf.split``/``tf.concat`` take
    the axis as the FIRST argument, and ``tf.placeholder``/``rnn_cell`` are
    pre-1.0 constructs.

    Returns ``(model_output, [inputs_placeholder, inputs_cond_placeholder])``.
    """
    inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])
    inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])
    # Freeze the embedding matrices when pretrained embeddings are requested.
    cont_train = True
    if (pretrain == 'pre'):
        cont_train = False
    embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], (- 0.1), 0.1), name='embedding_matrix', trainable=cont_train)
    # NOTE(review): this second variable reuses the name 'embedding_matrix' —
    # presumably it should get a distinct name; confirm whether intentional.
    embedding_matrix_cond = tf.Variable(tf.random_uniform([vocab_size, input_size], (- 0.1), 0.1), name='embedding_matrix', trainable=cont_train)
    embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)
    embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix_cond, inputs_cond)
    # Split into per-timestep tensors for the step-wise Encoder API.
    inputs_list = [tf.squeeze(x) for x in tf.split(1, max_seq_length, embedded_inputs)]
    inputs_cond_list = [tf.squeeze(x) for x in tf.split(1, max_seq_length, embedded_inputs_cond)]
    drop_prob = None
    if dropout:
        drop_prob = 0.1
    lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)
    start_state = tf.zeros([batch_size, lstm_encoder.state_size])
    # Forward pass: encode the first sequence, then condition the second
    # encoder on its final state.
    (fw_outputs, fw_states) = lstm_encoder(inputs_list, start_state, 'LSTM')
    (fw_outputs_cond, fw_states_cond) = lstm_encoder(inputs_cond_list, fw_states[(- 1)], 'LSTMcond')
    fw_outputs_fin = fw_outputs_cond[(- 1)]
    # Backward pass: same scheme over the reversed sequences.
    (bw_outputs, bw_states) = lstm_encoder(inputs_list[::(- 1)], start_state, 'LSTM_bw')
    (bw_outputs_cond, bw_states_cond) = lstm_encoder(inputs_cond_list[::(- 1)], bw_states[(- 1)], 'LSTMcond_bw')
    bw_outputs_fin = bw_outputs_cond[(- 1)]
    outputs_fin = tf.concat(1, [fw_outputs_fin, bw_outputs_fin])
    if (tanhOrSoftmax == 'tanh'):
        model = Projector(target_size, non_linearity=tf.nn.tanh, bias=True)(outputs_fin)
    else:
        model = Projector(target_size, non_linearity=tf.nn.softmax, bias=True)(outputs_fin)
    return (model, [inputs, inputs_cond])
def merge_meta(meta1, meta2):
    """Combine sentence-level metadata (*meta2*) with its context span (*meta1*).

    Groups the links of meta2 by their first element, and produces both
    sentence-relative spans and context-relative spans shifted by
    ``meta1['start']``.
    """
    grouped = {}
    for link in meta2['links'][0]:
        grouped.setdefault(link[0], []).append([link[2], link[3]])
    offset = meta1['start']
    shifted = {key: [[start + offset, end + offset] for (start, end) in spans]
               for (key, spans) in grouped.items()}
    return {
        'md5': meta2['md5'],
        'title': meta2['title'],
        's': {'text': meta2['s'], 'links': grouped},
        's_with_context': {
            'text': meta1['s_with_context'],
            's_loc': [meta1['start'], meta1['end']],
            'links': shifted,
        },
    }
# NOTE(review): the two decorator lines were mangled in this revision
# ("_safe_enum" / "_enum" residues); restored to the enum-safety decorators
# used for extensible enumerations — confirm names against the project's
# dtypes module.
@undefined_safe_enum
@extensible_enum
class OMPScheduleType(aenum.AutoNumberEnum):
    """OpenMP loop scheduling kind (emitted as 'schedule(...)' clauses)."""
    Default = ()  # no explicit schedule clause
    Static = ()
    Dynamic = ()
    Guided = ()
def bench4():
    """Benchmark: expand (x^29 + 17x - 5)^200 over QQ[x] using Sage.

    Returns a (description, elapsed CPU time) pair.
    """
    desc = 'Rational polynomial arithmetic using Sage. Compute (x^29+17*x-5)^200.'
    x = PolynomialRing(QQ, 'x').gen()
    start = cputime()
    poly = (x ** 29) + (17 * x) - 5
    _ = poly ** 200
    return (desc, cputime(start))
class Graph(Layer):
    """Arbitrary-connection container of named layers (legacy Keras Graph core).

    Holds named inputs, intermediate nodes and outputs; nodes are wired
    together by name, optionally through Merge/Siamese combinators.

    NOTE(review): several decorator lines were mangled in this revision
    (residues such as "_enabled.setter" / "_cache.setter", which would raise
    NameError at class creation); the @property / setter decorators below
    were restored accordingly — confirm against the original source.
    """

    def __init__(self):
        self.namespace = set()        # every node/input identifier in use
        self.nodes = OrderedDict()    # name -> layer, insertion order preserved
        self.inputs = {}
        self.input_order = []
        self.outputs = {}
        self.output_order = []
        self.input_config = []
        self.output_config = []
        self.node_config = []
        self.layer_cache = {}
        self.shape_cache = {}
        self._cache_enabled = True

    def __call__(self, X, mask=None, train=False):
        """Apply the graph to tensors; a dict maps input names to tensors."""
        if (type(X) != dict):
            return super(Graph, self).__call__(X, mask, train)
        else:
            # Temporarily rewire each named input to a fresh pass-through layer
            # holding the supplied tensor, compute, then restore the wiring.
            tmp_cache_enabled = self.cache_enabled
            self.cache_enabled = False
            tmp_previous = {}
            for (name, input) in self.inputs.items():
                layer = Layer(batch_input_shape=input.input_shape)
                layer.input = X[name]
                if hasattr(self, 'get_input_mask'):
                    layer.get_input_mask = (lambda _: mask[name])
                if hasattr(input, 'previous'):
                    tmp_previous[name] = input.previous
                input.set_previous(layer, False)
            Y = self.get_output(train=train)
            for (name, input) in self.inputs.items():
                if (name in tmp_previous):
                    input.set_previous(tmp_previous[name], False)
                else:
                    input.clear_previous(False)
            self.cache_enabled = tmp_cache_enabled
            return Y

    @property
    def cache_enabled(self):
        return self._cache_enabled

    @cache_enabled.setter
    def cache_enabled(self, value):
        # Propagate the flag to every node and input layer.
        self._cache_enabled = value
        for l in self.nodes.values():
            l.cache_enabled = value
        for l in self.inputs.values():
            l.cache_enabled = value

    @property
    def layer_cache(self):
        return super(Graph, self).layer_cache

    @layer_cache.setter
    def layer_cache(self, value):
        # Share one layer cache across the container and all its layers.
        self._layer_cache = value
        for layer in self.nodes.values():
            layer.layer_cache = self._layer_cache
        for layer in self.inputs.values():
            layer.layer_cache = self._layer_cache

    @property
    def shape_cache(self):
        return super(Graph, self).shape_cache

    @shape_cache.setter
    def shape_cache(self, value):
        # Share one shape cache across the container and all its layers.
        self._shape_cache = value
        for layer in self.nodes.values():
            layer.shape_cache = self._shape_cache
        for layer in self.inputs.values():
            layer.shape_cache = self._shape_cache

    @property
    def nb_input(self):
        return len(self.inputs)

    @property
    def nb_output(self):
        return len(self.outputs)

    @property
    def trainable_weights(self):
        weights = []
        for l in self.nodes.values():
            if l.trainable:
                weights += l.get_params()[0]
        return weights

    @property
    def regularizers(self):
        regularizers = []
        for l in self.nodes.values():
            if l.trainable:
                regularizers += l.get_params()[1]
        return regularizers

    @property
    def constraints(self):
        constraints = []
        for l in self.nodes.values():
            if l.trainable:
                constraints += l.get_params()[2]
        return constraints

    @property
    def learning_rate_multipliers(self):
        learning_rate_multipliers = []
        for l in self.nodes.values():
            if l.trainable:
                learning_rate_multipliers += l.get_params()[3]
        return learning_rate_multipliers

    @property
    def updates(self):
        updates = []
        for l in self.nodes.values():
            if l.trainable:
                updates += l.get_params()[4]
        return updates

    @property
    def state_updates(self):
        """Updates from stateful layers only (applied at inference time too)."""
        state_updates = []
        for l in self.nodes.values():
            if getattr(l, 'stateful', False):
                state_updates += l.get_params()[4]
        return state_updates

    def reset_states(self):
        for l in self.nodes.values():
            if (hasattr(l, 'reset_states') and getattr(l, 'stateful', False)):
                l.reset_states()

    def set_previous(self, layer, connection_map={}, reset_weights=True):
        """Connect this graph's inputs to another layer/container's outputs.

        ``connection_map`` (read-only here) maps this graph's input names to
        the upstream container's output names when there are several inputs.
        """
        if (self.nb_input != layer.nb_output):
            raise Exception('Cannot connect layers: input count does not match output count.')
        if (self.nb_input == 1):
            self.inputs[self.input_order[0]].set_previous(layer, reset_weights)
        else:
            if (not connection_map):
                raise Exception('Cannot attach multi-input layer: no connection_map provided.')
            for (k, v) in connection_map.items():
                if ((k in self.inputs) and (v in layer.outputs)):
                    self.inputs[k].set_previous(layer.outputs[v], reset_weights)
                else:
                    raise Exception('Invalid connection map.')

    def clear_previous(self, reset_weights=True):
        for k in self.inputs.values():
            k.clear_previous(reset_weights)

    @property
    def input_shape(self):
        if (self.nb_input == 1):
            return self.inputs[self.input_order[0]].input_shape
        else:
            return dict([(k, v.input_shape) for (k, v) in self.inputs.items()])

    def get_input(self, train=False):
        if (len(self.inputs) == len(self.outputs) == 1):
            return self.inputs[self.input_order[0]].get_input(train)
        else:
            return dict([(k, v.get_input(train)) for (k, v) in self.inputs.items()])

    @property
    def input(self):
        return self.get_input()

    @property
    def output_shape(self):
        if (self.nb_output == 1):
            return self.outputs[self.output_order[0]].output_shape
        else:
            return dict([(k, v.output_shape) for (k, v) in self.outputs.items()])

    def get_output(self, train=False):
        if (len(self.inputs) == len(self.outputs) == 1):
            return self.outputs[self.output_order[0]].get_output(train)
        else:
            return dict([(k, v.get_output(train)) for (k, v) in self.outputs.items()])

    def add_input(self, name, input_shape=None, batch_input_shape=None, dtype='float'):
        """Register a named graph input with the given shape and dtype."""
        if (name in self.namespace):
            raise Exception(('Duplicate node identifier: ' + name))
        self.namespace.add(name)
        self.input_order.append(name)
        layer = Layer(name=name)
        if input_shape:
            layer.set_input_shape(((None,) + tuple(input_shape)))
        elif batch_input_shape:
            layer.set_input_shape(batch_input_shape)
        if (dtype == 'float'):
            layer.input = K.placeholder(shape=layer.input_shape, name=name)
        elif ((input_shape and (len(input_shape) == 1)) or (batch_input_shape and (len(batch_input_shape) == 2))):
            # int inputs are only valid for 2D (batch, index) tensors (Embedding).
            layer.input = K.placeholder(shape=layer.input_shape, dtype='int32', name=name)
        else:
            raise Exception('Type "int" can only be used with ndim==2 (Embedding).')
        self.inputs[name] = layer
        config = {'name': name, 'dtype': dtype}
        if batch_input_shape:
            config['batch_input_shape'] = batch_input_shape
        else:
            config['input_shape'] = input_shape
        self.input_config.append(config)

    def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat', concat_axis=(- 1), dot_axes=(- 1), create_output=False):
        """Add a named layer fed by one input name, or by a merge of several.

        ``inputs`` (read-only default) lists node/input names to merge.
        """
        if (name in self.namespace):
            raise Exception(('Duplicate node identifier: ' + name))
        layer.name = name
        if input:
            if (input not in self.namespace):
                raise Exception(('Unknown node/input identifier: ' + input))
            if (input in self.nodes):
                layer.set_previous(self.nodes[input])
            elif (input in self.inputs):
                layer.set_previous(self.inputs[input])
        if inputs:
            to_merge = []
            for n in inputs:
                if (n in self.nodes):
                    to_merge.append(self.nodes[n])
                elif (n in self.inputs):
                    to_merge.append(self.inputs[n])
                else:
                    raise Exception(('Unknown identifier: ' + n))
            merge = Merge(to_merge, mode=merge_mode, concat_axis=concat_axis, dot_axes=dot_axes)
            layer.set_previous(merge)
        self.namespace.add(name)
        layer.layer_cache = self.layer_cache
        layer.shape_cache = self.shape_cache
        self.nodes[name] = layer
        self.node_config.append({'name': name, 'input': input, 'inputs': inputs, 'merge_mode': merge_mode, 'concat_axis': concat_axis, 'dot_axes': dot_axes, 'create_output': create_output})
        if create_output:
            self.add_output(name, input=name)

    def add_shared_node(self, layer, name, inputs=[], merge_mode=None, concat_axis=(- 1), dot_axes=(- 1), outputs=[], create_output=False):
        """Share one layer across several inputs via a Siamese wrapper.

        Without a merge_mode, one SiameseHead node is created per name in
        ``outputs``; with one, the merged result is a single node.
        """
        if (name in self.namespace):
            raise Exception(('Duplicate node identifier: ' + name))
        for o in outputs:
            if (o in self.namespace):
                raise Exception(('Duplicate node identifier: ' + o))
        if merge_mode:
            if (merge_mode not in {'sum', 'ave', 'mul', 'dot', 'cos', 'concat', 'join'}):
                raise Exception('Invalid merge mode')
        layers = []
        for i in range(len(inputs)):
            input = inputs[i]
            if (input in self.nodes):
                n = self.nodes[input]
                if (n.__class__.__name__ == 'Siamese'):
                    # Unmerged Siamese nodes are expanded into their heads.
                    if (n.merge_mode is None):
                        for j in range(len(n.inputs)):
                            sh = SiameseHead(j)
                            sh.previous = n
                            layers.append(sh)
                    else:
                        layers.append(n)
                else:
                    layers.append(n)
            elif (input in self.inputs):
                n = self.inputs[input]
                layers.append(n)
            else:
                raise Exception(('Unknown identifier: ' + input))
        s = Siamese(layer, layers, merge_mode, concat_axis=concat_axis, dot_axes=dot_axes, is_graph=True)
        self.namespace.add(name)
        self.nodes[name] = s
        self.node_config.append({'name': name, 'inputs': inputs, 'merge_mode': merge_mode, 'concat_axis': concat_axis, 'dot_axes': dot_axes, 'create_output': (create_output if merge_mode else False)})
        if (not merge_mode):
            for i in range(len(outputs)):
                sh = SiameseHead(i)
                sh.previous = s
                sh_name = outputs[i]
                sh.name = sh_name
                self.namespace.add(sh_name)
                self.nodes[sh_name] = sh
                self.node_config.append({'name': sh_name, 'inputs': [name], 'create_output': create_output})
                if create_output:
                    self.add_output(sh_name, input=sh_name)
        if (create_output and merge_mode):
            if (merge_mode == 'join'):
                raise Exception('Output can not be of type OrderedDict')
            self.add_output(name, input=name)

    def add_output(self, name, input=None, inputs=[], merge_mode='concat', concat_axis=(- 1), dot_axes=(- 1)):
        """Expose an existing node (or a merge of several) as a named output."""
        if (name in self.output_order):
            raise Exception(('Duplicate output identifier: ' + name))
        if input:
            if (input not in self.namespace):
                raise Exception(('Unknown node/input identifier: ' + input))
            if (input in self.nodes):
                self.outputs[name] = self.nodes[input]
            elif (input in self.inputs):
                self.outputs[name] = self.inputs[input]
        if inputs:
            to_merge = []
            for n in inputs:
                if (n not in self.nodes):
                    raise Exception(('Unknown identifier: ' + n))
                to_merge.append(self.nodes[n])
            merge = Merge(to_merge, mode=merge_mode, concat_axis=concat_axis, dot_axes=dot_axes)
            self.outputs[name] = merge
        self.output_order.append(name)
        self.output_config.append({'name': name, 'input': input, 'inputs': inputs, 'merge_mode': merge_mode, 'concat_axis': concat_axis, 'dot_axes': dot_axes})

    def get_config(self):
        return {'name': self.__class__.__name__, 'input_config': self.input_config, 'node_config': self.node_config, 'output_config': self.output_config, 'input_order': self.input_order, 'output_order': self.output_order, 'nodes': dict([(c['name'], self.nodes[c['name']].get_config()) for c in self.node_config])}

    def count_params(self):
        return sum([layer.count_params() for layer in self.nodes.values()])

    def get_weights(self):
        weights = []
        for layer in self.nodes.values():
            weights += layer.get_weights()
        return weights

    def set_weights(self, weights):
        # Distribute the flat weight list across the nodes in insertion order.
        for layer in self.nodes.values():
            nb_param = len(layer.get_weights())
            layer.set_weights(weights[:nb_param])
            weights = weights[nb_param:]
class Hook(object):
    """Abstract training hook interface; subclasses must override both methods."""
    def __init__(self):
        # Abstract: direct instantiation is not supported.
        raise NotImplementedError
    def __call__(self, sess, epoch, iteration, model, loss):
        # Invoked once per reporting point during training.
        raise NotImplementedError
def write_can_to_msg(data, src, msg):
    """Serialize CAN frames into the 'can' list of a capnp message.

    *data* is either one frame or a sequence of frames shaped
    ``(address, busTime, payload[, src])``. Frames with a negative address are
    skipped; when a frame omits its own source, *src* is used and busTime
    stays 0.
    """
    frames = data if isinstance(data[0], Sequence) else [data]
    can_msgs = msg.init('can', len(frames))
    for slot, frame in zip(can_msgs, frames):
        if frame[0] < 0:
            # Negative address marks an invalid/placeholder frame.
            continue
        slot.address = frame[0]
        slot.busTime = 0
        slot.dat = hex_to_str(frame[2])
        if len(frame) == 4:
            slot.src = frame[3]
            slot.busTime = frame[1]
        else:
            slot.src = src
def shuffle_in_unison_scary(data):
    """Shuffle every entry of the mapping *data* in place with one common permutation.

    Captures the RNG state once and restores it before each shuffle, so all
    entries of equal length receive the identical reordering. Returns the
    same (mutated) mapping.
    """
    saved_state = np.random.get_state()
    for key in data:
        np.random.set_state(saved_state)
        np.random.shuffle(data[key])
    return data
# NOTE(review): the decorator line was mangled in this revision (".parametrize");
# restored to the standard pytest parametrization.
@pytest.mark.parametrize('traj,instance,output', [(trjdat, first_instance, (1.0 / 3.0)), (trjdat, second_instance, (1.0 / 2.0))])
def test_location_sequence_match(traj, instance, output):
    """With knowledge_length=1, the inverse of the summed matches over users 1..6
    equals the expected rate."""
    attack = attacks.LocationSequenceAttack(knowledge_length=1)
    results = []
    for uid in range(1, 7):
        results.append(attack._match(single_traj=traj[(traj[user_id] == uid)], instance=instance))
    assert ((1.0 / sum(results)) == output)
class TestRangePlugin(unittest.TestCase):
    """ROS integration tests for the sonar Range sensor plugin topics."""
    def test_max_range(self):
        # An unobstructed sonar should report its configured maximum range.
        msg = rospy.wait_for_message('/sonar2', Range)
        self.assertAlmostEqual(msg.range, msg.max_range)
    def test_inside_range(self):
        # Fixture-dependent: the reading on /sonar is expected within (0.22, 0.25) m.
        msg = rospy.wait_for_message('/sonar', Range)
        self.assertTrue(((msg.range < 0.25) and (msg.range > 0.22)), 'actual value: {0}'.format(msg.range))
class NeuralStyleTransfer(BaseModel):
    """Gatys-style neural style transfer driven by a VGG feature extractor.

    Optimizes a copy of the content image so its feature statistics match the
    style image (style loss) and the content image (content loss).
    """
    def __init__(self):
        super().__init__()

    def build_model(self, style_weight=0.01, content_weight=10000.0):
        """Select the VGG layers and build the style/content feature extractor."""
        self.content_layers = ['block5_conv2']
        self.style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1']
        self.num_content_layers = len(self.content_layers)
        self.num_style_layers = len(self.style_layers)
        self.extractor = StyleContentModel(self.style_layers, self.content_layers)
        self.style_weight = style_weight
        self.content_weight = content_weight

    def configure_optimizers(self, lr=0.02, optim='adam', momentum=0.0, beta_1=0.99, beta_2=0.999, epsilon=0.1, rho=0.9):
        """Create the optimizer ('adam', 'sgd' or 'rmsprop') for the image updates."""
        if (optim == 'adam'):
            self.optimizer = keras.optimizers.Adam(learning_rate=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
        elif (optim == 'sgd'):
            self.optimizer = keras.optimizers.SGD(learning_rate=lr, momentum=momentum)
        elif (optim == 'rmsprop'):
            self.optimizer = keras.optimizers.RMSprop(learning_rate=lr, rho=rho, momentum=momentum, epsilon=epsilon)

    def fit(self, content_image, style_image, epochs=1000, steps_per_epoch=100, output_path='stylized_image.png'):
        """Run the optimization loop, saving the stylized image after each epoch."""
        self.style_targets = self.extractor(style_image)['style']
        self.content_targets = self.extractor(content_image)['content']
        self.stylized_image = tf.Variable(content_image)
        start = time.time()
        step = 0
        for n in range(epochs):
            for m in range(steps_per_epoch):
                step += 1
                self.train_step(self.stylized_image)
                print('.', end='')
            self.save_output(output_path)
            print('Train step: {}'.format(step))
        end = time.time()
        print('Total time: {:.1f}'.format((end - start)))

    # NOTE(review): a mangled decorator residue ("()") preceded this method;
    # restored to @tf.function() (graph-compiled training step) — confirm upstream.
    @tf.function()
    def train_step(self, image):
        """One gradient step on the stylized image, clipped back into [0, 1]."""
        with tf.GradientTape() as tape:
            outputs = self.extractor(image)
            loss = self.style_content_loss(outputs)
        grad = tape.gradient(loss, image)
        self.optimizer.apply_gradients([(grad, image)])
        image.assign(self.clip_0_1(image))

    def style_content_loss(self, outputs):
        """Weighted sum of mean-squared style and content feature errors."""
        style_outputs = outputs['style']
        content_outputs = outputs['content']
        style_loss = tf.add_n([tf.reduce_mean(((style_outputs[name] - self.style_targets[name]) ** 2)) for name in style_outputs.keys()])
        style_loss *= (self.style_weight / self.num_style_layers)
        content_loss = tf.add_n([tf.reduce_mean(((content_outputs[name] - self.content_targets[name]) ** 2)) for name in content_outputs.keys()])
        content_loss *= (self.content_weight / self.num_content_layers)
        return (style_loss + content_loss)

    def predict(self):
        """Return the current stylized image variable."""
        return self.stylized_image

    def save_output(self, img_path):
        """Convert the stylized tensor to an image and write it to *img_path*."""
        output = tensor_to_image(self.stylized_image)
        output.save(img_path)

    def clip_0_1(self, image):
        """Clamp pixel values to the valid [0, 1] range."""
        return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
# NOTE(review): this looks like a Taichi kernel (ti.types.rw_texture parameter)
# but no @ti.kernel decorator is visible in this revision — confirm upstream.
def make_texture_2d_rgba8(tex: ti.types.rw_texture(num_dimensions=2, fmt=ti.Format.rgba8, lod=0), n: ti.i32):
    """Fill an n x n RGBA8 texture: red channel = taichi_logo sample, others zero."""
    for (i, j) in ti.ndrange(n, n):
        ret = ti.cast(taichi_logo((ti.Vector([i, j]) / n)), ti.f32)
        tex.store(ti.Vector([i, j]), ti.Vector([ret, 0.0, 0.0, 0.0]))
class NLLEntropy(_Loss):
    """Negative log-likelihood loss over padded token sequences.

    Averaging is selected by ``config.avg_type``:
      - None:        summed NLL over all tokens
      - 'seq':       summed NLL divided by the batch size
      - 'real_word': per-sequence NLL normalized by the non-zero token count
      - 'word':      token-level mean NLL (padding ignored)

    When both ``rev_vocab`` and ``key_vocab`` are given, listed key words get
    a 10x class weight.
    """
    logger = logging.getLogger()

    def __init__(self, padding_idx, config, rev_vocab=None, key_vocab=None):
        super(NLLEntropy, self).__init__()
        self.padding_idx = padding_idx
        self.avg_type = config.avg_type
        if ((rev_vocab is None) or (key_vocab is None)):
            self.weight = None
        else:
            self.logger.info('Use extra cost for key words')
            weight = np.ones(len(rev_vocab))
            for key in key_vocab:
                weight[rev_vocab[key]] = 10.0
            self.weight = cast_type(torch.from_numpy(weight), FLOAT, config.use_gpu)

    def forward(self, net_output, labels):
        """Compute the NLL loss.

        :param net_output: (batch, seq_len, vocab) log-probabilities
        :param labels: (batch, seq_len) gold token ids
        """
        batch_size = net_output.size(0)
        flat_output = net_output.view(-1, net_output.size(-1))
        flat_target = labels.view(-1)
        # The legacy `size_average`/`reduce` flags are deprecated in PyTorch;
        # the equivalent `reduction=` forms are used below.
        if self.avg_type is None:
            loss = F.nll_loss(flat_output, flat_target, reduction='sum',
                              ignore_index=self.padding_idx, weight=self.weight)
        elif self.avg_type == 'seq':
            loss = F.nll_loss(flat_output, flat_target, reduction='sum',
                              ignore_index=self.padding_idx, weight=self.weight)
            loss = loss / batch_size
        elif self.avg_type == 'real_word':
            # Element-wise loss, summed per sequence, normalized by the count
            # of non-zero labels, then averaged over the batch.
            loss = F.nll_loss(flat_output, flat_target, reduction='none',
                              ignore_index=self.padding_idx, weight=self.weight)
            loss = loss.view(-1, net_output.size(1))
            loss = torch.sum(loss, dim=1)
            word_cnt = torch.sum(torch.sign(labels), dim=1).float()
            loss = loss / word_cnt
            loss = torch.mean(loss)
        elif self.avg_type == 'word':
            loss = F.nll_loss(flat_output, flat_target, reduction='mean',
                              ignore_index=self.padding_idx, weight=self.weight)
        else:
            raise ValueError('Unknown avg type')
        return loss
# NOTE(review): the decorator line was mangled in this revision
# ("(scope='module', ...)"); restored to a module-scoped parametrized fixture.
@pytest.fixture(scope='module', params=[{'dim': 1, 'approx_order': 3}])
def dg_test_env(request):
    """Module-scoped DG term test environment built from the fixture parameters."""
    return DGTermTestEnvornment(**request.param)
_properties
class PatternMatchAndApply(ppl.Pass):
CATEGORY: str = 'Helper'
transformations = properties.ListProperty(element_type=xf.PatternTransformation, default=[], desc='The list of transformations to apply')
permissive = properties.Property(dtype=bool, default=False, desc='Whether to apply in permissive mode, i.e., apply in more cases where it may be unsafe.')
validate = properties.Property(dtype=bool, default=True, desc='If True, validates the SDFG after all transformations have been applied.')
validate_all = properties.Property(dtype=bool, default=False, desc='If True, validates the SDFG after each transformation applies.')
states = properties.ListProperty(element_type=SDFGState, default=None, allow_none=True, desc='If not None, only applies transformations to the given states.')
print_report = properties.Property(dtype=bool, default=None, allow_none=True, desc='Whether to show debug prints (or None to use configuration file).')
progress = properties.Property(dtype=bool, default=None, allow_none=True, desc='Whether to show progress printouts (or None to use configuration file).')
def __init__(self, transformations: Union[(xf.PatternTransformation, Iterable[xf.PatternTransformation])], permissive: bool=False, validate: bool=True, validate_all: bool=False, states: Optional[List[SDFGState]]=None, print_report: Optional[bool]=None, progress: Optional[bool]=None) -> None:
if isinstance(transformations, xf.TransformationBase):
self.transformations = [transformations]
else:
self.transformations = list(transformations)
self._metadata = get_transformation_metadata(self.transformations)
self.permissive = permissive
self.validate = validate
self.validate_all = validate_all
self.states = states
self.print_report = print_report
self.progress = progress
def depends_on(self) -> Set[Type[ppl.Pass]]:
result = set()
for p in self.transformations:
result.update(p.depends_on())
return result
def modifies(self) -> ppl.Modifies:
result = ppl.Modifies.Nothing
for p in self.transformations:
result |= p.modifies()
return result
def should_reapply(self, modified: ppl.Modifies) -> bool:
return any((p.should_reapply(modified) for p in self.transformations))
def apply_pass(self, sdfg: SDFG, pipeline_results: Dict[(str, Any)]) -> Dict[(str, List[Any])]:
    """Apply each configured transformation once (first match only) to *sdfg*.

    Returns a mapping from transformation class name to the list of apply()
    results, or None when nothing was applied.
    """
    applied_transformations = collections.defaultdict(list)
    for xform in self.transformations:
        # Take only the first match for this transformation; skip it entirely
        # if no match exists anywhere in the SDFG.
        try:
            match = next((m for m in match_patterns(sdfg, [xform], metadata=self._metadata, permissive=self.permissive, states=self.states)))
        except StopIteration:
            continue
        # Resolve the (possibly nested) SDFG and graph the match refers to;
        # state_id < 0 means the match is at SDFG (inter-state) level.
        tsdfg = sdfg.sdfg_list[match.sdfg_id]
        graph = (tsdfg.node(match.state_id) if (match.state_id >= 0) else tsdfg)
        match._pipeline_results = pipeline_results
        result = match.apply(graph, tsdfg)
        applied_transformations[type(match).__name__].append(result)
        if self.validate_all:
            # Validate after every single application when requested.
            sdfg.validate()
    if self.validate:
        # Final validation after all transformations were applied.
        sdfg.validate()
    if ((len(applied_transformations) > 0) and (self.print_report or ((self.print_report is None) and Config.get_bool('debugprint')))):
        print('Applied {}.'.format(', '.join([('%d %s' % (len(v), k)) for (k, v) in applied_transformations.items()])))
    # None signals "no change" to the pass pipeline machinery.
    if (len(applied_transformations) == 0):
        return None
    return applied_transformations
class Corana(Benchmark):
    """Corana benchmark function (defined for 4 dimensions).

    Piecewise-quadratic "staircase" function with global minimum 0 at the
    origin; search domain is [-5, 5] in each coordinate.
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 5.0)] * self.N
        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Per-coordinate weights from the original Corana definition.
        weights = [1.0, 1000.0, 10.0, 100.0]
        total = 0
        for j in range(4):
            # Snap x[j] onto the 0.2-wide plateau grid.
            zj = ((floor((abs((x[j] / 0.2)) + 0.49999)) * sign(x[j])) * 0.2)
            if abs(x[j] - zj) < 0.05:
                # Inside a plateau: flattened quadratic term.
                total += ((0.15 * ((zj - (0.05 * sign(zj))) ** 2)) * weights[j])
            else:
                # Outside: plain weighted quadratic.
                total += ((weights[j] * x[j]) * x[j])
        return total
def test_rpow():
    """Reflected power on a proxied int matches the raw value and is traced."""
    base = 2
    wrapped = tt.ObjectProxy(base)
    # 3 ** proxy must delegate through __rpow__ and equal the plain result.
    assert (3 ** base) == (3 ** wrapped)
    # The usage trace must have recorded an int argument for __rpow__.
    trace = tt.UsageTraceNode.from_proxy(wrapped)
    assert int in trace.children['__rpow__'].arg_types[0]
class attentionNet(nn.Module):
    """Encoder-decoder image enhancement network.

    Every scale stacks `depth` attention-guided residual blocks followed by a
    spatial-attention block; downsampling is strided conv, upsampling is
    pixel shuffle, and skip connections add encoder features (xFA1, xFA2)
    back into the decoder. The final output is a tanh-squashed global
    residual on top of the input image.
    """

    def __init__(self, squeezeFilters=64, expandFilters=64, depth=3):
        super(attentionNet, self).__init__()
        # Stem: RGB -> squeezeFilters feature maps.
        self.inputConv = nn.Conv2d(3, squeezeFilters, 3, 1, 1)
        # Scale 1 (full resolution).
        depthAttenBlock = []
        for i in range(depth):
            depthAttenBlock.append(attentionGuidedResBlock(squeezeFilters, expandFilters))
        self.depthAttention1 = nn.Sequential(*depthAttenBlock)
        self.spatialAttention1 = SpatialAttentionBlock(squeezeFilters)
        # NOTE(review): the convs below hard-code 64/128/256 channels, which
        # only matches squeezeFilters/expandFilters at their default value of
        # 64 — confirm non-default constructor arguments are actually supported.
        self.down1 = nn.Conv2d(64, 128, 3, 2, 1)
        # Scale 2 (1/2 resolution).
        depthAttenBlock1 = []
        for i in range(depth):
            depthAttenBlock1.append(attentionGuidedResBlock(128, 128, dilationRate=1))
        self.depthAttention2 = nn.Sequential(*depthAttenBlock1)
        self.spatialAttention2 = SpatialAttentionBlock(128)
        self.down2 = nn.Conv2d(128, 256, 3, 2, 1)
        # Scale 3 (1/4 resolution, bottleneck).
        depthAttenBlock3 = []
        for i in range(depth):
            depthAttenBlock3.append(attentionGuidedResBlock(256, 256, dilationRate=1))
        self.depthAttention3 = nn.Sequential(*depthAttenBlock3)
        self.spatialAttention3 = SpatialAttentionBlock(256)
        # Decoder stage 1: reduce channels, then pixel-shuffle up to 1/2 res.
        self.convUP1 = nn.Conv2d(256, 128, 3, 1, 1)
        self.psUpsampling1 = pixelShuffleUpsampling(inputFilters=128, scailingFactor=2)
        depthAttenBlock4 = []
        for i in range(depth):
            depthAttenBlock4.append(attentionGuidedResBlock(128, 128, dilationRate=1))
        self.depthAttention4 = nn.Sequential(*depthAttenBlock4)
        self.spatialAttention4 = SpatialAttentionBlock(128)
        # Decoder stage 2: back to full resolution.
        self.convUP2 = nn.Conv2d(128, 64, 3, 1, 1)
        self.psUpsampling2 = pixelShuffleUpsampling(inputFilters=64, scailingFactor=2)
        depthAttenBlock5 = []
        for i in range(depth):
            depthAttenBlock5.append(attentionGuidedResBlock(64, 64, dilationRate=1))
        self.depthAttention5 = nn.Sequential(*depthAttenBlock5)
        self.spatialAttention5 = SpatialAttentionBlock(64)
        # Head: features -> 3-channel residual image.
        self.convOut = nn.Conv2d(squeezeFilters, 3, 1)

    def forward(self, img):
        # ---- Encoder ----
        xInp = F.leaky_relu(self.inputConv(img))
        xSP1 = self.depthAttention1(xInp)
        xFA1 = F.leaky_relu(self.spatialAttention1(xSP1))
        xDS1 = F.leaky_relu(self.down1(xFA1))
        xSP2 = self.depthAttention2(xDS1)
        xFA2 = self.spatialAttention2(xSP2)
        xDS2 = F.leaky_relu(self.down2(xFA2))
        xSP3 = self.depthAttention3(xDS2)
        xFA3 = self.spatialAttention3(xSP3)
        # ---- Decoder with encoder skip connections ----
        xCP1 = F.leaky_relu(self.convUP1(xFA3))
        xPS1 = self.psUpsampling1(xCP1)
        xSP4 = self.depthAttention4(xPS1)
        xFA4 = (self.spatialAttention4(xSP4) + xFA2)
        xCP2 = F.leaky_relu(self.convUP2(xFA4))
        xPS2 = self.psUpsampling2(xCP2)
        xSP5 = self.depthAttention5(xPS2)
        xFA5 = (self.spatialAttention5(xSP5) + xFA1)
        # Global residual: network predicts a correction added to the input.
        return torch.tanh((self.convOut(xFA5) + img))

    def _initialize_weights(self):
        # Apply the shared init_weights initializer to every submodule.
        self.inputConv.apply(init_weights)
        self.depthAttention1.apply(init_weights)
        self.spatialAttention1.apply(init_weights)
        self.down1.apply(init_weights)
        self.depthAttention2.apply(init_weights)
        self.spatialAttention2.apply(init_weights)
        self.down2.apply(init_weights)
        self.depthAttention3.apply(init_weights)
        self.spatialAttention3.apply(init_weights)
        self.convUP1.apply(init_weights)
        self.psUpsampling1.apply(init_weights)
        self.depthAttention4.apply(init_weights)
        self.spatialAttention4.apply(init_weights)
        self.convUP2.apply(init_weights)
        self.psUpsampling2.apply(init_weights)
        self.depthAttention5.apply(init_weights)
        self.spatialAttention5.apply(init_weights)
        self.convOut.apply(init_weights)
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for the ns3::ObjectFactory class.

    PyBindGen-style generated registration: declares constructors, the
    output-stream operator, and the factory's public methods on *cls*.
    """
    cls.add_output_stream_operator()
    # Copy constructor, default constructor, and construct-from-type-name.
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    cls.add_method('IsTypeIdSet', 'bool', [], is_const=True)
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId is overloaded on TypeId, C string, and std::string.
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return
def get_data_iterator_and_num_class(args):
    """Build train/validation data iterators and determine the class count.

    When ``args.train_csv`` is given, data comes from CSV dataset files
    (with ``args.test_csv`` used for validation when present, otherwise the
    training set is split by ``args.ratio``). Without a CSV, the Caltech-101
    dataset is used and split by ``args.ratio``.

    Returns:
        (data_iterator_train, data_iterator_valid, num_class)
    """
    if args.train_csv:
        from nnabla.utils.data_iterator import data_iterator_csv_dataset
        if args.test_csv:
            assert os.path.isfile(args.test_csv), 'csv file for test not found.'
            # Class count is the union of labels (last CSV column) across
            # both the training and test files.
            with open(args.train_csv, 'r') as f:
                csv_data_train = f.readlines()[1:]
            classes_train = {line.split(',')[(- 1)].strip() for line in csv_data_train}
            with open(args.test_csv, 'r') as f:
                csv_data_test = f.readlines()[1:]
            classes_test = {line.split(',')[(- 1)].strip() for line in csv_data_test}
            classes_train.update(classes_test)
            num_class = len(classes_train)
            data_iterator_train = data_iterator_csv_dataset(args.train_csv, args.batch_size, args.shuffle, normalize=False)
            data_iterator_valid = data_iterator_csv_dataset(args.test_csv, args.batch_size, args.shuffle, normalize=False)
        else:
            print('No csv file for test given. So split the training data')
            # BUG FIX: original code called the misspelled `isintance`, which
            # raised NameError; also actually enforce the (0, 1) range the
            # message promises.
            assert isinstance(args.ratio, float) and (0.0 < args.ratio < 1.0), 'ratio must be in (0.0, 1.0)'
            with open(args.train_csv, 'r') as f:
                csv_data_train = f.readlines()[1:]
            all_classes = {line.split(',')[(- 1)].strip() for line in csv_data_train}
            num_class = len(all_classes)
            all_data = data_iterator_csv_dataset(args.train_csv, args.batch_size, args.shuffle, normalize=False)
            num_samples = all_data.size
            num_train_samples = int((args.ratio * num_samples))
            # First `ratio` fraction trains, the remainder validates.
            data_iterator_train = all_data.slice(rng=None, slice_start=0, slice_end=num_train_samples)
            data_iterator_valid = all_data.slice(rng=None, slice_start=num_train_samples, slice_end=num_samples)
    else:
        from caltech101_data import data_iterator_caltech101
        assert isinstance(args.ratio, float) and (0.0 < args.ratio < 1.0), 'ratio must be in (0.0, 1.0)'
        num_class = 101
        all_data = data_iterator_caltech101(args.batch_size, width=args.width, height=args.height)
        num_samples = all_data.size
        num_train_samples = int((args.ratio * num_samples))
        data_iterator_train = all_data.slice(rng=None, slice_start=0, slice_end=num_train_samples)
        data_iterator_valid = all_data.slice(rng=None, slice_start=num_train_samples, slice_end=num_samples)
    print('training images: {}'.format(data_iterator_train.size))
    print('validation images: {}'.format(data_iterator_valid.size))
    print('{} categories included.'.format(num_class))
    return (data_iterator_train, data_iterator_valid, num_class)
def main():
    """Run an mmdetection model over a video, optionally showing/saving results."""
    args = parse_args()
    assert (args.out or args.show), 'Please specify at least one operation (save/show the video) with the argument "--out" or "--show"'
    model = init_detector(args.config, args.checkpoint, device=args.device)
    video_reader = mmcv.VideoReader(args.video)
    video_writer = None
    if args.out:
        # Write output with the same fps/size as the input video.
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(args.out, fourcc, video_reader.fps, (video_reader.width, video_reader.height))
    for frame in mmcv.track_iter_progress(video_reader):
        result = inference_detector(model, frame)
        # Draw detections above the score threshold onto the frame.
        frame = model.show_result(frame, result, score_thr=args.score_thr)
        if args.show:
            cv2.namedWindow('video', 0)
            mmcv.imshow(frame, 'video', args.wait_time)
        if args.out:
            video_writer.write(frame)
    if video_writer:
        video_writer.release()
    cv2.destroyAllWindows()
def inparams(params):
    """Return only the parameters classified as input parameters."""
    return [candidate for candidate in params if is_in_param(candidate)]
def eisenstein_series_lseries(weight, prec=53, max_imaginary_part=0, max_asymp_coeffs=40):
    """Return the Dokchitser L-series of the weight-`weight` Eisenstein series on SL_2(Z).

    `prec` is the working precision in bits; the remaining parameters are
    passed through to the Dokchitser coefficient initialization.
    """
    f = eisenstein_series_qexp(weight, prec)
    from sage.lfunctions.all import Dokchitser
    j = weight
    # Functional-equation data: conductor 1, gamma factors [0, 1], sign
    # (-1)^(j/2), a pole at s = j with residue built from Bernoulli numbers.
    L = Dokchitser(conductor=1, gammaV=[0, 1], weight=j, eps=((- 1) ** Integer((j // 2))), poles=[j], residues=('[sqrt(Pi)*(%s)]' % ((((- 1) ** Integer((j // 2))) * bernoulli(j)) / j)), prec=prec)
    # Hand the q-expansion coefficients to PARI (1-based indexing there).
    s = ('coeff = %s;' % f.list())
    L.init_coeffs('coeff[k+1]', pari_precode=s, max_imaginary_part=max_imaginary_part, max_asymp_coeffs=max_asymp_coeffs)
    L.check_functional_equation()
    L.rename(('L-series associated to the weight %s Eisenstein series %s on SL_2(Z)' % (j, f)))
    return L
class Mean(Sum):
    """Sum variant that averages over the reduced dimension."""

    def __init__(self, dimension):
        # Delegate to Sum with the averaging flag enabled.
        super(Mean, self).__init__(dimension, True)
class OPTInt8(CausalInt8Model):
    """OPT causal language model quantized to int8."""

    # Registry key identifying this model configuration.
    config_name: str = 'opt_int8'

    def __init__(self, weights_path: Optional[str]=None):
        # Delegate to the base class, binding the OPT int8 engine config;
        # weights_path optionally points at locally stored weights.
        super().__init__(OPTInt8Engine.config_name, weights_path)
# NOTE(review): the bare call below looks like a stripped decorator — it was
# presumably `@..._task_model('remote_homology', 'resnet')` registering this
# class for the remote-homology task. Confirm against the original file.
_task_model('remote_homology', 'resnet')
class ProteinResNetForSequenceClassification(ProteinResNetAbstractModel):
    """ResNet protein encoder with a sequence-classification head."""

    def __init__(self, config):
        super().__init__(config)
        self.resnet = ProteinResNetModel(config)
        self.classify = SequenceClassificationHead(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(self, input_ids, input_mask=None, targets=None):
        outputs = self.resnet(input_ids, input_mask=input_mask)
        (sequence_output, pooled_output) = outputs[:2]
        # Classification head output is a tuple; append any extra encoder
        # outputs (tuple concatenation, not addition).
        outputs = (self.classify(pooled_output, targets) + outputs[2:])
        return outputs
class JunctorCondition(Condition):
    """Base class for conditions whose sub-parts are joined by a junctor."""

    # Overriding __eq__ would otherwise set __hash__ to None; keep the
    # parent's hash so instances stay hashable.
    __hash__ = Condition.__hash__

    def __eq__(self, other):
        # Cheap hash comparison first, then exact class, then the parts.
        # (Evaluation order intentionally matches hash -> class -> parts.)
        return (self.hash == other.hash
                and (self.__class__ is other.__class__)
                and (self.parts == other.parts))

    def change_parts(self, parts):
        # Rebuild the same junctor type around the replacement parts.
        return self.__class__(parts)
def load_default_identifiers(n, g, l):
    """Replace any identifier the caller left as None with the module default."""
    n = n_identifier if n is None else n
    g = g_identifier if g is None else g
    l = l_identifier if l is None else l
    return (n, g, l)
# NOTE(review): the bare call below looks like a mangled decorator — it was
# presumably a version-gate such as `@check_version(mp, '0.19')`. Confirm
# against the original file.
_version(mp, '0.19')
def test_gammainc():
    # Compare scipy's gammainc against mpmath's regularized incomplete gamma
    # over a, x in (0, 100] at 50 decimal digits of working precision.
    assert_mpmath_equal(gammainc, (lambda a, x: mp.gammainc(a, b=x, regularized=True)), [Arg(0, 100, inclusive_a=False), Arg(0, 100)], nan_ok=False, rtol=1e-17, n=50, dps=50)
def write_summary_results(summaries, cls, output_folder):
    """Write a two-row (header + values) summary file for class *cls*.

    Metric fields are emitted in a canonical order; any field not in the
    canonical list is appended after it, in order of appearance.
    """
    # Flatten all summary dicts into parallel field/value lists.
    fields = [k for s in summaries for k in s.keys()]
    values = [v for s in summaries for v in s.values()]
    default_order = ['HOTA', 'DetA', 'AssA', 'DetRe', 'DetPr', 'AssRe', 'AssPr', 'LocA', 'RHOTA', 'HOTA(0)', 'LocA(0)', 'HOTALocA(0)', 'MOTA', 'MOTP', 'MODA', 'CLR_Re', 'CLR_Pr', 'MTR', 'PTR', 'MLR', 'CLR_TP', 'CLR_FN', 'CLR_FP', 'IDSW', 'MT', 'PT', 'ML', 'Frag', 'sMOTA', 'IDF1', 'IDR', 'IDP', 'IDTP', 'IDFN', 'IDFP', 'Dets', 'GT_Dets', 'IDs', 'GT_IDs']
    # Seed with the canonical keys (None placeholders), then overlay actual
    # values; unknown fields are appended at the end automatically.
    ordered = OrderedDict.fromkeys(default_order)
    ordered.update(zip(fields, values))
    # Drop canonical fields that were never provided.
    for key in default_order:
        if ordered[key] is None:
            del ordered[key]
    out_file = os.path.join(output_folder, (cls + '_summary.txt'))
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    with open(out_file, 'w', newline='') as f:
        writer = csv.writer(f, delimiter=' ')
        writer.writerow(list(ordered.keys()))
        writer.writerow(list(ordered.values()))
def scaled_sigmoid(x, scale=2.5):
    """Map x through a shifted, normalized tanh, clamped to [-1, 1] outside [0, 2].

    Values below 0 map to 1.0, values above 2.0 map to -1.0; in between the
    result is tanh(scale * (1 - x)) / tanh(scale), flattened to 1-D.
    """
    core = (np.tanh((scale * (1 - x))) / np.tanh(scale)).reshape(-1)
    low_clamped = np.where((x < 0), 1.0, core)
    return np.where((x > 2.0), (- 1.0), low_clamped)
def predict(path):
    """Classify the image at *path* and return the top-3 class labels.

    Relies on the module-level `model` (Keras-style, with .predict) and the
    `classes` index-to-label mapping.
    """
    img = Image.open(path).resize((224, 224))
    x = np.array(img)
    if (len(x.shape) == 2):
        # Grayscale image: replicate the single channel to 3 channels.
        x = np.stack(([x] * 3), 2)
    # Standardize to zero mean / unit variance, then add the batch axis.
    x = ((x - x.mean()) / x.std())
    x = np.expand_dims(x, axis=0)
    preds = model.predict(x)
    # BUG FIX: removed a no-op `np.sort(preds)` whose return value was
    # discarded (np.sort does not sort in place); argsort below already
    # selects the top-3 indices by descending score.
    print("Model's top 3 predicted:")
    top3 = np.argsort((- preds[0]))[:3]
    return [classes[i] for i in top3]
class CustomProcessor(ProcessorMixin):
    """Processor pairing an auto-resolved feature extractor with an auto tokenizer."""

    # Class names are resolved lazily by ProcessorMixin through the
    # transformers Auto* registries.
    feature_extractor_class = 'AutoFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
def save_model(model, model_dir, params, net_params, optimizer, epoch, ID='model.pkl'):
    """Serialize model/optimizer state plus run metadata to `model_dir/ID`.

    A no-op when `model_dir` is None.
    """
    metadata = {'date': datetime.now().strftime('%Y-%m-%d'), 'params': params, 'net_params': net_params}
    checkpoint = {
        'model': model.state_dict(),
        'epoch': epoch,
        'name': str(model),
        'optimizer': optimizer.state_dict(),
        'metadata': metadata,
    }
    if model_dir is None:
        return
    torch.save(checkpoint, f'{model_dir}/{ID}')
def online_learning_misp_perfect(user, agent, online_data_loader, train_table, update_iter, model_save_path, record_save_path, max_seq_length=222, num_target_layers=2, st_pos=0, end_pos=(- 1)):
    """Run MISP online learning with a gold-simulating user and a perfect error detector.

    Iterates over the online data, runs an interactive parsing session per
    example, records interaction/annotation costs, periodically saves the
    records and reloads the model retrained at each `update_iter` checkpoint.
    `st_pos`/`end_pos` allow resuming from / stopping at a given iteration.
    """
    assert (args.ask_structure and (args.user == 'gold_sim') and (args.err_detector == 'perfect'))
    cnt = 0
    interaction_records = []
    count_exit = 0
    count_failure = 0
    count_iter = 0
    num_total_examples = len(online_data_loader.dataset)
    if (st_pos > 0):
        # Resuming: reload previously saved interaction records.
        print(('Loading interaction records from %s...' % record_save_path))
        interaction_records = json.load(open(record_save_path, 'r'))
        print(('Record item size: %d ' % len(interaction_records)))
    dset_name = 'train'
    for (iB, t) in enumerate(online_data_loader):
        cnt += len(t)
        assert (len(t) == 1)
        if (len(interaction_records) >= cnt):
            # Example already processed in a previous run; only count it if
            # it produced a usable parse.
            record = interaction_records[(cnt - 1)]
            if ('sql_i' not in record):
                continue
            count_iter += 1
        else:
            (nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hds) = get_fields(t, train_table, no_hs_t=True, no_sql_t=True)
            g_sql_q = generate_sql_q(sql_i, tb)
            (g_sc, g_sa, g_wn, g_wc, g_wo, g_wv) = get_g(sql_i)
            g_wvi_corenlp = get_g_wvi_corenlp(t)
            (wemb_n, wemb_h, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx) = get_wemb_bert(agent.world_model.bert_config, agent.world_model.model_bert, agent.world_model.tokenizer, nlu_t, hds, max_seq_length, num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
            try:
                g_wvi = get_g_wvi_bert_from_g_wvi_corenlp(t_to_tt_idx, g_wvi_corenlp)
                (g_wv_str, g_wv_str_wp) = convert_pr_wvi_to_string(g_wvi, nlu_t, nlu_tt, tt_to_t_idx, nlu)
            except Exception:
                # BUG FIX: narrowed from a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt. Alignment failures are recorded
                # (without 'sql_i') and the example is skipped.
                count_failure += 1
                print(('## Failure %d' % count_failure))
                interaction_records.append({'nl': t[0]['question'], 'true_sql': g_sql_q[0], 'true_sql_i': '{}'.format(sql_i[0]), 'questioned_indices': [], 'q_counter': 0, 'count_additional_q': 0})
                continue
            print(('\n' + ('#' * 50)))
            print('NL input: {}\nTrue SQL: {}'.format(t[0]['question'], g_sql_q[0]))
            # Bayes-dropout detectors need raw inputs to re-run the model.
            if isinstance(agent.error_detector, ErrorDetectorBayesDropout):
                input_item = [tb, nlu_t, nlu, hds]
            else:
                input_item = [wemb_n, l_n, wemb_h, l_hpu, l_hs, tb, nlu_t, nlu_tt, tt_to_t_idx, nlu]
            hyp = agent.world_model.decode(input_item, dec_beam_size=1, bool_verbal=False)[0]
            print((('-' * 50) + '\nBefore interaction: \ninitial SQL: {}'.format(hyp.sql)))
            g_sql = sql_i[0]
            g_sql['g_wvi'] = g_wvi[0]
            (hyp, bool_exit) = agent.interactive_parsing_session(user, input_item, g_sql, hyp, bool_verbal=False)
            print((('-' * 50) + '\nAfter interaction:\nfinal SQL: {}'.format(hyp.sql)))
            Hypothesis.print_hypotheses([hyp])
            # With a perfect detector + gold user, SELECT clause must be right.
            assert (hyp.sql_i['sel'] == sql_i[0]['sel'])
            assert (hyp.sql_i['agg'] == sql_i[0]['agg'])
            # Estimate the extra annotation questions a human would need to
            # repair the WHERE clause (3 per missing condition; 3/2/1 per
            # wrong column / operator / value on spurious conditions).
            count_additional_q = 0
            if (len(hyp.sql_i['conds']) < len(sql_i[0]['conds'])):
                count_additional_q += ((len(sql_i[0]['conds']) - len(hyp.sql_i['conds'])) * 3)
            elif (len(hyp.sql_i['conds']) > len(sql_i[0]['conds'])):
                for (col, op, val) in hyp.sql_i['conds']:
                    if (col not in [_col for (_col, _op, _val) in sql_i[0]['conds']]):
                        count_additional_q += 3
                    elif ((col, op) not in [(_col, _op) for (_col, _op, _val) in sql_i[0]['conds']]):
                        count_additional_q += 2
                    elif ((col, op, val) not in [(_col, _op, _val) for (_col, _op, _val) in sql_i[0]['conds']]):
                        count_additional_q += 1
            print('count_additional_q: {}'.format(count_additional_q))
            record = {'nl': t[0]['question'], 'true_sql': g_sql_q[0], 'true_sql_i': '{}'.format(sql_i[0]), 'sql': hyp.sql, 'sql_i': '{}'.format(hyp.sql_i), 'dec_seq': '{}'.format(hyp.dec_seq), 'tag_seq': '{}'.format(hyp.tag_seq), 'logprob': '{}'.format(hyp.logprob), 'exit': bool_exit, 'q_counter': user.q_counter, 'count_additional_q': count_additional_q, 'questioned_indices': user.questioned_pointers, 'questioned_tags': '{}'.format(user.questioned_tags), 'feedback_records': '{}'.format(user.feedback_records)}
            interaction_records.append(record)
            if bool_exit:
                count_exit += 1
            count_iter += 1
            # Free the large BERT embeddings before the next example.
            del wemb_n, wemb_h
        # Checkpoint boundary: save records and reload the retrained model.
        if (((count_iter % update_iter) == 0) or (count_iter == num_total_examples)):
            if (count_iter < st_pos):
                continue
            if (count_iter > st_pos):
                q_count = sum([(item['q_counter'] + item['count_additional_q']) for item in interaction_records])
                print('## End update at iter {}, anno_cost {}\n'.format(count_iter, q_count))
                print(('Saving interaction records to %s...' % record_save_path))
                json.dump(interaction_records, open(record_save_path, 'w'), indent=4)
            if ((end_pos != (- 1)) and (count_iter == end_pos)):
                print('## Ending online learning at iter {}\n'.format(end_pos))
                print(datetime.datetime.now())
                break
            model_dir = os.path.join(model_save_path, ('%d/' % count_iter))
            print(('Loading model from %s...' % model_dir))
            path_model = os.path.join(model_dir, 'model_best.pt')
            path_model_bert = os.path.join(model_dir, 'model_bert_best.pt')
            if torch.cuda.is_available():
                res = torch.load(path_model_bert)
            else:
                res = torch.load(path_model_bert, map_location='cpu')
            agent.world_model.model_bert.load_state_dict(res['model_bert'])
            agent.world_model.model_bert.to(device)
            if torch.cuda.is_available():
                res = torch.load(path_model)
            else:
                res = torch.load(path_model, map_location='cpu')
            agent.world_model.semparser.load_state_dict(res['model'])
    print(datetime.datetime.now())
    print(('Saving interaction records to %s...' % record_save_path))
    json.dump(interaction_records, open(record_save_path, 'w'), indent=4)
def test_explorer():
    """Smoke test: build a two-transmon + resonator system, sweep it, open Explorer."""
    # Two tunable transmons with different parameters, plus a resonator.
    tmon1 = scq.TunableTransmon(EJmax=40.0, EC=0.2, d=0.1, flux=0.0, ng=0.3, ncut=40, truncated_dim=5)
    tmon2 = scq.TunableTransmon(EJmax=15.0, EC=0.15, d=0.02, flux=0.0, ng=0.0, ncut=30, truncated_dim=5)
    resonator = scq.Oscillator(E_osc=4.5, truncated_dim=4)
    hilbertspace = scq.HilbertSpace([tmon1, tmon2, resonator])
    # Charge-coupling of each transmon to the resonator (+ hermitian conjugate).
    g1 = 0.1
    g2 = 0.2
    hilbertspace.add_interaction(g_strength=g1, op1=tmon1.n_operator, op2=resonator.creation_operator, add_hc=True)
    hilbertspace.add_interaction(g_strength=g2, op1=tmon2.n_operator, op2=resonator.creation_operator, add_hc=True)
    # Sweep over external flux and offset charge (3 points each).
    pname1 = 'flux'
    flux_vals = np.linspace(0.0, 2.0, 3)
    pname2 = 'ng'
    ng_vals = np.linspace((- 0.5), 0.5, 3)
    paramvals_by_name = {pname1: flux_vals, pname2: ng_vals}
    # The second transmon sees a scaled flux (different loop area).
    area_ratio = 1.2
    def update_hilbertspace(flux, ng):
        # Push the swept parameter values into the subsystems.
        tmon1.flux = flux
        tmon2.flux = (area_ratio * flux)
        tmon2.ng = ng
    # Which subsystems each parameter affects (enables caching of the rest).
    subsys_update_info = {pname1: [tmon1, tmon2], pname2: [tmon2]}
    sweep = scq.ParameterSweep(hilbertspace=hilbertspace, paramvals_by_name=paramvals_by_name, update_hilbertspace=update_hilbertspace, evals_count=28, subsys_update_info=subsys_update_info, num_cpus=4)
    expl = scq.Explorer(sweep)
class TestDomain(object):
    """Tests for numpy.polynomial.polyutils domain helpers."""

    def test_getdomain(self):
        # Real input: domain is [min, max] of the points.
        pts = [1, 10, 3, (- 1)]
        assert_almost_equal(pu.getdomain(pts), [(- 1), 10])
        # Complex input: domain is the bounding-box corners.
        pts = [(1 + 1j), (1 - 1j), 0, 2]
        assert_almost_equal(pu.getdomain(pts), [(- 1j), (2 + 1j)])

    def test_mapdomain(self):
        # Mapping a real domain's endpoints lands on the target endpoints.
        old, new = [0, 4], [1, 3]
        assert_almost_equal(pu.mapdomain(old, old, new), new)
        # Same for complex domains.
        old, new = [(0 - 1j), (2 + 1j)], [(- 2), 2]
        assert_almost_equal(pu.mapdomain(old, old, new), new)
        # Multidimensional arrays map elementwise.
        old, new = [0, 4], [1, 3]
        pts = np.array([old, old])
        assert_almost_equal(pu.mapdomain(pts, old, new), np.array([new, new]))
        # ndarray subclasses must be preserved by mapdomain.
        class SubArray(np.ndarray):
            pass
        pts = np.array([old, old]).view(SubArray)
        assert_(isinstance(pu.mapdomain(pts, old, new), SubArray))

    def test_mapparms(self):
        # Real domains: offset 1, scale 0.5 maps [0, 4] onto [1, 3].
        assert_almost_equal(pu.mapparms([0, 4], [1, 3]), [1, 0.5])
        # Complex domains.
        assert_almost_equal(pu.mapparms([(0 - 1j), (2 + 1j)], [(- 2), 2]), [((- 1) + 1j), (1 - 1j)])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.