code stringlengths 281 23.7M |
|---|
class FTraceCallGraph():
 """Callgraph for one process, assembled line-by-line from ftrace
 function_graph output.  When call/return lines are missing from the
 trace, synthetic lines (named ``vfname``) are inserted so that call
 depths stay consistent."""
 # Placeholder function name for synthesized repair lines.
 vfname = 'missing_function_name'
 def __init__(self, pid, sv):
  self.id = ''
  self.invalid = False
  self.name = ''
  self.partial = False
  self.ignore = False
  # -1.0 is the "not yet set" sentinel for the time window.
  self.start = (- 1.0)
  self.end = (- 1.0)
  self.list = []
  self.depth = 0
  self.pid = pid
  # sv: shared state/config object; read for verbose, max_graph_depth,
  # and cgblacklist below.
  self.sv = sv
 def addLine(self, line):
  """Add one FTraceLine, repairing depth mismatches with synthetic lines.

  Returns 1 if this line completes the callgraph (a depth-0 return),
  0 if it was absorbed (or ignored/invalid), and -1 if the graph was
  closed early by a synthesized depth-0 return (graph marked partial).
  """
  # Once invalid, swallow everything until a depth-0 return ends it.
  if self.invalid:
   if ((line.depth == 0) and line.freturn):
    return 1
   return 0
  if (self.depth < 0):
   self.invalidate(line)
   return 0
  # While ignoring a blacklisted/over-deep call, drop its subtree, then
  # convert the pending list entry into a leaf when the subtree ends.
  if self.ignore:
   if (line.depth > self.depth):
    return 0
   else:
    self.list[(- 1)].freturn = True
    self.list[(- 1)].length = (line.time - self.list[(- 1)].time)
    self.ignore = False
    if ((line.depth == self.depth) and line.isReturn()):
     if (line.depth == 0):
      self.end = line.time
      return 1
     return 0
  # Depth this line implies we should currently be at.
  prelinedep = line.depth
  if line.isReturn():
   prelinedep += 1
  last = 0
  lasttime = line.time
  if (len(self.list) > 0):
   last = self.list[(- 1)]
   lasttime = last.time
   if last.isLeaf():
    lasttime += last.length
  # mismatch != 0 means the trace skipped call or return lines.
  mismatch = (prelinedep - self.depth)
  warning = (self.sv.verbose and (abs(mismatch) > 1))
  info = []
  if (mismatch < 0):
   # Missing returns: either turn the previous call into a leaf, or
   # append synthetic return lines until depths agree.
   idx = 0
   while (prelinedep < self.depth):
    self.depth -= 1
    if ((idx == 0) and last and last.isCall()):
     last.depth = self.depth
     last.freturn = True
     last.length = (line.time - last.time)
     if warning:
      info.append(('[make leaf]', last))
    else:
     vline = FTraceLine(lasttime)
     vline.depth = self.depth
     vline.name = self.vfname
     vline.freturn = True
     self.list.append(vline)
     if warning:
      if (idx == 0):
       info.append(('', last))
      info.append(('[add return]', vline))
    idx += 1
   if warning:
    info.append(('', line))
  elif (mismatch > 0):
   # Missing calls: either turn this return into a leaf, or insert
   # synthetic call lines until depths agree.
   idx = 0
   if warning:
    info.append(('', last))
   while (prelinedep > self.depth):
    if ((idx == 0) and line.isReturn()):
     line.fcall = True
     prelinedep -= 1
     if warning:
      info.append(('[make leaf]', line))
    else:
     vline = FTraceLine(lasttime)
     vline.depth = self.depth
     vline.name = self.vfname
     vline.fcall = True
     self.list.append(vline)
     self.depth += 1
     if (not last):
      self.start = vline.time
     if warning:
      info.append(('[add call]', vline))
    idx += 1
   if (warning and (('[make leaf]', line) not in info)):
    info.append(('', line))
  if warning:
   pprint('WARNING: ftrace data missing, corrections made:')
   for i in info:
    (t, obj) = i
    if obj:
     obj.debugPrint(t)
  skipadd = False
  md = self.sv.max_graph_depth
  if line.isCall():
   # Ignore calls past max depth or on the callgraph blacklist.
   if ((md and (self.depth >= (md - 1))) or (line.name in self.sv.cgblacklist)):
    self.ignore = True
   else:
    self.depth += 1
  elif line.isReturn():
   self.depth -= 1
   # Collapse a just-completed (or blacklisted/over-deep) call into a
   # single leaf entry instead of keeping call and return lines.
   if ((last and last.isCall() and (last.depth == line.depth)) or (md and last and (last.depth >= md)) or (line.name in self.sv.cgblacklist)):
    while ((len(self.list) > 0) and (self.list[(- 1)].depth > line.depth)):
     self.list.pop((- 1))
    if (len(self.list) == 0):
     self.invalid = True
     return 1
    self.list[(- 1)].freturn = True
    self.list[(- 1)].length = (line.time - self.list[(- 1)].time)
    self.list[(- 1)].name = line.name
    skipadd = True
  if (len(self.list) < 1):
   self.start = line.time
  res = 1
  # A synthesized depth-0 return ends the graph early: report partial.
  if ((mismatch < 0) and (self.list[(- 1)].depth == 0) and self.list[(- 1)].freturn):
   line = self.list[(- 1)]
   skipadd = True
   res = (- 1)
  if (not skipadd):
   self.list.append(line)
  if ((line.depth == 0) and line.freturn):
   if (self.start < 0):
    self.start = line.time
   self.end = line.time
   if line.fcall:
    self.end += line.length
   # A graph that starts with a synthetic line has no real root.
   if (self.list[0].name == self.vfname):
    self.invalid = True
   if (res == (- 1)):
    self.partial = True
   return res
  return 0
 def invalidate(self, line):
  """Mark the graph invalid, keeping only its first line, and report why."""
  if (len(self.list) > 0):
   first = self.list[0]
   self.list = []
   self.list.append(first)
  self.invalid = True
  id = ('task %s' % self.pid)
  window = ('(%f - %f)' % (self.start, line.time))
  if (self.depth < 0):
   pprint((('Data misalignment for ' + id) + ' (buffer overflow), ignoring this callback'))
  else:
   pprint((((('Too much data for ' + id) + ' ') + window) + ', ignoring this callback'))
 def slice(self, dev):
  """Extract the mutex_lock..mutex_unlock sub-graph that lies inside the
  dev['start']..dev['end'] window; returns the new graph or 0 on failure."""
  minicg = FTraceCallGraph(dev['pid'], self.sv)
  minicg.name = self.name
  mydepth = (- 1)
  good = False
  for l in self.list:
   if ((l.time < dev['start']) or (l.time > dev['end'])):
    continue
   if (mydepth < 0):
    # Start slicing after the mutex_lock return at this depth.
    if ((l.name == 'mutex_lock') and l.freturn):
     mydepth = l.depth
    continue
   elif ((l.depth == mydepth) and (l.name == 'mutex_unlock') and l.fcall):
    good = True
    break
   # Re-base depths so the slice starts at depth 0.
   l.depth -= mydepth
   minicg.addLine(l)
  if ((not good) or (len(minicg.list) < 1)):
   return 0
  return minicg
 def repair(self, enddepth):
  """Close an unfinished graph by appending synthetic returns from
  enddepth-1 down to 0; returns True if the graph was completed."""
  fixed = False
  last = self.list[(- 1)]
  for i in reversed(range(enddepth)):
   t = FTraceLine(last.time)
   t.depth = i
   t.freturn = True
   fixed = self.addLine(t)
   if (fixed != 0):
    self.end = last.time
    return True
  return False
 def postProcess(self):
  """Match calls to returns to fill in call lengths and names; attempts
  self.repair() when calls are left unmatched.  Returns True on success."""
  if (len(self.list) > 0):
   self.name = self.list[0].name
  stack = dict()
  cnt = 0
  last = 0
  for l in self.list:
   # A leaf cannot be longer than the gap to the next line.
   if (last and last.isLeaf()):
    if (last.length > (l.time - last.time)):
     last.length = (l.time - last.time)
   if l.isCall():
    stack[l.depth] = l
    cnt += 1
   elif l.isReturn():
    if (l.depth not in stack):
     if self.sv.verbose:
      pprint('Post Process Error: Depth missing')
      l.debugPrint()
     return False
    cl = stack[l.depth]
    cl.length = (l.time - cl.time)
    if (cl.name == self.vfname):
     cl.name = l.name
    stack.pop(l.depth)
    l.length = 0
    cnt -= 1
   last = l
  if (cnt == 0):
   return True
  elif (cnt < 0):
   if self.sv.verbose:
    pprint('Post Process Error: Depth is less than 0')
   return False
  # Too many calls — close the graph with synthetic returns.
  return self.repair(cnt)
 def deviceMatch(self, pid, data):
  """Attach this callgraph to the device whose pid and time window match;
  returns the device name found, or ''."""
  found = ''
  # dpm_prepare/dpm_complete straddle phase borders and are sliced down
  # to the per-device mutex-protected region.
  borderphase = {'dpm_prepare': 'suspend_prepare', 'dpm_complete': 'resume_complete'}
  if (self.name in borderphase):
   p = borderphase[self.name]
   list = data.dmesg[p]['list']
   for devname in list:
    dev = list[devname]
    if ((pid == dev['pid']) and (self.start <= dev['start']) and (self.end >= dev['end'])):
     cg = self.slice(dev)
     if cg:
      dev['ftrace'] = cg
     found = devname
   return found
  for p in data.sortedPhases():
   if ((data.dmesg[p]['start'] <= self.start) and (self.start <= data.dmesg[p]['end'])):
    list = data.dmesg[p]['list']
    for devname in sorted(list, key=(lambda k: list[k]['start'])):
     dev = list[devname]
     if ((pid == dev['pid']) and (self.start <= dev['start']) and (self.end >= dev['end'])):
      dev['ftrace'] = self
      found = devname
      break
    break
  return found
 def newActionFromFunction(self, data):
  """Create a new device entry for this graph's root function and attach
  the graph to it (skips the generic dpm_* wrappers)."""
  name = self.name
  if (name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']):
   return
  fs = self.start
  fe = self.end
  if ((fs < data.start) or (fe > data.end)):
   return
  phase = ''
  for p in data.sortedPhases():
   if ((data.dmesg[p]['start'] <= self.start) and (self.start < data.dmesg[p]['end'])):
    phase = p
    break
  if (not phase):
   return
  out = data.newActionGlobal(name, fs, fe, (- 2))
  if out:
   (phase, myname) = out
   data.dmesg[phase]['list'][myname]['ftrace'] = self
 def debugPrint(self, info=''):
  """Print the graph header and every line with depth and duration (us)."""
  pprint(('%s pid=%d [%f - %f] %.3f us' % (self.name, self.pid, self.start, self.end, ((self.end - self.start) * 1000000))))
  for l in self.list:
   if l.isLeaf():
    pprint(('%f (%02d): %s(); (%.3f us)%s' % (l.time, l.depth, l.name, (l.length * 1000000), info)))
   elif l.freturn:
    pprint(('%f (%02d): %s} (%.3f us)%s' % (l.time, l.depth, l.name, (l.length * 1000000), info)))
   else:
    pprint(('%f (%02d): %s() { (%.3f us)%s' % (l.time, l.depth, l.name, (l.length * 1000000), info)))
  pprint(' ')
class CustomTransform(TransformComponent):
    """Transform component that delegates to a user-supplied callable.

    The callable is invoked as ``transformer(dataframe, parent, **kwargs)``
    and must return the transformed DataFrame.
    """

    def __init__(self, transformer: Callable[..., Any], **kwargs: Any):
        super().__init__()
        # Goes through the property setter below, validating the callable.
        self.transformer = transformer
        self.transformer__kwargs = kwargs

    @property
    def transformer(self) -> Callable[..., Any]:
        """The wrapped transformation callable."""
        return self._transformer

    @transformer.setter
    def transformer(self, method: Callable[..., Any]) -> None:
        # BUG FIX: the getter and setter were two plain ``def transformer``
        # definitions, so the second silently shadowed the first and the
        # validation never ran; restored as a property pair.
        if not method:
            raise ValueError('A method must be provided to CustomTransform')
        self._transformer = method

    def output_columns(self) -> List[str]:
        """Name(s) of the column(s) this transform produces."""
        return [self._parent.name]

    def transform(self, dataframe: DataFrame) -> DataFrame:
        """Apply the wrapped callable to ``dataframe`` and return the result."""
        # NOTE(review): uses self.parent here but self._parent above —
        # presumably ``parent`` is a property on the base class; confirm.
        dataframe = self.transformer(dataframe, self.parent, **self.transformer__kwargs)
        return dataframe
def test_drop_last_false():
    """Sampling from a fitted TVAE should preserve the pairing between the
    two identical discrete columns in at least 95 of 100 samples."""
    frame = pd.DataFrame({'1': ['a', 'b', 'c'] * 150, '2': ['a', 'b', 'c'] * 150})
    model = TVAESynthesizer(epochs=300)
    model.fit(frame, ['1', '2'])
    samples = model.sample(100)
    matches = sum(1 for _, row in samples.iterrows() if row['1'] == row['2'])
    assert matches >= 95
class Mesh(dict):
 """Mapping from domain tuples to submeshes, built from a geometry, a
 submesh type per domain, and the number of points per spatial variable.
 Missing combined-domain entries are created lazily in __getitem__."""
 def __init__(self, geometry, submesh_types, var_pts):
  super().__init__()
  self.geometry = geometry
  # Allow string keys in var_pts by resolving them to the standard
  # spatial variable objects.
  var_pts_input = var_pts
  var_pts = {}
  for (key, value) in var_pts_input.items():
   if isinstance(key, str):
    key = getattr(pybamm.standard_spatial_vars, key)
   var_pts[key] = value
  var_name_pts = {var.name: pts for (var, pts) in var_pts.items()}
  # Build the per-domain point counts, wrapping bare SubMesh classes in
  # a MeshGenerator so every entry is callable below.
  submesh_pts = {}
  for domain in geometry:
   if ((not isinstance(submesh_types[domain], pybamm.MeshGenerator)) and issubclass(submesh_types[domain], pybamm.SubMesh)):
    submesh_types[domain] = pybamm.MeshGenerator(submesh_types[domain])
   if issubclass(submesh_types[domain].submesh_type, pybamm.SubMesh0D):
    # 0D domains always have a single point.
    submesh_pts[domain] = 1
   else:
    submesh_pts[domain] = {}
    if (len(list(geometry[domain].keys())) > 3):
     raise pybamm.GeometryError('Too many keys provided')
    for var in list(geometry[domain].keys()):
     if (var in ['primary', 'secondary']):
      raise pybamm.GeometryError("Geometry should no longer be given keys 'primary' or 'secondary'. See pybamm.battery_geometry() for example")
     if (var != 'tabs'):
      if isinstance(var, str):
       var = getattr(pybamm.standard_spatial_vars, var)
      if ((var.name not in var_name_pts.keys()) and (var.domain[0] in geometry.keys())):
       raise KeyError(f"Points not given for a variable in domain '{domain}'")
      submesh_pts[domain][var.name] = var_name_pts[var.name]
  self.submesh_pts = submesh_pts
  # Evaluate every symbolic geometry limit down to a number.
  for domain in geometry:
   for (spatial_variable, spatial_limits) in geometry[domain].items():
    if (spatial_variable == 'tabs'):
     for (tab, position_size) in spatial_limits.items():
      for (position_size, sym) in position_size.items():
       if isinstance(sym, pybamm.Symbol):
        sym_eval = sym.evaluate()
        geometry[domain]['tabs'][tab][position_size] = sym_eval
    else:
     for (lim, sym) in spatial_limits.items():
      if isinstance(sym, pybamm.Symbol):
       try:
        sym_eval = sym.evaluate()
       except NotImplementedError as error:
        if sym.has_symbol_of_classes(pybamm.Parameter):
         # Unset Parameters cannot be evaluated; give an actionable error.
         raise pybamm.DiscretisationError('Parameter values have not yet been set for geometry. Make sure that something like `param.process_geometry(geometry)` has been run.')
        else:
         raise error
      elif isinstance(sym, numbers.Number):
       sym_eval = sym
      geometry[domain][spatial_variable][lim] = sym_eval
  # Create the base submeshes, one per domain in the geometry.
  self.base_domains = []
  for domain in geometry:
   self[domain] = submesh_types[domain](geometry[domain], submesh_pts[domain])
   self.base_domains.append(domain)
  self.add_ghost_meshes()
 # NOTE(review): takes ``cls`` — presumably an @classmethod decorator was
 # stripped from this definition; confirm against upstream.
 def _from_json(cls, snippet: dict):
  """Reconstruct a Mesh from its serialized dict form (see to_json)."""
  instance = cls.__new__(cls)
  super(Mesh, instance).__init__()
  instance.submesh_pts = snippet['submesh_pts']
  instance.base_domains = snippet['base_domains']
  for (k, v) in snippet['sub_meshes'].items():
   instance[k] = v
  return instance
 def __getitem__(self, domains):
  """Look up a submesh by domain tuple (strings are wrapped into a
  1-tuple); unseen combinations are combined on demand and cached."""
  if isinstance(domains, str):
   domains = (domains,)
  domains = tuple(domains)
  try:
   return super().__getitem__(domains)
  except KeyError:
   value = self.combine_submeshes(*domains)
   self[domains] = value
   return value
 def __setitem__(self, domains, value):
  if isinstance(domains, str):
   domains = (domains,)
  super().__setitem__(domains, value)
 def combine_submeshes(self, *submeshnames):
  """Concatenate adjacent 1D submeshes (matching edges and coordinate
  systems) into a single SubMesh1D with recorded internal boundaries."""
  if (submeshnames == ()):
   raise ValueError('Submesh domains being combined cannot be empty')
  # Check pairwise adjacency and coordinate-system consistency.
  for i in range((len(submeshnames) - 1)):
   if (self[submeshnames[i]].edges[(- 1)] != self[submeshnames[(i + 1)]].edges[0]):
    raise pybamm.DomainError('submesh edges are not aligned')
   coord_sys = self[submeshnames[i]].coord_sys
   coord_sys_r = self[submeshnames[(i + 1)]].coord_sys
   if (coord_sys != coord_sys_r):
    raise pybamm.DomainError('trying to combine two meshes in different coordinate systems')
  # Drop each subsequent mesh's first edge since it duplicates the
  # previous mesh's last edge.
  combined_submesh_edges = np.concatenate(([self[submeshnames[0]].edges] + [self[submeshname].edges[1:] for submeshname in submeshnames[1:]]))
  coord_sys = self[submeshnames[0]].coord_sys
  submesh = pybamm.SubMesh1D(combined_submesh_edges, coord_sys)
  submesh.internal_boundaries = [self[submeshname].edges[0] for submeshname in submeshnames[1:]]
  return submesh
 def add_ghost_meshes(self):
  """Add one-cell ghost meshes mirrored about each end of every 1D
  submesh (used for boundary conditions)."""
  submeshes = [(domain, submesh) for (domain, submesh) in self.items() if ((len(domain) == 1) and (not isinstance(submesh, (pybamm.SubMesh0D, pybamm.ScikitSubMesh2D))))]
  for (domain, submesh) in submeshes:
   edges = submesh.edges
   # Left ghost cell: reflection of the first cell about the first edge.
   lgs_edges = np.array([((2 * edges[0]) - edges[1]), edges[0]])
   self[(domain[0] + '_left ghost cell')] = pybamm.SubMesh1D(lgs_edges, submesh.coord_sys)
   # Right ghost cell: reflection of the last cell about the last edge.
   rgs_edges = np.array([edges[(- 1)], ((2 * edges[(- 1)]) - edges[(- 2)])])
   self[(domain[0] + '_right ghost cell')] = pybamm.SubMesh1D(rgs_edges, submesh.coord_sys)
 # NOTE(review): this getter/setter pair looks like a @property whose
 # decorators were stripped — as written the second def shadows the
 # first, and __init__'s ``self.geometry = geometry`` sets a plain
 # instance attribute; confirm against upstream.
 def geometry(self):
  return self._geometry
 def geometry(self, geometry):
  self._geometry = geometry
 def to_json(self):
  """Serialize the mesh metadata (submesh points and base domains)."""
  json_dict = {'submesh_pts': self.submesh_pts, 'base_domains': self.base_domains}
  return json_dict
def squared_l1_prox_pos(x, step=1, weights=None, check=False):
    """Proximal operator of the squared weighted L1 norm on the
    nonnegative orthant, evaluated at ``x`` with step size ``step``.

    ``weights`` defaults to all ones; ``check`` asserts x >= 0 first.
    Returns an array of the same shape as ``x``.
    """
    if check:
        assert all(x >= 0)
    w = np.ones_like(x) if weights is None else weights
    # Sort entries by x/w in decreasing order to scan candidate supports.
    order = np.argsort(x / w)[::-1]
    x_sorted = x[order]
    w_sorted = w[order]
    cum_xw = np.cumsum(x_sorted * w_sorted)
    cum_w2 = np.cumsum(w_sorted ** 2)
    # Optimal dual value over all support prefixes.
    alpha = (cum_xw / (1 + (2 * step) * cum_w2)).max()
    # Soft-threshold against 2*step*alpha*w and clip at zero.
    return np.maximum(x - (2 * step) * alpha * w, 0)
class TestSquareLattice(QiskitNatureTestCase):
    """Tests for SquareLattice construction."""

    def test_init(self):
        """Build a 3x2 lattice with mixed boundary conditions and verify
        its graph, node set, edge weights, and adjacency matrix."""
        edge_parameter = (1.0 + 1j, 2.0 + 2j)
        boundary_condition = (BoundaryCondition.PERIODIC, BoundaryCondition.OPEN)
        square = SquareLattice(3, 2, edge_parameter, 1.0, boundary_condition)
        # Shared expectation: the full weighted edge list (incl. onsite loops).
        expected_edges = [(0, 1, 1.0 + 1j), (1, 2, 1.0 + 1j), (0, 2, 1.0 - 1j), (3, 4, 1.0 + 1j), (4, 5, 1.0 + 1j), (3, 5, 1.0 - 1j), (0, 3, 2.0 + 2j), (1, 4, 2.0 + 2j), (2, 5, 2.0 + 2j), (0, 0, 1.0), (1, 1, 1.0), (2, 2, 1.0), (3, 3, 1.0), (4, 4, 1.0), (5, 5, 1.0)]
        with self.subTest('Check the graph.'):
            expected_graph = PyGraph(multigraph=False)
            expected_graph.add_nodes_from(range(6))
            expected_graph.add_edges_from(expected_edges)
            self.assertTrue(is_isomorphic(square.graph, expected_graph, edge_matcher=lambda x, y: x == y))
        with self.subTest('Check the number of nodes.'):
            self.assertEqual(square.num_nodes, 6)
        with self.subTest('Check the set of nodes.'):
            self.assertSetEqual(set(square.node_indexes), set(range(6)))
        with self.subTest('Check the set of weights.'):
            self.assertSetEqual(set(square.weighted_edge_list), set(expected_edges))
        with self.subTest('Check the adjacency matrix.'):
            expected_matrix = np.array([[1.0, 1.0 + 1j, 1.0 - 1j, 2.0 + 2j, 0.0, 0.0], [1.0 - 1j, 1.0, 1.0 + 1j, 0.0, 2.0 + 2j, 0.0], [1.0 + 1j, 1.0 - 1j, 1.0, 0.0, 0.0, 2.0 + 2j], [2.0 - 2j, 0.0, 0.0, 1.0, 1.0 + 1j, 1.0 - 1j], [0.0, 2.0 - 2j, 0.0, 1.0 - 1j, 1.0, 1.0 + 1j], [0.0, 0.0, 2.0 - 2j, 1.0 + 1j, 1.0 - 1j, 1.0]])
            assert_array_equal(square.to_adjacency_matrix(weighted=True), expected_matrix)
def find_node_with_resource(resource: ResourceInfo, context: NodeContext, haystack: Iterator[Node]) -> ResourceNode:
    """Return the first ResourceNode in ``haystack`` whose resource under
    ``context`` equals ``resource``; raise ValueError when none matches."""
    matches = (
        node
        for node in haystack
        if isinstance(node, ResourceNode) and node.resource(context) == resource
    )
    try:
        return next(matches)
    except StopIteration:
        raise ValueError(f'Could not find a node with resource {resource}')
def digui(newone, s, yuzhi, www, R, mui):
    """Score every prefix+suffix candidate against ``s`` via diguipd.

    Returns (matches, status, scores, mui) where status is 0 when at
    least one candidate reached the ``yuzhi`` threshold and -1 otherwise;
    ``mui`` counts how many candidates were evaluated in total.
    """
    matched = []
    scores = []
    best_score = 0
    # Tracks the highest-scoring candidates (kept for parity with the
    # original implementation, though not returned).
    best_candidates = [[]]
    for prefix in newone:
        for suffix in www:
            candidate = str(prefix + suffix)
            mui = mui + 1
            score = diguipd(s, candidate, R)
            if score >= yuzhi and candidate not in matched:
                if score > best_score:
                    best_score = score
                    best_candidates = [[]]
                    best_candidates[0] = candidate
                elif score == best_score:
                    best_candidates.append(candidate)
                matched.append(candidate)
                scores.append(score)
    status = 0 if matched else -1
    return (matched, status, scores, mui)
class Class_Tools():
    """Assorted string-manipulation helpers.  Each method returns a
    ``[status, text, extra]`` triple where status 1 means success and
    ``[0, '!', '']`` signals bad input.  ``encode_type`` is accepted for
    interface compatibility but unused."""

    def func_str_chaifen(self, encode_type, source_text, length):
        """Split source_text into fixed-size chunks joined by spaces.

        length must parse as an int no larger than len(source_text);
        otherwise ``[0, '!', '']`` is returned.
        """
        try:
            changdu = int(length)
        except (TypeError, ValueError):  # narrowed from a bare except:
            return [0, '!', '']
        if changdu > len(source_text):
            return [0, '!', '']
        chunks = [source_text[i:(i + changdu)] for i in range(0, len(source_text), changdu)]
        return [1, str(' '.join(chunks)), '']

    def func_str_split(self, encode_type, source_text, split_str):
        """Replace every split_str separator with a single space."""
        pieces = source_text.split(split_str)
        return [1, str(' '.join(pieces).strip()), '']

    def func_str_tongji(self, encode_type, source_text):
        """Count character frequencies and render them sorted descending
        then ascending, followed by one char:count line per character."""
        # Single pass (was O(n*m) nested loops); dict preserves
        # first-appearance order, keeping tie ordering identical to the
        # original two-pass version under Python's stable sort.
        counts = {}
        for ch in source_text:
            counts[ch] = counts.get(ch, 0) + 1
        desc = sorted(counts.items(), key=(lambda kv: kv[1]), reverse=True)
        # The original sorted ascending twice with identical arguments;
        # one sort suffices.
        asc = sorted(counts.items(), key=(lambda kv: kv[1]))
        parts = ['->:\n:']
        parts.append(''.join(str(k) for (k, v) in desc))
        parts.append('\n:')
        parts.append(''.join(str(v) for (k, v) in desc))
        parts.append('\n\n->:\n:')
        parts.append(''.join(str(k) for (k, v) in asc))
        parts.append('\n:')
        parts.append(''.join(str(v) for (k, v) in asc))
        parts.append('\n\n')
        for (k, v) in desc:
            parts.append((k + ':') + str(v) + '\n')
        return [1, ''.join(parts), '']

    def func_str_re(self, encode_type, source_text):
        """Return source_text stripped and reversed."""
        text = source_text.strip()
        return [1, str(text[::(- 1)]), '']
class NetmapCommand(ops.cmd.DszCommand):
    """Dsz command wrapper for the 'netmap' plugin."""
    optgroups = {}
    reqgroups = []
    reqopts = []
    defopts = {}

    def __init__(self, plugin='netmap', netmap_type=None, **optdict):
        ops.cmd.DszCommand.__init__(self, plugin, **optdict)
        self.netmap_type = netmap_type

    def validateInput(self):
        """True iff every supplied option is in the module's VALID_OPTIONS."""
        return all(opt in VALID_OPTIONS for opt in self.optdict)

    # Option accessors backed by the shared get/set helpers.
    minimal = property(lambda x: getBoolOption(x, 'minimal'), lambda x, y: setBoolOption(x, y, 'minimal'))
    netmap_type = property(lambda x: getValueOption(x, 'type'), lambda x, y: setListOption(x, y, 'type', ['all', 'connected', 'remembered']))
# NOTE(review): the source had a bare call ``_hook('output_csv')`` on its
# own line above the class — consistent with other stripped decorators in
# this file, restored here as a decorator; confirm against upstream.
@_hook('output_csv')
class OutputCSVHook(ClassyHook):
    """Hook that appends per-sample (id, predicted class, target class)
    rows to a delimited file during evaluation."""
    on_phase_start = ClassyHook._noop
    on_start = ClassyHook._noop  # overridden by the def below

    def __init__(self, folder, id_key='id', delimiter='\t') -> None:
        super().__init__()
        self.output_path = f'{folder}/{DEFAULT_FILE_NAME}'
        self.file = PathManager.open(self.output_path, 'a')
        self.id_key = id_key
        self.delimiter = delimiter

    def on_start(self, task) -> None:
        """Write the CSV header row."""
        self.file.write(self.delimiter.join(['sample_id', 'prediction', 'target']) + '\n')

    def on_step(self, task) -> None:
        """Write one row per sample of the current (eval) batch."""
        if self.id_key not in task.last_batch.sample:
            return
        if task.train:
            return
        assert len(task.last_batch.output.shape) == 2, 'First dimension must be batch size, second is the class logits'
        assert len(task.last_batch.sample['target'].shape) in [1, 2], 'Target must be integer or one-hot encoded vectors'
        sample_ids = task.last_batch.sample[self.id_key].tolist()
        predictions = task.last_batch.output.argmax(dim=1).tolist()
        target = task.last_batch.sample['target']
        if len(target.shape) == 2:
            # BUG FIX: the argmax result was previously assigned to
            # ``targets`` and then immediately overwritten by
            # ``target.tolist()``, emitting raw one-hot rows instead of
            # class indices for 2-D targets.
            target = target.argmax(dim=1)
        targets = target.tolist()
        for sample_id, prediction, tgt in zip(sample_ids, predictions, targets):
            self.file.write(self.delimiter.join([str(sample_id), str(prediction), str(tgt)]) + '\n')

    def on_phase_end(self, task) -> None:
        self.file.flush()

    def on_end(self, task) -> None:
        self.file.close()
class TestAssertIsInstance(TestCase):
 """Fixture cases exercising assertIsInstance with non-type second
 arguments.  NOTE(review): the undefined names (abc, xxx, aaa, ...) and
 invalid class arguments appear to be deliberate — this looks like input
 data for a linter/checker rather than a runnable test; confirm before
 'fixing' anything here."""
 def test_you(self):
  # Second argument is a str, not a type.
  self.assertIsInstance(abc, 'xxx')
 def test_me(self):
  # Second arguments are arbitrary expressions, not types/tuples of types.
  self.assertIsInstance(123, (xxx + y))
  self.assertIsInstance(456, (aaa and bbb))
  self.assertIsInstance(789, (ccc or ddd))
  self.assertIsInstance(123, (True if You else False))
 def test_everybody(self):
  self.assertIsInstance('abc', 'def')
 def test_message(self):
  # msg passed both as keyword and positionally.
  self.assertIsInstance((123 + z), (xxx + z), msg='This is wrong!')
  self.assertIsInstance(123, (xxx + z), 'This is wrong!')
def build_from_dict(cfg, registry, default_args=None):
    """Instantiate an object described by ``cfg``.

    ``cfg`` must be a dict with a 'type' key.  When cfg['type'] is a
    string it is looked up in ``registry`` (mapping type names to dotted
    'pkg.module.Class' paths) and instantiated via createInstance with
    the remaining cfg entries (plus ``default_args``) as kwargs; any
    non-string type yields None.
    """
    assert isinstance(cfg, dict) and ('type' in cfg)
    args = cfg.copy()
    obj_type = args.pop('type')
    if not isinstance(obj_type, str):
        return None
    dotted_path = registry[obj_type]
    class_name = dotted_path.split('.')[-1]
    # Everything before the final '.<ClassName>' is the module path.
    module_name = dotted_path[:len(dotted_path) - len(class_name) - 1]
    if default_args is not None:
        for name, value in default_args.items():
            args.setdefault(name, value)
    return createInstance(module_name=module_name, class_name=class_name, **args)
# BUG FIX: the line '(auto_attribs=True)' above the class was a stripped
# decorator and is a SyntaxError as written; restored as @attr.s(...).
@attr.s(auto_attribs=True)
class RepairTarget_Detector_Target_Repaired(RepairTarget_Detector_Target_Remaining):
    """Repair statistics for a detector target.

    num_remaining is derived: original count minus repaired count, or 0
    when num_repaired is infinite (the default).
    """
    num_original_targetedVuls: int = attr.ib(repr=False)
    num_repaired: Union[int, float] = math.inf
    num_remaining: int = attr.ib(repr=False, init=False, default=attr.Factory((lambda self: ((self.num_original_targetedVuls - self.num_repaired) if (not math.isinf(self.num_repaired)) else 0)), takes_self=True))
class traindataset(data.Dataset):
 """Dataset of JPEG images under ``root/train/train`` with integer DR
 labels read from ``root/trainLabels.csv``.  NOTE(review): both 'train'
 and 'val' modes read from the same train directory ('val' keeps only
 the first 30 files) — presumably intentional for this experiment;
 confirm."""
 def __init__(self, root, mode, transform=None, num_class=5, multitask=False, args=None):
  self.root = os.path.expanduser(root)
  self.transform = transform
  self.mode = mode
  self.train_label = []
  self.test_label = []
  self.name = []
  self.num_class = num_class
  self.multitask = multitask
  if (self.mode == 'train'):
   self.train_root = glob.glob((self.root + '/train/train/*.jpeg'))
   dictLabels_DR = self.load_csv((self.root + '/trainLabels.csv'))
   for each_one in self.train_root:
    # Match on the filename without its '.jpeg' extension; exactly one
    # label entry must contain it.
    label_DR = [k for (k, v) in dictLabels_DR.items() if (each_one.split('/')[(- 1)][:(- 5)] in v)]
    self.train_label.append(int(label_DR[0]))
    assert (len(label_DR) == 1)
    self.name.append(each_one.split('/')[(- 1)])
   assert (len(self.train_label) == len(self.train_root))
   print('=> Total Train: ', len(self.train_root), ' DR images ')
  elif (self.mode == 'val'):
   self.test_root = glob.glob((self.root + '/train/train/*.jpeg'))
   self.test_root = self.test_root[:30]
   dictLabels_DR = self.load_csv((self.root + '/trainLabels.csv'))
   for each_one in self.test_root:
    label_DR = [k for (k, v) in dictLabels_DR.items() if (each_one.split('/')[(- 1)][:(- 5)] in v)]
    self.test_label.append(int(label_DR[0]))
    assert (len(label_DR) == 1)
    self.name.append(each_one.split('/')[(- 1)])
   assert (len(self.test_label) == len(self.test_root))
   print('=> Total Test: ', len(self.test_root), ' DR images ')
 def load_csv(self, path):
  """Read a header-skipped CSV of (filename, label) rows and return a
  dict mapping label -> list of filenames."""
  dictLabels_DR = {}
  with open(path) as csvfile:
   csvreader = csv.reader(csvfile, delimiter=',')
   next(csvreader, None)
   for (i, row) in enumerate(csvreader):
    filename = row[0]
    label1 = row[1]
    if (label1 in dictLabels_DR.keys()):
     dictLabels_DR[label1].append(filename)
    else:
     dictLabels_DR[label1] = [filename]
  return dictLabels_DR
 def __getitem__(self, index):
  """Return (transformed image, label, filename) for the given index.
  NOTE(review): modes other than 'train'/'val' would raise
  UnboundLocalError here, and self.transform must not be None."""
  if (self.mode == 'train'):
   img = Image.open(self.train_root[index])
   img = img.convert('RGB')
   (label, name) = (self.train_label[index], self.name[index])
  elif (self.mode == 'val'):
   img = Image.open(self.test_root[index])
   img = img.convert('RGB')
   (label, name) = (self.test_label[index], self.name[index])
  img = self.transform(img)
  return (img, label, name)
 def __len__(self):
  # NOTE(review): returns None for any other mode.
  if (self.mode == 'train'):
   return len(self.train_root)
  elif (self.mode == 'val'):
   return len(self.test_root)
class encoder(nn.Module):
    """DCGAN-style convolutional encoder: five dcgan_conv stages followed
    by a Conv2d/BatchNorm/Tanh head producing a ``dim``-dimensional code.
    forward() also returns the intermediate activations for skip
    connections."""

    def __init__(self, dim, nc=1):
        super(encoder, self).__init__()
        self.dim = dim
        nf = 64
        self.c1 = dcgan_conv(nc, nf)
        self.c2 = dcgan_conv(nf, nf * 2)
        self.c3 = dcgan_conv(nf * 2, nf * 4)
        self.c4 = dcgan_conv(nf * 4, nf * 8)
        self.c5 = dcgan_conv(nf * 8, nf * 8)
        self.c6 = nn.Sequential(nn.Conv2d(nf * 8, dim, 4, 1, 0), nn.BatchNorm2d(dim), nn.Tanh())

    def forward(self, input):
        """Return (flattened code of shape (N, dim), [h1..h5] skips)."""
        skips = []
        h = input
        for stage in (self.c1, self.c2, self.c3, self.c4, self.c5):
            h = stage(h)
            skips.append(h)
        out = self.c6(h)
        return (out.view(-1, self.dim), skips)
class RoofProperty(bpy.types.PropertyGroup):
 """Blender property group describing a procedural roof: its type
 (flat/gable/hip), overhang geometry, height, and optional flat-roof
 border."""
 roof_types = [('FLAT', 'Flat', '', 0), ('GABLE', 'Gable', '', 1), ('HIP', 'Hip', '', 2)]
 type: EnumProperty(name='Roof Type', items=roof_types, default='HIP', description='Type of roof to create')
 gable_types = [('OPEN', 'Open', '', 0), ('BOX', 'Box', '', 1)]
 gable_type: EnumProperty(name='Gable Type', items=gable_types, default='OPEN', description='Type of gable roof to create')
 thickness: FloatProperty(name='Thickness', min=get_scaled_unit(0.01), max=get_scaled_unit(1.0), default=get_scaled_unit(0.1), unit='LENGTH', description='Thickness of roof hangs')
 outset: FloatProperty(name='Outset', min=get_scaled_unit(0.01), max=get_scaled_unit(1.0), default=get_scaled_unit(0.1), unit='LENGTH', description='Outset of roof hangs')
 height: FloatProperty(name='Height', min=get_scaled_unit(0.01), max=get_scaled_unit(10.0), default=get_scaled_unit(1), unit='LENGTH', description='Height of entire roof')
 add_border: BoolProperty(name='Add Border', default=True, description='Whether to add extruded border around flat roof')
 border: FloatProperty(name='Border', min=get_scaled_unit(0.01), max=get_scaled_unit(1.0), default=get_scaled_unit(0.1), unit='LENGTH', description='Width of extruded border')
 def draw(self, context, layout):
  """Draw the roof options UI; the visible controls depend on the
  selected roof type (border only for FLAT, gable style only for GABLE)."""
  layout.prop(self, 'type', text='')
  box = layout.box()
  if (self.type == 'FLAT'):
   col = box.column(align=True)
   col.prop(self, 'thickness')
   col.prop(self, 'outset')
   col.prop(self, 'add_border')
   if self.add_border:
    col.prop(self, 'border')
  elif (self.type == 'GABLE'):
   row = box.row(align=True)
   row.prop(self, 'gable_type', expand=True)
   col = box.column(align=True)
   col.prop(self, 'thickness')
   col.prop(self, 'outset')
   col.prop(self, 'height')
  else:
   # HIP roofs share the same controls as GABLE minus the gable style.
   col = box.column(align=True)
   col.prop(self, 'thickness')
   col.prop(self, 'outset')
   col.prop(self, 'height')
def perframe_sequence_trainer_noattn(conditioning_input_shapes, conditioning_input_names, input_gt_frames_shape, perframe_painter_model, seq_len, is_done_model=None, n_const_frames=1, do_output_disc_stack=False, n_prev_frames=None, n_prev_disc_frames=1, n_painter_frame_outputs=2):
 """Build a Keras training model that unrolls ``perframe_painter_model``
 over ``seq_len`` timesteps, feeding each step the constant frames plus a
 sliding window of previously painted frames, and concatenating the
 per-step predictions along a new trailing time axis.

 Returns a Model whose outputs are the stacked painter prediction
 sequences (plus an optional discriminator input stack).
 """
 if (n_prev_frames is None):
  n_prev_frames = (seq_len - 1)
 if (not isinstance(conditioning_input_shapes, list)):
  conditioning_input_shapes = [conditioning_input_shapes]
 if (conditioning_input_names is None):
  conditioning_input_names = ['cond_input_{}'.format(ii) for ii in range(len(conditioning_input_shapes))]
 conditioning_inputs = []
 for (ii, input_shape) in enumerate(conditioning_input_shapes):
  conditioning_inputs.append(Input(input_shape, name=conditioning_input_names[ii]))
 input_gt_frames = Input(input_gt_frames_shape, name='input_gt_frames')
 inputs = (conditioning_inputs + [input_gt_frames])
 prev_frames = conditioning_inputs[1:]
 gt_frames = Reshape(input_gt_frames_shape, name='reshape_gt')(input_gt_frames)
 # Conditioning inputs are laid out [const | prev frames | attn maps].
 const_frames = conditioning_inputs[:n_const_frames]
 curr_prev_frames = conditioning_inputs[n_const_frames:(n_const_frames + n_prev_frames)]
 curr_prev_attn_maps = conditioning_inputs[(n_const_frames + n_prev_frames):]
 # Tile the first constant frame along a new trailing time axis.
 last_frame_seq = Lambda((lambda x: tf.tile(K.expand_dims(x, axis=(- 1)), [1, 1, 1, 1, seq_len])), name='lambda_tile_slice_last_frame_seq')(const_frames[0])
 # NOTE(review): director_preds_seqs is never appended to in this
 # no-attention variant, yet director_preds_seqs[0] is indexed in the
 # do_output_disc_stack branch below — that path would fail; confirm.
 director_preds_seqs = []
 painter_preds_seqs = []
 for t in range(seq_len):
  painter_cond_inputs = (const_frames + curr_prev_frames)
  # Slice the ground-truth frame for timestep t (time is the last axis).
  gt_frame = Lambda((lambda x: tf.gather(x, t, axis=(- 1))), name='lambda_slice_gt_frames_t{}'.format(t))(gt_frames)
  painter_ae_inputs = (painter_cond_inputs + [gt_frame])
  painter_preds = perframe_painter_model((painter_ae_inputs + painter_cond_inputs))
  if (not isinstance(painter_preds, list)):
   painter_preds = [painter_preds]
  # Clip only the frame-valued outputs to the [-1, 1] image range.
  clipped_painter_frames = []
  for ppi in range(n_painter_frame_outputs):
   pred_frame = painter_preds[ppi]
   pred_frame = Lambda((lambda x: tf.clip_by_value(x, (- 1.0), 1.0)), name=f'lambda_clip_frame_{ppi}_t{t}')(pred_frame)
   clipped_painter_frames.append(pred_frame)
  painter_preds = (clipped_painter_frames + painter_preds[n_painter_frame_outputs:])
  if (n_prev_disc_frames > 0):
   prev_frames = curr_prev_frames[(- n_prev_disc_frames):]
  # Slide the window: drop the oldest frame, append the new prediction.
  curr_prev_frames = (curr_prev_frames[1:] + [painter_preds[0]])
  # Accumulate each painter output along a new trailing time axis.
  for (ppi, pp) in enumerate(painter_preds):
   pp = Reshape((pp.get_shape().as_list()[1:] + [1]), name='reshape_t{}_pp{}'.format(t, ppi))(pp)
   if (t == 0):
    painter_preds_seqs.append(pp)
   else:
    painter_preds_seqs[ppi] = Concatenate(name='concat_t{}_pp{}'.format(t, ppi))([painter_preds_seqs[ppi], pp])
  if do_output_disc_stack:
   if (n_prev_disc_frames > 0):
    # NOTE(review): prev_frames is a Python list here, and lists have no
    # .get_shape(); this looks only valid when it degenerates to a single
    # tensor — confirm before relying on n_prev_disc_frames > 0.
    prev_frames = Reshape((prev_frames.get_shape().as_list()[1:] + [1]), name='exptdim_t{}_prevframes'.format(t))(prev_frames)
    if (t == 0):
     prev_frames_seq = prev_frames
    else:
     prev_frames_seq = Concatenate(name='concat_t{}_prevframe'.format(t))([prev_frames_seq, prev_frames])
  if (is_done_model is not None):
   # Predict sequence termination from (first const frame, latest frame).
   is_done_inputs = Concatenate(axis=(- 1))([const_frames[0], pred_frame])
   is_done_pred = is_done_model(is_done_inputs)
   is_done_pred = Reshape((is_done_pred.get_shape().as_list()[1:] + [1]), name='exptdim_t{}_isdone'.format(t))(is_done_pred)
   if (t == 0):
    is_done_preds_seq = is_done_pred
   else:
    is_done_preds_seq = Concatenate(name='concat_t{}_isdone'.format(t), axis=(- 1))([is_done_preds_seq, is_done_pred])
 outputs = (director_preds_seqs + painter_preds_seqs)
 if do_output_disc_stack:
  disc_inputs = [last_frame_seq]
  if (n_prev_disc_frames > 0):
   disc_inputs.append(prev_frames_seq)
  director_disc_stack = Concatenate(axis=(- 2), name='concat_director_disc_stack')((disc_inputs + [director_preds_seqs[0]]))
  outputs += [director_disc_stack]
 return Model(inputs=inputs, outputs=outputs, name='seqlen{}_perframe_trainer_model'.format(seq_len))
class Args():
 """Bag of hyperparameters/settings.  NOTE(review): field names suggest a
 session-based RNN recommender (GRU4Rec-style) config — confirm against
 the consumer of this class."""
 is_training = False
 layers = 1
 rnn_size = 100
 n_epochs = 3
 batch_size = 50
 # 1 means no dropout on hidden units (keep probability).
 dropout_p_hidden = 1
 learning_rate = 0.001
 decay = 0.96
 decay_steps = 10
 # 0 disables the sigma-based initializer scaling below.
 sigma = 0
 init_as_normal = False
 reset_after_session = True
 # Column names expected in the input data.
 session_key = 'SessionId'
 item_key = 'ItemId'
 time_key = 'Timestamps'
 # 0 disables gradient clipping.
 grad_cap = 0
 test_model = 2
 checkpoint_dir = './checkpoint'
 loss = 'cross-entropy'
 final_act = 'softmax'
 hidden_act = 'tanh'
 # -1 is a placeholder; presumably set from the dataset at runtime.
 n_items = (- 1)
class Solution(object):
    """Count connected components ("friend circles") in a symmetric 0/1
    adjacency matrix M."""

    def findCircleNum(self, M):
        """Return the number of connected groups among len(M) people."""
        n = len(M)
        visited = [0] * n
        groups = 0
        for person in range(n):
            if not visited[person]:
                # Everyone reachable from this person joins one group.
                self.dfs(M, visited, person)
                groups += 1
        return groups

    def dfs(self, M, visited, i):
        """Mark every person reachable from ``i`` as visited."""
        for friend, linked in enumerate(M[i]):
            if linked == 1 and not visited[friend]:
                visited[friend] = 1
                self.dfs(M, visited, friend)
class CSNBottleneck3d(Bottleneck3d):
 """Channel-Separated Network bottleneck: replaces the parent's conv2
 with a depthwise (groups=planes) ConvModule, optionally preceded by a
 1x1x1 pointwise conv when bottleneck_mode == 'ip' (interaction-
 preserved); 'ir' (interaction-reduced) keeps only the depthwise conv."""
 def __init__(self, inplanes, planes, *args, bottleneck_mode='ir', **kwargs):
  super(CSNBottleneck3d, self).__init__(inplanes, planes, *args, **kwargs)
  self.bottleneck_mode = bottleneck_mode
  conv2 = []
  if (self.bottleneck_mode == 'ip'):
   # Pointwise conv first to preserve channel interactions.
   conv2.append(nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False))
  # Rebuild conv2 as a depthwise conv with the parent's hyperparameters.
  conv2_kernel_size = self.conv2.conv.kernel_size
  conv2_stride = self.conv2.conv.stride
  conv2_padding = self.conv2.conv.padding
  conv2_dilation = self.conv2.conv.dilation
  # NOTE(review): truthiness of a bias Parameter tensor — presumably
  # intended as ``bias is not None``; confirm.
  conv2_bias = (True if self.conv2.conv.bias else False)
  self.conv2 = ConvModule(planes, planes, conv2_kernel_size, stride=conv2_stride, padding=conv2_padding, dilation=conv2_dilation, bias=conv2_bias, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, groups=planes)
  conv2.append(self.conv2)
  self.conv2 = nn.Sequential(*conv2)
def test_register_mismatch_method(he_pm: PluginManager) -> None:
    """A registered hookimpl whose name matches no hookspec makes
    check_pending() raise, with the offending plugin attached."""

    class hello():
        def he_method_notexists(self):
            pass

    instance = hello()
    he_pm.register(instance)
    with pytest.raises(PluginValidationError) as excinfo:
        he_pm.check_pending()
    assert excinfo.value.plugin is instance
class FC4_LogVol(FC3_LogVol):
 """Kickstart ``logvol`` command for FC4: extends the FC3 version with
 the --bytes-per-inode and --fsoptions options."""
 removedKeywords = FC3_LogVol.removedKeywords
 removedAttrs = FC3_LogVol.removedAttrs
 def _getParser(self):
  """Return the FC3 parser extended with FC4-only arguments."""
  op = FC3_LogVol._getParser(self)
  op.add_argument('--bytes-per-inode', dest='bytesPerInode', type=int, version=FC4, help='Specify the bytes/inode ratio.')
  op.add_argument('--fsoptions', dest='fsopts', version=FC4, help='\n Specifies a free form string of options to be used when\n mounting the filesystem. This string will be copied into\n the ``/etc/fstab`` file of the installed system and should\n be enclosed in quotes.')
  return op
def generate_outline(premise, setting, characters, character_strings, instruct_model, generation_max_length, max_sections=5, fixed_outline_length=(- 1), outline_levels=1, model_string='text-davinci-002'):
    """Generate a numbered story outline from a premise/setting/characters.

    Retries up to 5 times with an escalating repetition penalty until a
    structurally acceptable outline is produced.  Returns the outline string,
    or ``(outline, all_detailed_outlines)`` when ``outline_levels > 1``
    (only 2 levels are supported).  Raises ValueError when no acceptable
    outline is found.
    """
    def _strip_name_tokens(logit_bias):
        # Character names may legitimately repeat; remove their tokens from
        # the repetition penalty.
        name_tokens = set(sum([(instruct_model.tokenizer.encode(ent) + instruct_model.tokenizer.encode((' ' + ent))) for ent in character_strings.keys()], []))
        for tok in name_tokens:
            logit_bias.pop(tok, None)

    def _acceptable(candidate, repetition_contexts):
        # Structural sanity checks shared by top-level and detailed outlines.
        if fixed_outline_length > 0:
            if ((str(fixed_outline_length) + '.') not in candidate) or ((str(fixed_outline_length + 1) + '.') in candidate):
                return False
        if len(split_list('1.' + candidate)) < 3:
            return False
        if ('2.' not in candidate) or ('3.' not in candidate):
            return False
        if (str(max_sections) + '.') in candidate:
            return False
        if calculate_repetition_length_penalty(candidate, repetition_contexts, is_outline=True) > 0:
            return False
        # Reject candidates truncated at the generation limit.
        return len(instruct_model.tokenizer.encode(candidate)) < generation_max_length

    premise_setting_chars = ((((((('Premise: ' + premise.strip()) + '\n\n') + 'Setting: ') + setting.strip()) + '\n\n') + 'Characters: ') + characters.strip())
    if fixed_outline_length > 0:
        outline_prompt = (((premise_setting_chars + '\n\n\n\nOutline the ') + str(fixed_outline_length)) + ' main plot points of the story.\n\n1.')
    else:
        outline_prompt = (premise_setting_chars + '\n\n\n\nOutline the main plot points of the story.\n\n1.')
    found_acceptable_outline = False
    for i in range(5):
        # Escalate the repetition penalty on each retry (-2, -4, -8, ...).
        outline_logit_bias = get_repetition_logit_bias(instruct_model.tokenizer, outline_prompt, (- (2 ** (i + 1))))
        _strip_name_tokens(outline_logit_bias)
        outlines = instruct_model([outline_prompt], logit_bias=outline_logit_bias, generation_max_length=generation_max_length, num_completions=5, model_string=model_string)
        for outline in outlines:
            if _acceptable(outline, [setting, characters]):
                found_acceptable_outline = True
                break
        if found_acceptable_outline:
            break
    if not found_acceptable_outline:
        logging.warning("Warning: didn't find acceptable outline")
        raise ValueError
    outline = ('1.' + outline).strip()
    logging.log(23, outline)
    if outline_levels > 1:
        all_detailed_outlines = []
        assert outline_levels == 2
        for (outline_idx, outline_piece) in enumerate(split_list(outline)):
            found_acceptable_outline = False
            for i in range(5):
                detailed_outline_logit_bias = get_repetition_logit_bias(instruct_model.tokenizer, ((outline_prompt + ' ') + ' '.join([op for op in split_list(outline)])), (- (2 ** (i + 1))))
                # Bug fix: name tokens were previously removed from the
                # already-used top-level `outline_logit_bias` instead of the
                # detailed bias actually passed to the model below.
                _strip_name_tokens(detailed_outline_logit_bias)
                detailed_outlines = instruct_model([(((((premise_setting_chars + '\n\nOutline:\n\n') + '\n\n'.join([op for op in split_list(outline)[:outline_idx]])) + '\n\nList the minor events in the next part of the story, in which ') + outline_piece.strip()) + '\n\n1.')], logit_bias=detailed_outline_logit_bias, generation_max_length=generation_max_length, num_completions=5, model_string=model_string)
                for detailed_outline in detailed_outlines:
                    if _acceptable(detailed_outline, [setting, characters, outline]):
                        found_acceptable_outline = True
                        break
                if found_acceptable_outline:
                    break
            if not found_acceptable_outline:
                logging.log(23, "Warning: didn't find acceptable outline")
                raise ValueError
            all_detailed_outlines.append('1.' + detailed_outline)
        outline = (outline, all_detailed_outlines)
    return outline
class AutoFeatureExtractor():
    """Factory that instantiates the correct feature-extractor class from a
    pretrained model name or path.  Cannot be instantiated directly — use
    :meth:`from_pretrained`."""

    def __init__(self):
        # Direct instantiation is a usage error by design.
        raise EnvironmentError('AutoFeatureExtractor is designed to be instantiated using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate a feature extractor for ``pretrained_model_name_or_path``.

        Resolution order: an explicit ``feature_extractor_type`` in the
        extractor config, custom code referenced via ``auto_map`` (requires
        ``trust_remote_code=True``), or ``FEATURE_EXTRACTOR_MAPPING`` keyed
        by the model config class.

        Bug fixes vs. previous revision: restored the ``@classmethod`` and
        docstring decorators (the decorator factory was being called as a
        bare, discarded expression), and fixed the doubled braces in the
        final error message, which printed literal ``{CONFIG_NAME}`` text.
        """
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', False)
        kwargs['_from_auto'] = True
        (config_dict, _) = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if ('AutoFeatureExtractor' in config_dict.get('auto_map', {})):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        if ((feature_extractor_class is None) and (feature_extractor_auto_map is None)):
            # Fall back to the model config for the extractor type / auto_map.
            if (not isinstance(config, PretrainedConfig)):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if (hasattr(config, 'auto_map') and ('AutoFeatureExtractor' in config.auto_map)):
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if (feature_extractor_class is not None):
            if (feature_extractor_auto_map is not None):
                # Custom code path: only load with explicit user consent.
                if (not trust_remote_code):
                    raise ValueError(f'Loading {pretrained_model_name_or_path} requires you to execute the feature extractor file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
                if (kwargs.get('revision', None) is None):
                    logger.warning('Explicitly passing a `revision` is encouraged when loading a feature extractor with custom code to ensure no malicious code has been contributed in a newer revision.')
                (module_file, class_name) = feature_extractor_auto_map.split('.')
                feature_extractor_class = get_class_from_dynamic_module(pretrained_model_name_or_path, (module_file + '.py'), class_name, **kwargs)
            else:
                feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif (type(config) in FEATURE_EXTRACTOR_MAPPING):
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a `feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature-extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
def draw_measure_tag(x, y, dx, dy, name, style=None, fill='bgcolor'):
    """Emit TikZ commands for a five-sided measurement-tag polygon at (x, y)
    and write the operator name inside it.

    The sentinel fill value 'bgcolor' resolves to the module-global default
    background colour; the pointed side follows the module-global
    `orientation`.
    """
    global bgcolor
    if fill == 'bgcolor':
        fill = bgcolor
    options = 'fill=%s' % fill
    if style:
        options = options + ',' + style
    # Five corners of the tag, listed in drawing order.
    if orientation == 'vertical':
        corners = ((x, y + 0.5 * (dy + dx)), (x + 0.5 * dx, y + 0.5 * dy), (x + 0.5 * dx, y - 0.5 * dy), (x - 0.5 * dx, y - 0.5 * dy), (x - 0.5 * dx, y + 0.5 * dy))
    else:
        corners = ((x - 0.5 * (dx + dy), y), (x - 0.5 * dx, y + 0.5 * dy), (x + 0.5 * dx, y + 0.5 * dy), (x + 0.5 * dx, y - 0.5 * dy), (x - 0.5 * dx, y - 0.5 * dy))
    coords = tuple(v for corner in corners for v in corner)
    print('\\draw[%s] (%f, %f) -- (%f,%f) -- (%f,%f) -- (%f, %f) -- (%f, %f) -- cycle;' % ((options,) + coords))
    write_operator_name(x, y, name)
class Add(ImageOnlyTransform):
    """Test-time augmentation that adds a constant value to the image.

    Guarantees the identity value (0) is among the candidate values, so the
    unmodified image is always part of the augmentation set.
    """
    identity_param = 0

    def __init__(self, values: List[float]):
        if self.identity_param not in values:
            # Prepend the identity so de-augmentation has a no-op branch.
            values = [self.identity_param] + list(values)
        super().__init__('value', values)

    def apply_aug_image(self, image, value=0, **kwargs):
        # The identity value leaves the image untouched.
        if value == self.identity_param:
            return image
        return F.add(image, value)
class DataIterator(object):
    """Shuffling, size-bucketed minibatch iterator for summarization data.

    Examples are gathered into large buffers, sorted so similarly sized
    items batch together, cut into minibatches via a task-specific
    ``batch_size_fn``, and wrapped in ``Batch`` objects.
    """

    def __init__(self, args, dataset, batch_size, device=None, is_test=False, shuffle=True):
        self.args = args
        (self.batch_size, self.is_test, self.dataset) = (batch_size, is_test, dataset)
        self.iterations = 0  # total number of minibatches served so far
        self.device = device
        self.shuffle = shuffle
        self.sort_key = (lambda x: len(x[1]))  # sort by target length (ex[1])
        self._iterations_this_epoch = 0
        # Abstractive vs. extractive tasks measure batch size differently.
        if (self.args.task == 'abs'):
            self.batch_size_fn = abs_batch_size_fn
        else:
            self.batch_size_fn = ext_batch_size_fn

    def data(self):
        """Return the dataset, shuffled in place when shuffling is enabled."""
        if self.shuffle:
            random.shuffle(self.dataset)
        xs = self.dataset
        return xs

    def preprocess(self, ex, is_test):
        """Trim one raw example dict to the configured length limits.

        Returns ``(src, tgt, segs, clss, src_sent_labels)``, extended with
        the raw source/target texts when ``is_test`` is true.
        """
        src = ex['src'][:self.args.max_src_len]
        tgt = ex['tgt'][:self.args.max_tgt_len]
        src_sent_labels = ex['src_sent_labels']
        segs = ex['segs']
        if (not self.args.use_interval):
            segs = ([0] * len(segs))
        clss = ex['clss']
        src_txt = ex['src_txt']
        tgt_txt = ex['tgt_txt']
        # Preserve the final token (e.g. a separator) while truncating the
        # source to max_pos.
        end_id = [src[(- 1)]]
        src = (src[:(- 1)][:(self.args.max_pos - 1)] + end_id)
        segs = segs[:self.args.max_pos]
        # Drop sentences whose CLS position fell beyond the truncation point.
        max_sent_id = bisect.bisect_left(clss, self.args.max_pos)
        src_sent_labels = src_sent_labels[:max_sent_id]
        clss = clss[:max_sent_id]
        if is_test:
            return (src, tgt, segs, clss, src_sent_labels, src_txt, tgt_txt)
        else:
            return (src, tgt, segs, clss, src_sent_labels)

    def batch_buffer(self, data, batch_size):
        """Yield coarse buffers of preprocessed examples of ~``batch_size``
        cumulative size; empty-source examples are skipped."""
        (minibatch, size_so_far) = ([], 0)
        for ex in data:
            if (len(ex['src']) == 0):
                continue
            ex = self.preprocess(ex, self.is_test)
            if (ex is None):
                continue
            minibatch.append(ex)
            # batch_size_fn reports the cumulative size of the minibatch.
            size_so_far = self.batch_size_fn(ex, len(minibatch))
            if (size_so_far >= batch_size):
                (yield minibatch)
                (minibatch, size_so_far) = ([], 0)
        if minibatch:
            (yield minibatch)

    def batch(self, data, batch_size):
        """Cut a (pre-sorted) buffer into minibatches of exactly
        ``batch_size`` cumulative size (plus a final partial batch)."""
        (minibatch, size_so_far) = ([], 0)
        for ex in data:
            minibatch.append(ex)
            size_so_far = self.batch_size_fn(ex, len(minibatch))
            if (size_so_far == batch_size):
                (yield minibatch)
                (minibatch, size_so_far) = ([], 0)
        if minibatch:
            (yield minibatch)

    def create_batches(self):
        """Yield (optionally shuffled) minibatches drawn from large sorted
        buffers of ``batch_size * 200`` examples."""
        data = self.data()
        for buffer in self.batch_buffer(data, (self.batch_size * 200)):
            if (self.args.task == 'abs'):
                # Sort by segs length, then (stably) by target length, so
                # similarly shaped examples end up in the same batch.
                p_batch = sorted(buffer, key=(lambda x: len(x[2])))
                p_batch = sorted(p_batch, key=(lambda x: len(x[1])))
            else:
                p_batch = sorted(buffer, key=(lambda x: len(x[2])))
            p_batch = self.batch(p_batch, self.batch_size)
            p_batch = list(p_batch)
            if self.shuffle:
                random.shuffle(p_batch)
            for b in p_batch:
                if (len(b) == 0):
                    continue
                (yield b)

    def __iter__(self):
        # Serves one epoch (the trailing `return` exits the while loop);
        # `_iterations_this_epoch` lets a resumed iterator skip batches it
        # already produced earlier in the epoch.
        while True:
            self.batches = self.create_batches()
            for (idx, minibatch) in enumerate(self.batches):
                if (self._iterations_this_epoch > idx):
                    continue
                self.iterations += 1
                self._iterations_this_epoch += 1
                batch = Batch(minibatch, self.device, self.is_test)
                (yield batch)
            return
def get_glob(path):
    """Expand a glob pattern (or a list/tuple of patterns) recursively.

    Accepts a string, an ``os.PathLike``, or a list/tuple of either;
    returns the matching paths as a list.  Raises TypeError for anything
    else.
    """
    if isinstance(path, (str, os.PathLike)):
        # str(path) is a no-op for strings and converts PathLike objects.
        return glob.glob(str(path), recursive=True)
    if isinstance(path, (list, tuple)):
        return list(chain.from_iterable(glob.glob(str(p), recursive=True) for p in path))
    raise TypeError(f"path should be string, path-like or a list. Instead, it's a {type(path)}")
def extended_noun_chunks(sentence):
    """Return spans covering the parser's noun chunks plus every maximal run
    of NOUN/PROPN tokens, sorted by position.

    `sentence` is a spaCy-like Doc/Span: iterable of tokens with `.pos_`,
    sliceable, and exposing `.noun_chunks`.
    """
    chunks = {(np.start, np.end) for np in sentence.noun_chunks}
    run_start = 0
    current = 'NONE'
    for idx, token in enumerate(sentence):
        kind = token.pos_ if token.pos_ in {'NOUN', 'PROPN'} else 'NONE'
        if kind == current:
            continue
        # A run just ended and/or a new one begins at this token.
        if current != 'NONE':
            chunks.add((run_start, idx))
        if kind != 'NONE':
            run_start = idx
        current = kind
    # Close a run that extends to the end of the sentence.
    if current != 'NONE':
        chunks.add((run_start, len(sentence)))
    return [sentence[s:e] for (s, e) in sorted(chunks)]
class TestTransformerDelay(unittest.TestCase):
    """Unit tests for ``Transformer.delay()``."""

    def test_default(self):
        tfm = new_transformer()
        tfm.delay([1.0])
        self.assertEqual(['delay', '1.000000'], tfm.effects)
        self.assertEqual(['delay'], tfm.effects_log)
        self.assertEqual(True, tfm.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm)

    def test_default_three_channel(self):
        tfm = new_transformer()
        tfm.delay([0.0, 1.0])
        self.assertEqual(['delay', '0.000000', '1.000000'], tfm.effects)
        self.assertEqual(['delay'], tfm.effects_log)
        self.assertEqual(True, tfm.build(INPUT_FILE4, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE4, OUTPUT_FILE, tfm)

    def test_invalid_position_type(self):
        # A bare float (not a list of positions) must be rejected.
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.delay(1.0)

    def test_invalid_position_vals(self):
        # Negative delay values are invalid.
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.delay([(- 1.0), 1.0])
def parasitic_cphase_compensation(cphase_angle: float) -> Callable[([FermiHubbardParameters], FermiHubbardParameters)]:
    """Build a parameters transform compensating a parasitic cphase angle.

    The returned callable deep-copies the input parameters and sets the
    nearest-neighbour interaction ``v`` so that the parasitic cphase
    accumulated per Trotter step ``dt`` is cancelled.
    """
    def compensate(parameters: FermiHubbardParameters) -> FermiHubbardParameters:
        cphase = cphase_angle / parameters.dt
        bonds = parameters.sites_count - 1
        if isinstance(parameters.layout, ZigZagLayout):
            # Alternating 2x / 4x pattern along the zig-zag chain.
            v = np.zeros(bonds)
            v[0::2] = 2.0 * cphase
            v[1::2] = 4.0 * cphase
        elif isinstance(parameters.layout, LineLayout):
            v = np.full(bonds, 2.0 * cphase)
        else:
            raise ValueError(f'Unsupported layout {parameters.layout}')
        compensated = deepcopy(parameters)
        compensated.hamiltonian.v = tuple(v)
        return compensated
    return compensate
def simple_river_split_gauge_model():
    """Build a pywr model: Catchment -> RiverSplitWithGauge -> (Estuary, Demand).

    The gauge enforces a 40-unit minimum residual flow (MRF) and splits the
    remainder 3:1 between the 'river' and 'abstraction' slots.  Returns
    ``(model, expected_node_results)`` for use in solver tests.
    """
    inflow = 100.0
    mrf = 40.0
    demand_flow = 50.0
    model = pywr.core.Model()
    catchment = river.Catchment(model, name='Catchment', flow=inflow)
    gauge = river.RiverSplitWithGauge(model, name='Gauge', mrf=mrf, mrf_cost=(- 100), slot_names=('river', 'abstraction'), factors=[3, 1])
    catchment.connect(gauge)
    estuary = pywr.core.Output(model, name='Estuary')
    gauge.connect(estuary, from_slot='river')
    demand = pywr.core.Output(model, name='Demand', max_flow=demand_flow, cost=(- 10))
    gauge.connect(demand, from_slot='abstraction')
    # Flow left over after the MRF is satisfied, split 3:1.
    residual = inflow - mrf
    expected_node_results = {
        'Catchment': inflow,
        'Gauge': inflow,
        'Gauge Sublink 0': mrf,
        'Gauge Sublink 1': residual * 0.75,
        'Gauge Sublink 2': residual * 0.25,
        'Demand': min(demand_flow, residual * 0.25),
    }
    return (model, expected_node_results)
def passlib_or_crypt(secret, algorithm, salt=None, salt_size=None, rounds=None, ident=None):
    """Hash `secret` with `algorithm`, preferring the passlib backend and
    falling back to crypt.

    Raises Exception (chained to the crypt import error) when neither
    backend is available.
    """
    if PASSLIB_AVAILABLE:
        return PasslibHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
    if HAS_CRYPT:
        return CryptHash(algorithm).hash(secret, salt=salt, salt_size=salt_size, rounds=rounds, ident=ident)
    # Bug fix: Exception() does not accept an `orig_exc` keyword (that is an
    # extension of project-specific error classes) and would raise TypeError
    # here; chain the original import error instead.
    raise Exception('Unable to encrypt nor hash, either crypt or passlib must be installed.') from CRYPT_E
class OldGeneratorReach(GeneratorReach):
    """Reachability tracker for the generator.

    Maintains a digraph of nodes reachable from the current state, along
    with caches for path costs, node safety (strongly connected with the
    current node), and edges whose requirements are not yet satisfied.
    """
    _digraph: graph_module.BaseGraph
    _state: State
    _game: GameDescription
    _reachable_paths: (dict[(int, list[int])] | None)
    _reachable_costs: (dict[(int, int)] | None)
    _node_reachable_cache: dict[(int, bool)]
    _unreachable_paths: dict[(tuple[(int, int)], RequirementSet)]
    _safe_nodes: (_SafeNodes | None)
    _is_node_safe_cache: dict[(int, bool)]

    def __deepcopy__(self, memodict: dict) -> OldGeneratorReach:
        # Cheap copy: the digraph is copied, the small caches are
        # shallow-copied, and shared/immutable members are reused.
        reach = OldGeneratorReach(self._game, self._state, self._digraph.copy())
        reach._unreachable_paths = copy.copy(self._unreachable_paths)
        reach._reachable_paths = self._reachable_paths
        reach._reachable_costs = self._reachable_costs
        reach._safe_nodes = self._safe_nodes
        reach._node_reachable_cache = copy.copy(self._node_reachable_cache)
        reach._is_node_safe_cache = copy.copy(self._is_node_safe_cache)
        return reach

    def __init__(self, game: GameDescription, state: State, graph: graph_module.BaseGraph):
        self._game = game
        self.all_nodes = game.region_list.all_nodes
        self._state = state
        self._digraph = graph
        self._unreachable_paths = {}
        self._reachable_paths = None
        self._node_reachable_cache = {}
        self._is_node_safe_cache = {}

    @classmethod
    def reach_from_state(cls, game: GameDescription, initial_state: State) -> GeneratorReach:
        """Build a reach seeded at ``initial_state.node``.

        Bug fix: restored the @classmethod decorator implied by the `cls`
        parameter; without it the constructor-helper is unusable via the
        class.
        """
        reach = cls(game, initial_state, graph_module.RandovaniaGraph.new())
        game.region_list.ensure_has_node_cache()
        reach._expand_graph([GraphPath(None, initial_state.node, RequirementSet.trivial())])
        return reach

    def _potential_nodes_from(self, node: Node) -> Iterator[tuple[(Node, RequirementSet)]]:
        """Yield (target, requirement-set) pairs for edges leaving `node`,
        folding in the node's exit requirement and any extra requirement."""
        extra_requirement = _extra_requirement_for_node(self._game, self.node_context(), node)
        requirement_to_leave = node.requirement_to_leave(self._state.node_context())
        for (target_node, requirement) in self._game.region_list.potential_nodes_from(node, self.node_context()):
            if (target_node is None):
                continue
            if (requirement_to_leave != Requirement.trivial()):
                requirement = RequirementAnd([requirement, requirement_to_leave])
            if (extra_requirement is not None):
                requirement = RequirementAnd([requirement, extra_requirement])
            (yield (target_node, requirement.patch_requirements(self._state.resources, 1.0, self._state.resource_database).as_set(self._state.resource_database)))

    def _expand_graph(self, paths_to_check: list[GraphPath]) -> None:
        """Breadth-first expansion of the digraph; satisfied edges are added,
        unsatisfied ones are remembered in _unreachable_paths for retries."""
        self._reachable_paths = None
        while paths_to_check:
            path = paths_to_check.pop(0)
            if path.is_in_graph(self._digraph):
                continue
            path.add_to_graph(self._digraph)
            for (target_node, requirement) in self._potential_nodes_from(path.node):
                if requirement.satisfied(self._state.resources, self._state.energy, self._state.resource_database):
                    paths_to_check.append(GraphPath(path.node, target_node, requirement))
                else:
                    self._unreachable_paths[(path.node.node_index, target_node.node_index)] = requirement
        self._safe_nodes = None

    def _can_advance(self, node: Node) -> bool:
        """Whether expansion may continue past this node: resource nodes
        block until they are collected."""
        if node.is_resource_node:
            assert isinstance(node, ResourceNode)
            return node.is_collected(self.node_context())
        else:
            return True

    def _calculate_safe_nodes(self) -> None:
        if (self._safe_nodes is not None):
            return
        # Safe nodes = the strongly connected component containing the
        # current node (can always return to where we are).
        for component in self._digraph.strongly_connected_components():
            if (self._state.node.node_index in component):
                assert (self._safe_nodes is None)
                self._safe_nodes = _SafeNodes(sorted(component), component)
        assert (self._safe_nodes is not None)

    def _calculate_reachable_paths(self) -> None:
        if (self._reachable_paths is not None):
            return
        all_nodes = typing.cast(tuple[(Node, ...)], self.all_nodes)
        context = self.node_context()

        def _is_collected(target: int) -> int:
            # Cost 1 for uncollected resource nodes, 0 otherwise, so paths
            # prefer to avoid passing through uncollected resources.
            node: Node = all_nodes[target]
            if node.is_resource_node:
                assert isinstance(node, ResourceNode)
                if node.is_collected(context):
                    return 0
                else:
                    return 1
            else:
                return 0

        def weight(source: int, target: int, attributes: RequirementSet) -> int:
            return _is_collected(target)

        (self._reachable_costs, self._reachable_paths) = self._digraph.multi_source_dijkstra({self.state.node.node_index}, weight=weight)

    def is_reachable_node(self, node: Node) -> bool:
        """True when the node is reachable without crossing an uncollected
        resource node (cost 0), or when it is itself the single uncollected
        resource along the way (cost 1)."""
        index = node.node_index
        cached_value = self._node_reachable_cache.get(index)
        if (cached_value is not None):
            return cached_value
        self._calculate_reachable_paths()
        assert (self._reachable_costs is not None)
        cost = self._reachable_costs.get(index)
        if (cost is not None):
            if (cost == 0):
                self._node_reachable_cache[index] = True
            elif (cost == 1):
                self._node_reachable_cache[index] = (not self._can_advance(node))
            else:
                self._node_reachable_cache[index] = False
            return self._node_reachable_cache[index]
        else:
            return False

    def connected_nodes(self) -> Iterator[Node]:
        """All nodes that have a computed path from the current node."""
        self._calculate_reachable_paths()
        assert (self._reachable_paths is not None)
        all_nodes = typing.cast(tuple[(Node, ...)], self.all_nodes)
        for index in self._reachable_paths.keys():
            (yield all_nodes[index])

    @property
    def state(self) -> State:
        # Bug fix: restored @property — methods in this class access
        # `self.state.node` / `self.state.act_on_node(...)`, which requires
        # attribute-style access.
        return self._state

    @property
    def game(self) -> GameDescription:
        # Bug fix: restored @property — `self.game.dangerous_resources` and
        # `self.game.victory_condition` below require attribute access.
        return self._game

    def nodes(self) -> Iterator[Node]:
        # NOTE(review): may also have been a @property originally; left as a
        # plain method since nothing in this block requires either form.
        for (i, node) in enumerate(typing.cast(tuple[(Node, ...)], self.all_nodes)):
            if (i in self._digraph):
                (yield node)

    def safe_nodes(self) -> Iterator[Node]:
        """Nodes in the same strongly connected component as the current node."""
        self._calculate_safe_nodes()
        assert (self._safe_nodes is not None)
        all_nodes = typing.cast(tuple[(Node, ...)], self.all_nodes)
        for i in self._safe_nodes.as_list:
            (yield all_nodes[i])

    def is_safe_node(self, node: Node) -> bool:
        node_index = node.node_index
        is_safe = self._is_node_safe_cache.get(node_index)
        if (is_safe is not None):
            return is_safe
        self._calculate_safe_nodes()
        assert (self._safe_nodes is not None)
        self._is_node_safe_cache[node_index] = (node_index in self._safe_nodes.as_set)
        return self._is_node_safe_cache[node_index]

    def advance_to(self, new_state: State, is_safe: bool=False) -> None:
        """Move to `new_state`, pruning caches and retrying previously
        unsatisfied edges that the new resources might now satisfy."""
        assert (new_state.previous_state == self.state)
        if (is_safe or self.is_safe_node(new_state.node)):
            # Safe move: only negative cache entries can flip, so drop them.
            for index in [index for (index, flag) in self._node_reachable_cache.items() if (not flag)]:
                del self._node_reachable_cache[index]
            for node_index in [node_index for (node_index, flag) in self._is_node_safe_cache.items() if (not flag)]:
                del self._is_node_safe_cache[node_index]
        else:
            self._node_reachable_cache = {}
            self._is_node_safe_cache = {}
        self._state = new_state
        all_nodes = typing.cast(tuple[(Node, ...)], self.all_nodes)
        paths_to_check: list[GraphPath] = []
        edges_to_remove = []
        for (edge, requirement) in self._unreachable_paths.items():
            if requirement.satisfied(self._state.resources, self._state.energy, self._state.resource_database):
                (from_index, to_index) = edge
                paths_to_check.append(GraphPath(all_nodes[from_index], all_nodes[to_index], requirement))
                edges_to_remove.append(edge)
        for edge in edges_to_remove:
            del self._unreachable_paths[edge]
        self._expand_graph(paths_to_check)

    def act_on(self, node: ResourceNode) -> None:
        """Collect `node`; edges guarded by newly dangerous resources are
        re-checked and dropped when no longer satisfied."""
        new_dangerous_resources = {resource for (resource, quantity) in node.resource_gain_on_collect(self.state.node_context()) if (resource in self.game.dangerous_resources)}
        new_state = self.state.act_on_node(node)
        if new_dangerous_resources:
            edges_to_remove = []
            for (source, target, requirement) in self._digraph.edges_data():
                if (not new_dangerous_resources.isdisjoint(requirement.dangerous_resources)):
                    if (not requirement.satisfied(new_state.resources, new_state.energy, new_state.resource_database)):
                        edges_to_remove.append((source, target))
            for edge in edges_to_remove:
                self._digraph.remove_edge(*edge)
        self.advance_to(new_state)

    def unreachable_nodes_with_requirements(self) -> dict[(Node, RequirementSet)]:
        """Map each still-unreachable target node to the requirement set
        blocking it, patched with the currently held resources."""
        results: dict[(Node, RequirementSet)] = {}
        all_nodes = typing.cast(tuple[(Node, ...)], self.all_nodes)
        for ((_, node_index), requirement) in self._unreachable_paths.items():
            node = all_nodes[node_index]
            if self.is_reachable_node(node):
                continue
            requirements = requirement.patch_requirements(self.state.resources, self.state.resource_database)
            if (node in results):
                results[node] = results[node].expand_alternatives(requirements)
            else:
                # Bug fix: store the patched `requirements` (consistent with
                # the branch above) instead of the raw `requirement`.
                results[node] = requirements
        return results

    def victory_condition_satisfied(self) -> bool:
        return self.game.victory_condition.satisfied(self.state.resources, self.state.energy, self.state.resource_database)
class Driver(uvm_driver):
    """UVM driver: pulls TinyAlu commands from the sequencer, executes them
    on the BFM, and publishes each result on an analysis port."""

    def build_phase(self):
        self.ap = uvm_analysis_port('ap', self)

    def start_of_simulation_phase(self):
        self.bfm = TinyAluBfm()

    async def launch_tb(self):
        # Reset the DUT before enabling the BFM queues.
        await self.bfm.reset()
        self.bfm.start_bfm()

    async def run_phase(self):
        await self.launch_tb()
        while True:
            command = await self.seq_item_port.get_next_item()
            await self.bfm.send_op(command.A, command.B, command.op)
            alu_result = await self.bfm.get_result()
            self.ap.write(alu_result)
            command.result = alu_result
            self.seq_item_port.item_done()
.mssql_server_required
class TestStringTypeConversion(unittest.TestCase):
    """Round-trip string/binary values through MSSQL stored-procedure
    output parameters.

    NOTE(review): the CREATE PROCEDURE template and the '' bind names look
    garbled (parameter identifiers appear to have been stripped from the
    SQL); confirm against the original test-suite source before relying on
    these tests.
    """

    def setUp(self):
        self.mssql = mssqlconn()
        for (name, size) in VARIABLE_TYPES:
            dbtype = name.lower()
            identifier = (dbtype if (dbtype == 'text') else ('%s(%d)' % (dbtype, size)))
            try:
                self.mssql.execute_non_query(('\n            CREATE PROCEDURE [dbo].[pymssqlTest%(name)s]\n            %(dbtype)s %(identifier)s,\n            %(dbtype)s %(identifier)s output\n            AS\n            BEGIN\n            SET %(dbtype)s = %(dbtype)s;\n            RETURN 0;\n            END\n            ' % {'dbtype': dbtype, 'name': name, 'identifier': identifier}))
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  Procedure creation is only
                # required to succeed for the Text case.
                if (name == 'Text'):
                    raise

    def tearDown(self):
        for (name, size) in VARIABLE_TYPES:
            self.mssql.execute_non_query(('DROP PROCEDURE [dbo].[pymssqlTest%s]' % name))
        self.mssql.close()

    def testChar(self):
        """CHAR round-trip through an output parameter."""
        input = 'test'
        proc = self.mssql.init_procedure('pymssqlTestChar')
        proc.bind(input, _mssql.SQLCHAR, '')
        proc.bind(None, _mssql.SQLCHAR, '', output=True, max_length=4)
        proc.execute()
        self.assertEqual(input, proc.parameters[''])

    def testText(self):
        """TEXT round-trip through an output parameter."""
        input = 'test'
        proc = self.mssql.init_procedure('pymssqlTestText')
        proc.bind(input, _mssql.SQLTEXT, '')
        proc.bind(None, _mssql.SQLVARCHAR, '', output=True)
        proc.execute()
        self.assertEqual(input, proc.parameters[''])

    def testVarChar(self):
        """VARCHAR round-trip through an output parameter."""
        input = 'test'
        proc = self.mssql.init_procedure('pymssqlTestVarChar')
        proc.bind(input, _mssql.SQLVARCHAR, '')
        proc.bind(None, _mssql.SQLVARCHAR, '', output=True)
        proc.execute()
        self.assertEqual(input, proc.parameters[''])

    def testVarBinary(self):
        """VARBINARY round-trip: bytes and bytearray both come back as
        bytes; str input must raise TypeError."""
        def check_conversion(input, output_type):
            proc = self.mssql.init_procedure('pymssqlTestVarBinary')
            proc.bind(input, _mssql.SQLVARBINARY, '')
            proc.bind(None, _mssql.SQLVARBINARY, '', output=True)
            proc.execute()
            self.assertEqual(input, proc.parameters[''])
            self.assertEqual(output_type, type(proc.parameters['']))
        check_conversion(bytes(b'\xde\xad\xbe\xef'), bytes)
        check_conversion(bytearray(b'\xde\xad\xbe\xef'), bytes)
        with pytest.raises(TypeError) as exc_info:
            check_conversion('FOO', bytes)
        assert ('value can only be bytes or bytearray' == str(exc_info.value))
class AverageWindowAttention(AverageAttention):
    """Average attention restricted to a trailing window of `window_size`
    timesteps.

    window_size == 0 means unlimited (plain cumulative averaging);
    window_size == 1 degenerates to the identity (average of one element).
    """

    def __init__(self, embed_dim, dropout=0.0, bias=True, window_size=0):
        super().__init__(embed_dim, dropout, bias)
        self.window_size = window_size  # trailing window length; 0 = unlimited

    def _forward(self, value, mask_trick, mask_future_timesteps):
        """Parallel (training-time) path: build an explicit (length x length)
        averaging matrix and apply it with a batched matmul.

        `value` is (length, batch, dim); returns (attn, attn_weights).
        """
        if (self.window_size == 1):
            # Averaging a single position is the identity.
            return (value, None)
        (length, batch_size) = value.size()[:2]
        if (not mask_future_timesteps):
            raise NotImplementedError()
        else:
            v = value.transpose(0, 1)
            attn_weights = value.new_ones(length, length, requires_grad=False)
            if (self.window_size > 0):
                # Keep only the band covering the last `window_size` steps
                # (inclusive of the current one).
                # NOTE(review): when window_size == 0 no tril/triu mask is
                # applied, so the average is over ALL positions despite
                # mask_future_timesteps being required — confirm intended.
                attn_weights.tril_(0).triu_((1 - self.window_size))
            attn_weights.div_(attn_weights.sum(1, keepdim=True))  # row-normalize in place
            attn_weights = attn_weights.unsqueeze_(0).repeat(batch_size, 1, 1)
            attn = torch.bmm(attn_weights, v)
            attn = attn.transpose(0, 1).contiguous()
            return (attn, attn_weights)

    def _forward_incremental(self, value, mask_trick, mask_future_timesteps, incremental_state):
        """Step-wise (decoding-time) path keeping a rolling buffer and/or a
        running sum in `incremental_state`."""
        if (self.window_size == 1):
            return (value, None)
        (length, batch_size) = value.size()[:2]
        if mask_trick:
            # Keep the last `window_size` raw vectors and average them.
            saved_state = self._get_input_buffer(incremental_state)
            if ('prev_vec' in saved_state):
                value = torch.cat([saved_state['prev_vec'], value], dim=0)
            saved_state['prev_vec'] = value[(- self.window_size):]
            self._set_input_buffer(incremental_state, saved_state)
            attn_weights = None
            attn = value.mean(0, keepdim=True)
        else:
            # Maintain a running sum; subtract the vector that leaves the
            # window once the buffer exceeds window_size.
            saved_state = self._get_input_buffer(incremental_state)
            if ('prev_sum' in saved_state):
                prev_sum = saved_state['prev_sum']
                values = torch.cat([saved_state['prev_vec'], value], dim=0)
                curr_sum = (prev_sum + value)
                if (values.size(0) > self.window_size):
                    curr_sum -= values[:1]  # evict the oldest element
                avg_size = min(values.size(0), self.window_size)
                attn = (curr_sum / avg_size)
            else:
                # First step: nothing buffered yet, average of one element.
                curr_sum = value
                values = value
                attn = value
            saved_state['prev_vec'] = values[(- self.window_size):]
            saved_state['prev_sum'] = curr_sum
            self._set_input_buffer(incremental_state, saved_state)
            attn_weights = None
        return (attn, attn_weights)

    def extra_repr(self):
        return 'embed_dim={}, dropout={}, window_size={}'.format(self.embed_dim, self.dropout, self.window_size)
def binary_search(fre, cand, level):
    """Find an entry of sorted list `fre` whose (level-1)-prefix equals `cand`.

    Performs a leftmost binary search over the prefixes; if the converged
    position does not match, the immediately following entry is also tried.
    Returns the index, or -1 when no entry matches (or `fre` is empty).
    """
    if not fre:
        return -1
    prefix_len = level - 1
    lo, hi = 0, len(fre) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if cand <= fre[mid][:prefix_len]:
            hi = mid
        else:
            lo = mid + 1
    if cand == fre[lo][:prefix_len]:
        return lo
    if lo + 1 < len(fre) and cand == fre[lo + 1][:prefix_len]:
        return lo + 1
    return -1
_REGISTRY.register()
class MSMT17(ImageDataset):
    """MSMT17 person re-identification dataset loader.

    Locates whichever supported on-disk layout is present (per VERSION_DICT),
    parses the train/val/query/gallery list files, and shifts evaluation ids
    past the training ids so they never collide.
    """
    dataset_url = None
    dataset_name = 'msmt17'

    def __init__(self, root='datasets', **kwargs):
        self.dataset_dir = root
        # Probe for a known dataset layout under the root directory.
        has_main_dir = False
        for main_dir in VERSION_DICT:
            if osp.exists(osp.join(self.dataset_dir, main_dir)):
                train_dir = VERSION_DICT[main_dir][TRAIN_DIR_KEY]
                test_dir = VERSION_DICT[main_dir][TEST_DIR_KEY]
                has_main_dir = True
                break
        assert has_main_dir, 'Dataset folder not found'
        self.train_dir = osp.join(self.dataset_dir, main_dir, train_dir)
        self.test_dir = osp.join(self.dataset_dir, main_dir, test_dir)
        self.list_train_path = osp.join(self.dataset_dir, main_dir, 'list_train.txt')
        self.list_val_path = osp.join(self.dataset_dir, main_dir, 'list_val.txt')
        self.list_query_path = osp.join(self.dataset_dir, main_dir, 'list_query.txt')
        self.list_gallery_path = osp.join(self.dataset_dir, main_dir, 'list_gallery.txt')
        self.check_before_run([self.dataset_dir, self.train_dir, self.test_dir])
        train = self.process_dir(self.train_dir, self.list_train_path)
        val = self.process_dir(self.train_dir, self.list_val_path)
        query = self.process_dir(self.test_dir, self.list_query_path, is_train=False)
        gallery = self.process_dir(self.test_dir, self.list_gallery_path, is_train=False)
        # Offset query/gallery pids so they are disjoint from training pids.
        num_train_pids = self.get_num_pids(train)
        query = [(img_path, pid + num_train_pids, camid) for (img_path, pid, camid) in query]
        gallery = [(img_path, pid + num_train_pids, camid) for (img_path, pid, camid) in gallery]
        if kwargs.get('combineall'):
            train += val
        super(MSMT17, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, dir_path, list_path, is_train=True):
        """Parse a 'relative/path pid' list file into (path, pid, camid)
        tuples; training entries get the dataset-name prefix."""
        with open(list_path, 'r') as txt:
            lines = txt.readlines()
        data = []
        for img_info in lines:
            (img_path, pid) = img_info.split(' ')
            pid = int(pid)
            # Camera id is the third underscore-separated token, zero-based.
            camid = int(img_path.split('_')[2]) - 1
            img_path = osp.join(dir_path, img_path)
            if is_train:
                pid = self.dataset_name + '_' + str(pid)
                camid = self.dataset_name + '_' + str(camid)
            data.append((img_path, pid, camid))
        return data
class Effect6779(BaseEffect):
    """Skirmish command burst strength bonus: boosts all four warfare buff
    multipliers on Skirmish Command charges by commandStrengthBonus * skill
    level."""
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        lvl = src.level
        # Apply the identical boost to each buff slot, preserving the
        # original application order (3, 4, 1, 2).
        for buff_attr in ('warfareBuff3Multiplier', 'warfareBuff4Multiplier', 'warfareBuff1Multiplier', 'warfareBuff2Multiplier'):
            fit.modules.filteredChargeBoost((lambda mod: mod.item.requiresSkill('Skirmish Command')), buff_attr, (src.getModifiedItemAttr('commandStrengthBonus') * lvl), **kwargs)
def test_funcarg(testdir):
    """pytest-cov smoke test: coverage is reported for a funcarg-style test
    module and the run succeeds."""
    script = testdir.makepyfile(SCRIPT_FUNCARG)
    run = testdir.runpytest('-v', f'--cov={script.dirpath()}', '--cov-report=term-missing', script)
    run.stdout.fnmatch_lines(['*- coverage: platform *, python * -*', 'test_funcarg* 3 * 100%*', '*1 passed*'])
    assert run.ret == 0
class BboxHead(nn.Module):
    """Bbox regression head: a 1x1 conv producing 4 offsets per anchor,
    reshaped to (batch, num_priors, 4)."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        # (N, C, H, W) -> (N, H, W, A*4) -> (N, H*W*A, 4)
        feat = self.conv1x1(x)
        feat = feat.permute(0, 2, 3, 1).contiguous()
        return feat.view(feat.shape[0], -1, 4)
def test_pype_get_args_no_parent_context():
    """get_arguments parses a minimal pype context with defaults and no
    parent-context propagation."""
    context = Context({'pype': {'name': 'pipe name', 'args': {'a': 'b'}}})
    with get_arb_pipeline_scope(context):
        (pipeline_name, args, out, use_parent_context, pipe_arg, skip_parse, raise_error, loader, groups, success_group, failure_group, py_dir, parent) = pype.get_arguments(context)
        assert pipeline_name == 'pipe name'
        assert args == {'a': 'b'}
        assert out is None
        assert not use_parent_context
        assert pipe_arg is None
        assert skip_parse
        assert raise_error
        assert not loader
        assert not groups
        assert not success_group
        assert not failure_group
        assert py_dir is None
        assert parent is None
def plot_time_series(link_matrix=None, coef_matrix=None, var_names=None, order=None, figsize=None, dpi=200, label_space_left=0.1, label_space_top=0.05, label_fontsize=12, alpha=0.001):
    """Plot the unrolled time-series graph encoded by a (dim, dim, max_lag) link matrix.

    Parameters
    ----------
    link_matrix : ndarray or None
        Binary (dim, dim, max_lag) array; entry (i, j, tau) marks a link from
        variable i at lag tau to variable j. Derived from ``coef_matrix``
        when None.
    coef_matrix : ndarray or None
        Coefficients of the same shape; |coef| >= ``alpha`` counts as a link.
        At least one of the two matrices must be supplied.
    var_names : list of str or None
        Row labels; defaults to ``$X_0$`` ... ``$X_{dim-1}$``.
    order : sequence of int or None
        Top-to-bottom display order of the variables; defaults to 0..dim-1.
    figsize, dpi
        Matplotlib figure size and resolution.
    label_space_left, label_space_top, label_fontsize
        Layout knobs for the margin labels.
    alpha : float
        Threshold on ``abs(coef_matrix)`` when deriving the link matrix.

    Raises
    ------
    RuntimeError
        If both ``link_matrix`` and ``coef_matrix`` are None.
    """
    if ((link_matrix is None) and (coef_matrix is None)):
        raise RuntimeError('link_matrix is None and coef_matrix is None')
    if (link_matrix is None):
        # Threshold the coefficient matrix into a binary link matrix.
        link_matrix = np.zeros_like(coef_matrix, dtype=int)
        link_matrix[(abs(coef_matrix) >= alpha)] = 1
    shape = link_matrix.shape
    assert (link_matrix.ndim == 3)
    assert (shape[0] == shape[1])
    fig = pyplot.figure(figsize=figsize, dpi=dpi)
    ax = fig.add_subplot(111, frame_on=False)
    pyplot.axis('off')
    (dim, _, max_lag) = link_matrix.shape
    if (var_names is None):
        var_names = [f'$X_{i}$' for i in range(dim)]
    if (order is None):
        order = range(dim)
    link_matrix_tsg = np.copy(link_matrix)
    # Adjacency matrix of the unrolled graph: one node per (variable, time
    # step), dim * max_lag nodes in total.
    tsg = np.zeros(((dim * max_lag), (dim * max_lag)))
    # NOTE(review): bare call — looks like a stripped `@..._cache(128)`
    # decorator for `flat`; as written it has no effect on `flat`. Confirm
    # against the original source.
    _cache(128)
    def flat(row, col):
        # Map (variable row, time column) to a flat node index.
        return ((row * max_lag) + col)
    for (i, j, tau) in np.column_stack(np.where(link_matrix_tsg)):
        # Repeat each lagged link at every time step where the lag fits.
        for t in range(max_lag):
            if ((t - tau) >= 0):
                tsg[(flat(i, (t - tau)), flat(j, t))] = 1.0
    G = nx.DiGraph(tsg)
    # Lay the nodes out on a regular grid: column = time, row = variable.
    posarray = np.zeros(((dim * max_lag), 2))
    for i in range((dim * max_lag)):
        posarray[i] = np.array([(i % max_lag), (i // max_lag)])
    pos_tmp = {}
    (xmin, ymin) = posarray.min(axis=0, initial=0)
    (xmax, ymax) = posarray.max(axis=0, initial=0)
    for i in range((dim * max_lag)):
        # Normalize grid coordinates into [0, 1]; NaNs appear when a span is
        # zero (single row or column) and are pinned to 0.
        pos_tmp[i] = np.array([(((i % max_lag) - xmin) / (xmax - xmin)), (((i // max_lag) - ymin) / (ymax - ymin))])
        pos_tmp[i][np.isnan(pos_tmp[i])] = 0.0
    pos = {}
    for n in range(dim):
        for tau in range(max_lag):
            # Re-map rows so that order[0] is drawn at the top of the figure.
            pos[flat(n, tau)] = pos_tmp[((((dim - order[n]) - 1) * max_lag) + tau)]
    for i in range(dim):
        # Variable labels in the left margin, in display order.
        trans = transforms.blended_transform_factory(fig.transFigure, ax.transData)
        ax.text(label_space_left, pos[(order[i] * max_lag)][1], f'{var_names[order[i]]}', fontsize=label_fontsize, horizontalalignment='left', verticalalignment='center', transform=trans)
    for tau in np.arange((max_lag - 1), (- 1), (- 1)):
        # Time labels along the top: '$t$' for the last column, '$t-k$' otherwise.
        trans = transforms.blended_transform_factory(ax.transData, fig.transFigure)
        if (tau == (max_lag - 1)):
            ax.text(pos[tau][0], (1.0 - label_space_top), '$t$', fontsize=label_fontsize, horizontalalignment='center', verticalalignment='top', transform=trans)
        else:
            ax.text(pos[tau][0], (1.0 - label_space_top), f'$t-{((max_lag - tau) - 1)}$', fontsize=label_fontsize, horizontalalignment='center', verticalalignment='top', transform=trans)
    nx.draw(G, pos=pos)
    pyplot.show()
.parametrize('column_props,expected', [({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'bigint', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, IntType(bits=64, signed=True)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'int', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, IntType(bits=32, signed=True)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'smallint', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, IntType(bits=16, signed=True)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'double precision', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, FloatType(bits=64)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'real', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, FloatType(bits=32)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'boolean', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, BoolType()), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'text', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': 65536, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, StringType(bytes_=65536, variable=True)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'character varying', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': 255, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, StringType(bytes_=255, variable=True)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'char', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': 255, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, 
StringType(bytes_=255, variable=False)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'bytea', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, BytesType(bytes_=MAX_FIELD_SIZE)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'bit', 'CHARACTER_MAXIMUM_LENGTH': 1, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, BytesType(bytes_=1, variable=False)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'bit', 'CHARACTER_MAXIMUM_LENGTH': 17, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None}, BytesType(bytes_=3, variable=False)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'timestamp', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None, 'DATETIME_PRECISION': 3}, IntType(bits=64, logical='build.recap.Timestamp', unit='millisecond')), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'timestamp', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': None, 'NUMERIC_SCALE': None, 'UDT_NAME': None, 'DATETIME_PRECISION': 3}, IntType(bits=64, logical='build.recap.Timestamp', unit='millisecond')), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'decimal', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': 10, 'NUMERIC_SCALE': 2, 'UDT_NAME': None}, BytesType(logical='build.recap.Decimal', bytes_=32, variable=False, precision=10, scale=2)), ({'COLUMN_NAME': 'test_column', 'DATA_TYPE': 'numeric', 'CHARACTER_MAXIMUM_LENGTH': None, 'CHARACTER_OCTET_LENGTH': None, 'NUMERIC_PRECISION': 5, 'NUMERIC_SCALE': 0, 'UDT_NAME': None}, BytesType(logical='build.recap.Decimal', bytes_=32, variable=False, precision=5, scale=0))])
def test_postgresql_converter(column_props, expected):
    """Each information-schema column dict should map to its Recap type."""
    converted = PostgresqlConverter()._parse_type(column_props)
    assert converted == expected
class Brightness(object):
    """Additively shift tensor brightness by a fixed amount.

    The shift is clipped to [-1.0, 1.0] at construction; every output is
    clamped back into [0, 1].
    """

    def __init__(self, value):
        """
        Arguments
        ---------
        value : float
            Brightness shift; values outside [-1, 1] are clipped.
        """
        self.value = max(min(value, 1.0), (- 1.0))

    def __call__(self, *inputs):
        """Apply the shift to each input tensor.

        Returns a single tensor when called with one input, otherwise a list
        of tensors (one per input), preserving each input's dtype.
        """
        outputs = []
        for _input in inputs:
            # Shift in float space, clamp to the valid range, then cast back
            # to the input's original tensor type.
            _input = th.clamp(_input.float().add(self.value).type(_input.type()), 0, 1)
            outputs.append(_input)
        # Bug fix: the original `idx > 1` test silently dropped the second
        # output when exactly two inputs were given (the last index is 1),
        # and `idx` was undefined for zero inputs.
        return (outputs if (len(outputs) > 1) else outputs[0])
class TestLoader(TestCase):
    """Tests for the TAP Loader: files, directories, error cases, and stdin."""

    def test_handles_file(self):
        """A TAP file yields a case per result line plus one for the bail-out."""
        sample = inspect.cleandoc('TAP version 13\n 1..2\n # This is a diagnostic.\n ok 1 A passing test\n not ok 2 A failing test\n This is an unknown line.\n Bail out! This test would abort.\n ')
        temp = tempfile.NamedTemporaryFile(delete=False)
        temp.write(sample.encode('utf-8'))
        temp.close()
        loader = Loader()
        suite = loader.load_suite_from_file(temp.name)
        self.assertEqual(3, len(suite._tests))

    def test_file_does_not_exist(self):
        """A missing file produces one error test describing the problem."""
        loader = Loader()
        suite = loader.load_suite_from_file('phony.tap')
        self.assertEqual(1, len(suite._tests))
        # Fix: the template previously read '(unknown) does not exist.' with
        # no placeholder, so .format(filename=...) was a no-op and the
        # comparison could never match the loader's actual message.
        self.assertEqual('{filename} does not exist.'.format(filename='phony.tap'), suite._tests[0]._line.description)

    def test_handles_directory(self):
        """Loading a directory recurses into subdirectories for .tap files."""
        directory = tempfile.mkdtemp()
        sub_directory = os.path.join(directory, 'sub')
        os.mkdir(sub_directory)
        with open(os.path.join(directory, 'a_file.tap'), 'w') as f:
            f.write('ok A passing test')
        with open(os.path.join(sub_directory, 'another_file.tap'), 'w') as f:
            f.write('not ok A failing test')
        loader = Loader()
        suite = loader.load([directory])
        self.assertEqual(2, len(suite._tests))

    def test_errors_with_multiple_version_lines(self):
        """A second version line is reported as a single error test."""
        sample = inspect.cleandoc('TAP version 13\n TAP version 13\n 1..0\n ')
        temp = tempfile.NamedTemporaryFile(delete=False)
        temp.write(sample.encode('utf-8'))
        temp.close()
        loader = Loader()
        suite = loader.load_suite_from_file(temp.name)
        self.assertEqual(1, len(suite._tests))
        self.assertEqual('Multiple version lines appeared.', suite._tests[0]._line.description)

    def test_errors_with_version_not_on_first_line(self):
        """A version line after other content is reported as an error test."""
        sample = inspect.cleandoc("# Something that doesn't belong.\n TAP version 13\n 1..0\n ")
        temp = tempfile.NamedTemporaryFile(delete=False)
        temp.write(sample.encode('utf-8'))
        temp.close()
        loader = Loader()
        suite = loader.load_suite_from_file(temp.name)
        self.assertEqual(1, len(suite._tests))
        self.assertEqual('The version must be on the first line.', suite._tests[0]._line.description)

    def test_skip_plan_aborts_loading(self):
        """A skip directive on the plan stops processing of later lines."""
        sample = inspect.cleandoc('1..0 # Skipping this test file.\n ok This should not get processed.\n ')
        temp = tempfile.NamedTemporaryFile(delete=False)
        temp.write(sample.encode('utf-8'))
        temp.close()
        loader = Loader()
        suite = loader.load_suite_from_file(temp.name)
        self.assertEqual(1, len(suite._tests))
        self.assertEqual('Skipping this test file.', suite._tests[0]._line.description)

    # NOTE(review): bare tuple below looks like a stripped
    # @mock.patch('tap.parser.sys.stdin', StringIO('')) decorator — as
    # written it has no effect; confirm against the upstream source.
    ('tap.parser.sys.stdin', StringIO(''))
    def test_loads_from_stream(self):
        """Loading from stdin returns a (possibly empty) TestSuite."""
        loader = Loader()
        suite = loader.load_suite_from_stdin()
        self.assertTrue(isinstance(suite, unittest.TestSuite))
class StorageLookup(BaseDB):
    """Key-value access to one account's storage trie.

    Slot keys are hashed (keccak of the 32-byte-padded slot) before trie
    lookup. Reads go to a read-only trie over the backing db until a write
    happens; writes go to a pruning trie batched in memory until
    ``commit_to`` flushes them. ``new_trie``/``rollback_trie`` implement a
    checkpoint stack used when storage is deleted and recreated.
    """
    logger = get_extended_debug_logger('eth.db.storage.StorageLookup')
    _write_trie: HexaryTrie
    _trie_nodes_batch: BatchDB
    _historical_write_tries: List[PendingWrites]

    def __init__(self, db: DatabaseAPI, storage_root: Hash32, address: Address) -> None:
        self._db = db
        self._initialize_to_root_hash(storage_root)
        self._address = address

    def _get_write_trie(self) -> HexaryTrie:
        """Lazily build (and cache) the pruning write trie over a batch db."""
        if (self._trie_nodes_batch is None):
            # read_through_deletes so reads of deleted nodes fall back to db.
            self._trie_nodes_batch = BatchDB(self._db, read_through_deletes=True)
        if (self._write_trie is None):
            batch_db = self._trie_nodes_batch
            self._write_trie = HexaryTrie(batch_db, root_hash=self._starting_root_hash, prune=True)
        return self._write_trie

    def _get_read_trie(self) -> HexaryTrie:
        """Return the write trie if one exists, else a fresh read-only trie."""
        if (self._write_trie is not None):
            return self._write_trie
        else:
            # Creating "fresh" is cheap: the trie only stores the root hash.
            return HexaryTrie(self._db, root_hash=self._starting_root_hash)

    def _decode_key(self, key: bytes) -> bytes:
        """Convert a raw slot into its hashed trie key (keccak of pad32)."""
        padded_slot = pad32(key)
        return keccak(padded_slot)

    def __getitem__(self, key: bytes) -> bytes:
        """Read a slot; wraps missing-node errors with storage context."""
        hashed_slot = self._decode_key(key)
        read_trie = self._get_read_trie()
        try:
            return read_trie[hashed_slot]
        except trie_exceptions.MissingTrieNode as exc:
            raise MissingStorageTrieNode(exc.missing_node_hash, self._starting_root_hash, exc.requested_key, exc.prefix, self._address) from exc

    def __setitem__(self, key: bytes, value: bytes) -> None:
        """Write a slot value into the (lazily created) write trie."""
        hashed_slot = self._decode_key(key)
        write_trie = self._get_write_trie()
        write_trie[hashed_slot] = value

    def _exists(self, key: bytes) -> bool:
        # Membership check against whichever trie currently serves reads.
        hashed_slot = self._decode_key(key)
        read_trie = self._get_read_trie()
        return (hashed_slot in read_trie)

    def __delitem__(self, key: bytes) -> None:
        """Delete a slot; wraps missing-node errors with storage context."""
        hashed_slot = self._decode_key(key)
        write_trie = self._get_write_trie()
        try:
            del write_trie[hashed_slot]
        except trie_exceptions.MissingTrieNode as exc:
            raise MissingStorageTrieNode(exc.missing_node_hash, self._starting_root_hash, exc.requested_key, exc.prefix, self._address) from exc

    def has_changed_root(self) -> bool:
        """True once any write has materialized a write trie."""
        return (self._write_trie is not None)

    def get_changed_root(self) -> Hash32:
        """Return the post-write root hash; invalid before any write."""
        if (self._write_trie is not None):
            return self._write_trie.root_hash
        else:
            raise ValidationError('Asked for changed root when no writes have been made')

    def _initialize_to_root_hash(self, root_hash: Hash32) -> None:
        # Reset all mutable state so reads start from `root_hash` again.
        self._starting_root_hash = root_hash
        self._write_trie = None
        self._trie_nodes_batch = None
        self._historical_write_tries = []

    def commit_to(self, db: DatabaseAPI) -> None:
        """Flush pending trie nodes to `db` and restart from the new root."""
        if (self._trie_nodes_batch is None):
            raise ValidationError(f"It is invalid to commit an account's storage if it has no pending changes. Always check storage_lookup.has_changed_root before attempting to commit. Write tries on stack = {len(self._historical_write_tries)}; Root hash = {encode_hex(self._starting_root_hash)}")
        # apply_deletes=False: deleted (pruned) nodes may still be needed by
        # other tries sharing the backing db.
        self._trie_nodes_batch.commit_to(db, apply_deletes=False)
        self._initialize_to_root_hash(self._write_trie.root_hash)

    def new_trie(self) -> int:
        """Push the current write state onto the checkpoint stack and start
        an empty trie (used when an account's storage is wiped).

        Returns the index to pass to :meth:`rollback_trie`.
        """
        write_trie = self._get_write_trie()
        self._historical_write_tries.append(PendingWrites(write_trie, self._trie_nodes_batch, self._starting_root_hash))
        new_idx = len(self._historical_write_tries)
        self._starting_root_hash = BLANK_ROOT_HASH
        self._write_trie = None
        self._trie_nodes_batch = None
        return new_idx

    def rollback_trie(self, trie_index: int) -> None:
        """Restore the write state saved at `trie_index`, discarding newer
        checkpoints (undoes a storage deletion)."""
        if (trie_index >= len(self._historical_write_tries)):
            raise ValidationError(f'Trying to roll back a delete to index {trie_index}, but there are only {len(self._historical_write_tries)} indices available.')
        (self._write_trie, self._trie_nodes_batch, self._starting_root_hash) = self._historical_write_tries[trie_index]
        del self._historical_write_tries[trie_index:]
class FasterRcnnBoxCoder(box_coder.BoxCoder):
    """Faster R-CNN box coder: boxes <-> (ty, tx, th, tw) offsets vs anchors."""

    def __init__(self, scale_factors=None):
        """
        Args:
          scale_factors: optional list of 4 positive multipliers applied to
            [ty, tx, th, tw] (commonly [10, 10, 5, 5]); None disables scaling.
        """
        if scale_factors:
            assert (len(scale_factors) == 4)
            for scalar in scale_factors:
                assert (scalar > 0)
        self._scale_factors = scale_factors

    def code_size(self):
        # Four values per box: ty, tx, th, tw.
        return 4

    def _encode(self, boxes, anchors):
        """Encode boxes relative to anchors as scaled (ty, tx, th, tw)."""
        (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
        (ycenter, xcenter, h, w) = boxes.get_center_coordinates_and_sizes()
        # Avoid NaN/inf from division by (or log of) zero-sized boxes/anchors.
        ha += EPSILON
        wa += EPSILON
        h += EPSILON
        w += EPSILON
        tx = ((xcenter - xcenter_a) / wa)
        ty = ((ycenter - ycenter_a) / ha)
        tw = tf.log((w / wa))
        th = tf.log((h / ha))
        if self._scale_factors:
            ty *= self._scale_factors[0]
            tx *= self._scale_factors[1]
            th *= self._scale_factors[2]
            tw *= self._scale_factors[3]
        # Stack -> (4, N); transpose -> (N, 4).
        return tf.transpose(tf.stack([ty, tx, th, tw]))

    def _decode(self, rel_codes, anchors):
        """Invert _encode: turn (ty, tx, th, tw) codes back into corner boxes."""
        (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
        (ty, tx, th, tw) = tf.unstack(tf.transpose(rel_codes))
        if self._scale_factors:
            ty /= self._scale_factors[0]
            tx /= self._scale_factors[1]
            th /= self._scale_factors[2]
            tw /= self._scale_factors[3]
        w = (tf.exp(tw) * wa)
        h = (tf.exp(th) * ha)
        ycenter = ((ty * ha) + ycenter_a)
        xcenter = ((tx * wa) + xcenter_a)
        ymin = (ycenter - (h / 2.0))
        xmin = (xcenter - (w / 2.0))
        ymax = (ycenter + (h / 2.0))
        xmax = (xcenter + (w / 2.0))
        return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
class TrainableFidelityQuantumKernel(TrainableKernel, FidelityQuantumKernel):
    """Fidelity quantum kernel whose feature map has trainable parameters.

    Feature-map parameters are split into trainable ones (bound during
    training) and data-feature ones (bound per sample); evaluation delegates
    to the fidelity kernel after substituting the trained values.
    """

    def __init__(
        self,
        *,
        feature_map: QuantumCircuit | None = None,
        fidelity: BaseStateFidelity | None = None,
        training_parameters: ParameterVector | Sequence[Parameter] | None = None,
        enforce_psd: bool = True,
        evaluate_duplicates: str = 'off_diagonal',
    ) -> None:
        super().__init__(
            feature_map=feature_map,
            fidelity=fidelity,
            training_parameters=training_parameters,
            enforce_psd=enforce_psd,
            evaluate_duplicates=evaluate_duplicates,
        )
        # Features are whatever parameters the feature map has beyond the
        # trainable ones.
        self._num_features = feature_map.num_parameters - self._num_training_parameters
        self._feature_parameters = [
            param
            for param in feature_map.parameters
            if param not in self._training_parameters
        ]
        # All parameters start unbound (None) until assigned.
        self._parameter_dict = dict.fromkeys(feature_map.parameters)

    def _get_parameterization(self, x_vec: np.ndarray, y_vec: np.ndarray) -> tuple[np.ndarray, np.ndarray, KernelIndices]:
        """Bind training parameters into both inputs, then delegate."""
        return super()._get_parameterization(
            self._parameter_array(x_vec), self._parameter_array(y_vec)
        )

    def _get_symmetric_parameterization(self, x_vec: np.ndarray) -> tuple[np.ndarray, np.ndarray, KernelIndices]:
        """Bind training parameters into the input, then delegate."""
        return super()._get_symmetric_parameterization(self._parameter_array(x_vec))
class GINDeepSigns(nn.Module):
    """Sign-invariant eigenvector encoder: rho(concat(phi(x) + phi(-x)))."""

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers, k, dim_pe, rho_num_layers, use_bn=False, use_ln=False, dropout=0.5, activation='relu'):
        super().__init__()
        # phi: a GIN applied independently to each eigenvector channel.
        self.enc = GIN(in_channels, hidden_channels, out_channels, num_layers,
                       use_bn=use_bn, dropout=dropout, activation=activation)
        # rho: an MLP over the k concatenated per-eigenvector encodings.
        self.rho = MLP(out_channels * k, hidden_channels, dim_pe, rho_num_layers,
                       use_bn=use_bn, dropout=dropout, activation=activation)

    def forward(self, x, edge_index, batch_index):
        num_nodes = x.shape[0]
        x = x.transpose(0, 1)
        # Sign invariance: encode both signs of the eigenvectors and sum.
        x = self.enc(x, edge_index) + self.enc(-x, edge_index)
        flattened = x.transpose(0, 1).reshape(num_nodes, -1)
        return self.rho(flattened)
def _lookups_parts_puzzlenames_protodefs(parts):
    """Build lookup tables for a collection of puzzle parts.

    Args:
        parts: iterable of part objects exposing ``dbref`` and ``tags``.

    Returns:
        A 3-tuple of:
          - parts_dict: dbref -> part object
          - puzzlename_tags_dict: puzzle-name tag -> list of part dbrefs
          - puzzle_ingredients: dbref -> prototype definition (sans its key)
    """
    parts_dict = dict()
    puzzlename_tags_dict = dict()
    puzzle_ingredients = dict()
    for part in parts:
        parts_dict[part.dbref] = part
        protodef = proto_def(part, with_tags=False)
        # The prototype key is object-specific and must not leak into the
        # reusable ingredient definition.
        del protodef['prototype_key']
        puzzle_ingredients[part.dbref] = protodef
        tags_categories = part.tags.all(return_key_and_category=True)
        for (tag, category) in tags_categories:
            # Only tags in the puzzles category name a puzzle.
            if (category != _PUZZLES_TAG_CATEGORY):
                continue
            # setdefault replaces the manual key-presence check.
            puzzlename_tags_dict.setdefault(tag, []).append(part.dbref)
    return (parts_dict, puzzlename_tags_dict, puzzle_ingredients)
def check_conversion_tensor_names(model, custom_objects=None):
    """Verify every quantized weight/activation name survives .pb conversion.

    Quantizes `model` with a fixed QuantSim config, exports it, then checks
    that each name appearing in the exported encodings also exists as an op
    in the converted frozen graph. Fails the enclosing test otherwise.

    NOTE(review): `custom_objects` is accepted but unused in the visible
    body — confirm whether callers rely on it.
    """
    tf.keras.backend.clear_session()

    def get_converted_models_weight_names(converted_model_path) -> set:
        # Load the frozen GraphDef and collect every operation name.
        converted_weight_names = set()
        with tf.compat.v1.Session() as persisted_sess:
            with gfile.FastGFile(converted_model_path, 'rb') as f:
                graph_def = tf.compat.v1.GraphDef()
                graph_def.ParseFromString(f.read())
                persisted_sess.graph.as_default()
                tf.import_graph_def(graph_def, name='')
            for op in persisted_sess.graph.get_operations():
                converted_weight_names.add(op.name)
        return converted_weight_names
    # Fixed quantization config: symmetric per-tensor params, quantized
    # outputs, explicit input quantization for the common layer types.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_symmetric': 'True', 'is_quantized': 'True'}, 'per_channel_quantization': 'False'}, 'params': {}, 'op_type': {'Conv': {'is_input_quantized': 'True', 'is_output_quantized': 'True'}, 'ConvTranspose': {'is_input_quantized': 'True', 'is_output_quantized': 'True'}, 'Gemm': {'is_input_quantized': 'True', 'is_output_quantized': 'True'}, 'MatMul': {'is_input_quantized': 'True', 'is_output_quantized': 'True'}, 'MaxPooling2D': {'is_input_quantized': 'True', 'is_output_quantized': 'True'}}, 'supergroups': [], 'model_input': {}, 'model_output': {'is_output_quantized': 'True'}}
    with tempfile.TemporaryDirectory() as tmp_dir:
        config_path = os.path.join(tmp_dir, 'config.json')
        with open(config_path, 'w') as f:
            json.dump(quantsim_config, f)
        # Run one batch through the model so it is built before quantization.
        random_input_data = tf.random.normal(shape=(128, *model.input_shape[1:]))
        _ = model(random_input_data)
        sim = QuantizationSimModel(model, quant_scheme='tf', config_file=config_path)
        sim.compute_encodings((lambda m, _: m.predict(random_input_data)), None)
        sim.export(tmp_dir, model.name)
        encodings = sim.get_encodings_dict()
        encoding_weight_names = {*encodings['param_encodings'].keys(), *encodings['activation_encodings'].keys()}
        # Strip the ':0'-style tensor suffix; dropout ops are removed by the
        # converter and so are excluded from the comparison.
        original_weight_names = {weight_name.split(':')[0] for weight_name in encoding_weight_names if ('dropout' not in weight_name)}
        converted_weight_names = get_converted_models_weight_names(os.path.join(tmp_dir, f'{model.name}_converted.pb'))
        missing_weight_names = original_weight_names.difference(converted_weight_names)
        assert (not missing_weight_names), f'Weight name(s): {missing_weight_names} are missing'
def main():
    """Benchmark the SMT-LIB parser over a random sample of benchmark files.

    Reads --base/--count/--out from the command line, parses up to `count`
    files in a process pool, prints mean/max timings, and dumps statistics
    to the output file.
    """
    parser = argparse.ArgumentParser(description='SMT-LIB Parser Benchmarking')
    parser.add_argument('--base', type=str, nargs='?', help='top-directory of the benchmarks')
    parser.add_argument('--count', type=int, nargs='?', default=(- 1), help='number of files to benchmark')
    parser.add_argument('--out', type=str, default='stats.out', nargs='?', help='Where to save the statistics')
    args = parser.parse_args()
    # Fixed seed so the sampled file subset is reproducible across runs.
    random.seed(42)
    chunks = multiprocessing.cpu_count()
    file_list = list(get_all_smt_files(args.base))
    random.shuffle(file_list)
    if (args.count == (- 1)):
        files_cnt = len(file_list)
    else:
        files_cnt = args.count
    print(('Submitting %d jobs, %d at the time' % (files_cnt, chunks)))
    # Context manager terminates and reaps the worker processes; the
    # original leaked the Pool (never closed/joined).
    with multiprocessing.Pool() as p:
        timings = p.map(execute_script_fname, islice(file_list, files_cnt), chunks)
    if (not timings):
        # Guard against ZeroDivisionError / ValueError on an empty sample.
        print('No benchmark files were processed; nothing to report')
        return
    mean = (sum((x[0] for x in timings)) / len(timings))
    print(('The mean execution time was %0.2f seconds' % mean))
    print(('The max execution time was %0.2f seconds' % max((x[0] for x in timings))))
    outfile = args.out
    dump_stats(timings, outfile)
    print(("The statistics file has been generated in '%s'" % outfile))
.parametrize('p, size', [(np.array(0.5, dtype=config.floatX), None), (np.array(0.5, dtype=config.floatX), []), (np.array(0.5, dtype=config.floatX), [2, 3]), (np.full((1, 2), 0.5, dtype=config.floatX), None)])
def test_bernoulli_samples(p, size):
    """Samples drawn via `bernoulli` should match direct rng_fn draws."""

    def reference(*args, size=None, random_state=None, **kwargs):
        # Reproduce the op's own sampling path for comparison.
        return bernoulli.rng_fn(random_state, *(args + (size,)))

    compare_sample_values(bernoulli, p, size=size, test_fn=reference)
_plugins((ep for ep in (list(iter_entry_points('rasterio.rio_commands')) + list(iter_entry_points('rasterio.rio_plugins')))))
()
_opt
_opt
('--aws-profile', help='Select a profile from the AWS credentials file')
('--aws-no-sign-requests', is_flag=True, help='Make requests anonymously')
('--aws-requester-pays', is_flag=True, help='Requester pays data transfer costs')
_option(version=rasterio.__version__, message='%(version)s')
('--gdal-version', is_eager=True, is_flag=True, callback=gdal_version_cb)
('--show-versions', help='Show dependency versions', is_eager=True, is_flag=True, callback=show_versions_cb)
_context
def main_group(ctx, verbose, quiet, aws_profile, aws_no_sign_requests, aws_requester_pays, gdal_version, show_versions):
    """Rasterio CLI entry point: configure logging and a shared GDAL env."""
    verbosity = verbose - quiet
    configure_logging(verbosity)
    envopts = {'CPL_DEBUG': verbosity > 2}
    ctx.obj = {'verbosity': verbosity, 'aws_profile': aws_profile}
    if aws_profile or aws_no_sign_requests or aws_requester_pays:
        # Any AWS-related flag switches the env to an explicit AWS session.
        session = AWSSession(
            profile_name=aws_profile,
            aws_unsigned=aws_no_sign_requests,
            requester_pays=aws_requester_pays,
        )
        ctx.obj['env'] = rasterio.Env(session=session, **envopts)
    else:
        ctx.obj['env'] = rasterio.Env(**envopts)
class SysNlg(AbstractNlg):
    """Template-based NLG for system dialog acts (restaurant domain)."""

    def __init__(self, domain, complexity):
        self.domain = domain
        self.complexity = complexity
        super().__init__(domain=domain, complexity=complexity)

    def generate_sent(self, actions, domain=None, templates=SysCommonNlg.templates, generator=None, context=None):
        """Render a list of system dialog acts into a single utterance.

        Args:
            actions: a dialog act or list of dialog acts to verbalize.
            domain: optional domain object; when set, parameterless acts use
                its greeting instead of a sampled template.
            templates: act -> list of surface templates to sample from.
            generator: only the template path (None) is implemented; any
                other value raises NotImplementedError.
            context: unused in the visible body.

        Returns:
            (utterance, lexicalized_actions): the joined sentence string and
            deep copies of the rendered acts.

        Raises:
            ValueError: for an unknown dialog act.
            NotImplementedError: when `generator` is not None.
        """
        str_actions = []
        lexicalized_actions = []
        if (not isinstance(actions, list)):
            actions = [actions]
        if (generator is None):
            actions_with_parameters = [SystemAct.ASK_TYPE, SystemAct.PRESENT_RESULT, SystemAct.PROVIDE_INFO, SystemAct.BOOKING_SUCCESS, SystemAct.ASK_RESERVATION_INFO]
            actions_without_parameters = [SystemAct.NOMATCH_RESULT, SystemAct.NO_OTHER, SystemAct.BOOKING_FAIL, SystemAct.GOODBYE]
            for a in actions:
                # Copy before any lexicalization so callers get the act as rendered.
                a_copy = copy.deepcopy(a)
                if (a.act in actions_without_parameters):
                    if domain:
                        str_actions.append(domain.greet)
                    else:
                        str_actions.append(self.sample(templates[a.act]))
                elif (a.act == SystemAct.ASK_TYPE):
                    # Join 1-3 requested slots into a natural conjunction.
                    args = [p for p in a.parameters]
                    if (len(args) == 1):
                        args = args[0]
                    elif (len(args) == 2):
                        args = ((args[0] + ' and ') + args[1])
                    elif (len(args) == 3):
                        args = ((((args[0] + ', ') + args[1]) + ' and ') + args[2])
                    str_actions.append(self.sample(templates[a.act]).replace('[informable_slot]', args))
                elif (a.act == SystemAct.PRESENT_RESULT):
                    # Fill whichever result slots the act provides.
                    sent = self.sample(templates[a.act])
                    if ('name' in a.parameters):
                        sent = sent.replace('[name]', a.parameters['name'])
                    if ('food' in a.parameters):
                        sent = sent.replace('[food]', a.parameters['food'])
                    if ('area' in a.parameters):
                        sent = sent.replace('[area]', a.parameters['area'])
                    if ('pricerange' in a.parameters):
                        sent = sent.replace('[pricerange]', a.parameters['pricerange'])
                    str_actions.append(sent)
                elif (a.act == SystemAct.PROVIDE_INFO):
                    sent = self.sample(templates[a.act])
                    if ('name' in a.parameters):
                        sent = sent.replace('[name]', a.parameters['name'])
                    if ('address' in a.parameters):
                        sent = sent.replace('[address]', a.parameters['address'])
                    if ('phone' in a.parameters):
                        sent = sent.replace('[phone]', a.parameters['phone'])
                    if ('postcode' in a.parameters):
                        sent = sent.replace('[postcode]', a.parameters['postcode'])
                    str_actions.append(sent)
                elif (a.act == SystemAct.BOOKING_SUCCESS):
                    sent = self.sample(templates[a.act])
                    if ('reference' in a.parameters):
                        sent = sent.replace('[reference]', a.parameters['reference'])
                    str_actions.append(sent)
                elif (a.act == SystemAct.ASK_RESERVATION_INFO):
                    # Phrase the missing reservation slots (people/day/time).
                    sent = self.sample(templates[a.act])
                    args = [p for p in a.parameters]
                    if (len(args) == 3):
                        args = 'how many people and what day and time'
                    elif (len(args) == 2):
                        if (('people' in args) and ('day' in args)):
                            args = 'how many people and what day'
                        elif (('people' in args) and ('time' in args)):
                            args = 'how many people and what time'
                        elif (('time' in args) and ('day' in args)):
                            args = 'what day and time'
                    elif (len(args) == 1):
                        if ('people' in args):
                            args = 'how many people'
                        else:
                            args = ('what ' + args[0])
                    else:
                        # Unexpected slot combination — dump it for debugging.
                        print(args)
                    sent = sent.replace('[reservation_slot]', args)
                    str_actions.append(sent)
                else:
                    raise ValueError(('Unknown dialog act %s' % a.act))
                lexicalized_actions.append(a_copy)
            return (' '.join(str_actions), lexicalized_actions)
        else:
            raise NotImplementedError
        # NOTE(review): unreachable — both branches above return or raise.
        return (' '.join(str_actions), lexicalized_actions)
def test_osv_skipped_dep():
    """A skipped dependency still appears in OSV results, with zero vulns."""
    osv = service.OsvService()
    dep = service.SkippedDependency(name='foo', skip_reason='skip-reason')
    results: dict[(service.Dependency, list[service.VulnerabilityResult])] = dict(
        osv.query_all(iter([dep]))
    )
    assert len(results) == 1
    assert dep in results
    assert not results[dep]
class CallbackQuery(Object, Update):
    """An incoming callback query from an inline keyboard button.

    Exactly one of ``message`` (button on a bot-sent message) or
    ``inline_message_id`` (button on an inline-mode message) is set; the
    edit_* helpers dispatch on which one is present.
    """

    def __init__(self, *, client: 'pyrogram.Client'=None, id: str, from_user: 'types.User', chat_instance: str, message: 'types.Message'=None, inline_message_id: str=None, data: Union[(str, bytes)]=None, game_short_name: str=None, matches: List[Match]=None):
        super().__init__(client)
        self.id = id
        self.from_user = from_user
        self.chat_instance = chat_instance
        self.message = message
        self.inline_message_id = inline_message_id
        self.data = data
        self.game_short_name = game_short_name
        self.matches = matches

    # NOTE(review): defined without `self` and called with the raw update —
    # looks like a stripped @staticmethod decorator; confirm upstream.
    async def _parse(client: 'pyrogram.Client', callback_query, users) -> 'CallbackQuery':
        """Build a CallbackQuery from a raw Telegram update."""
        message = None
        inline_message_id = None
        if isinstance(callback_query, raw.types.UpdateBotCallbackQuery):
            chat_id = utils.get_peer_id(callback_query.peer)
            message_id = callback_query.msg_id
            # Prefer the cache; fall back to fetching the message.
            message = client.message_cache[(chat_id, message_id)]
            if (not message):
                message = (await client.get_messages(chat_id, message_id))
        elif isinstance(callback_query, raw.types.UpdateInlineBotCallbackQuery):
            inline_message_id = utils.pack_inline_message_id(callback_query.msg_id)
        try:
            # Callback payloads are bytes; decode to str when valid UTF-8.
            data = callback_query.data.decode()
        except (UnicodeDecodeError, AttributeError):
            data = callback_query.data
        return CallbackQuery(id=str(callback_query.query_id), from_user=types.User._parse(client, users[callback_query.user_id]), message=message, inline_message_id=inline_message_id, chat_instance=str(callback_query.chat_instance), data=data, game_short_name=callback_query.game_short_name, client=client)

    async def answer(self, text: str=None, show_alert: bool=None, url: str=None, cache_time: int=0):
        """Answer this callback query (bound shortcut for answer_callback_query)."""
        return (await self._client.answer_callback_query(callback_query_id=self.id, text=text, show_alert=show_alert, url=url, cache_time=cache_time))

    async def edit_message_text(self, text: str, parse_mode: Optional['enums.ParseMode']=None, disable_web_page_preview: bool=None, reply_markup: 'types.InlineKeyboardMarkup'=None) -> Union[('types.Message', bool)]:
        """Edit the text of the message this button is attached to."""
        if (self.inline_message_id is None):
            return (await self._client.edit_message_text(chat_id=self.message.chat.id, message_id=self.message.id, text=text, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, reply_markup=reply_markup))
        else:
            return (await self._client.edit_inline_text(inline_message_id=self.inline_message_id, text=text, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview, reply_markup=reply_markup))

    async def edit_message_caption(self, caption: str, parse_mode: Optional['enums.ParseMode']=None, reply_markup: 'types.InlineKeyboardMarkup'=None) -> Union[('types.Message', bool)]:
        """Edit the media caption (captions share the text-edit path)."""
        return (await self.edit_message_text(caption, parse_mode, reply_markup=reply_markup))

    async def edit_message_media(self, media: 'types.InputMedia', reply_markup: 'types.InlineKeyboardMarkup'=None) -> Union[('types.Message', bool)]:
        """Replace the media of the message this button is attached to."""
        if (self.inline_message_id is None):
            return (await self._client.edit_message_media(chat_id=self.message.chat.id, message_id=self.message.id, media=media, reply_markup=reply_markup))
        else:
            return (await self._client.edit_inline_media(inline_message_id=self.inline_message_id, media=media, reply_markup=reply_markup))

    async def edit_message_reply_markup(self, reply_markup: 'types.InlineKeyboardMarkup'=None) -> Union[('types.Message', bool)]:
        """Edit only the inline keyboard of the attached message."""
        if (self.inline_message_id is None):
            return (await self._client.edit_message_reply_markup(chat_id=self.message.chat.id, message_id=self.message.id, reply_markup=reply_markup))
        else:
            return (await self._client.edit_inline_reply_markup(inline_message_id=self.inline_message_id, reply_markup=reply_markup))
class InlineInputMessage():
    """Inline-mode message payload: text plus optional syntax and preview flag."""

    def __init__(self, text, syntax=None, preview=True):
        self.text = text
        self.syntax = syntax
        self.preview = preview

    def _serialize(self):
        """Build the API argument dict, detecting a parse mode if possible."""
        payload = {
            'message_text': self.text,
            'disable_web_page_preview': not self.preview,
        }
        detected = syntaxes.guess_syntax(self.text, self.syntax)
        if detected:
            payload['parse_mode'] = detected
        return payload
def test_total():
    """A total TypedDict maps to all-required KW_ONLY input fields and
    key-accessor output fields, in declaration order (a, b, c)."""
    assert (get_typed_dict_shape(Foo) == Shape(input=InputShape(constructor=Foo, kwargs=None, fields=(InputField(type=int, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=None), InputField(type=str, id='b', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=None), InputField(type=bool, id='c', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=None)), params=(Param(field_id='a', name='a', kind=ParamKind.KW_ONLY), Param(field_id='b', name='b', kind=ParamKind.KW_ONLY), Param(field_id='c', name='c', kind=ParamKind.KW_ONLY)), overriden_types=frozenset({})), output=OutputShape(fields=(OutputField(type=int, id='a', default=NoDefault(), metadata=MappingProxyType({}), accessor=create_key_accessor('a', access_error=None), original=None), OutputField(type=str, id='b', default=NoDefault(), metadata=MappingProxyType({}), accessor=create_key_accessor('b', access_error=None), original=None), OutputField(type=bool, id='c', default=NoDefault(), metadata=MappingProxyType({}), accessor=create_key_accessor('c', access_error=None), original=None)), overriden_types=frozenset({}))))
class RenderTarget(GuiRenderComponent):
    """GUI component that renders a Camera's view into an offscreen texture."""
    # source: the Camera whose view is rendered into this target.
    source = ShowInInspector(Camera, None)
    depth = ShowInInspector(float, 0.0)
    # canvas: whether to also render the source camera's 2D canvas.
    canvas = ShowInInspector(bool, True, 'Render Canvas')
    flipY = 1

    def __init__(self):
        super(RenderTarget, self).__init__()
        self.setup = False
        self.size = Vector2.zero()
        self.texture = None
        # renderPass guards against re-entrant rendering of this target.
        self.renderPass = False

    def PreRender(self):
        """Render the source camera into this target's framebuffer.

        Saves the relevant GL state up front and restores it afterwards so
        the surrounding render pass is unaffected. No-op while a render is
        already in progress or when no source camera is set.
        """
        if (self.renderPass or (self.source is None)):
            return
        self.renderPass = True
        if (self.source is self.scene.mainCamera):
            # Rendering the main camera from within its own pass would recurse.
            if self.scene.mainCamera.renderPass:
                raise PyUnityException('Cannot render main camera with main camera')
        rectTransform = self.GetComponent(RectTransform)
        if (rectTransform is None):
            return
        # Snapshot GL state to restore after the offscreen pass.
        previousShader = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM)
        previousVAO = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING)
        previousVBO = gl.glGetIntegerv(gl.GL_ARRAY_BUFFER_BINDING)
        previousFBO = gl.glGetIntegerv(gl.GL_DRAW_FRAMEBUFFER_BINDING)
        previousViewport = gl.glGetIntegerv(gl.GL_VIEWPORT)
        previousDepthMask = gl.glGetIntegerv(gl.GL_DEPTH_WRITEMASK)
        self.genBuffers()
        # Resize the attachments when the rect's size changed.
        size = (rectTransform.GetRect() + rectTransform.offset).size()
        if (size != self.size):
            self.setSize(size)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.framebuffer)
        gl.glDepthMask(gl.GL_TRUE)
        self.source.Resize(*self.size)
        renderers = self.scene.FindComponents(MeshRenderer)
        lights = self.scene.FindComponents(Light)
        self.source.renderPass = True
        self.source.RenderDepth(renderers, lights)
        self.source.RenderScene(renderers, lights)
        self.source.RenderSkybox()
        if (self.canvas and (self.source.canvas is not None)):
            # Render2D overwrites the GUI projection; put it back afterwards.
            previousProjection = self.source.guiShader.uniforms['projection']
            self.source.Render2D()
            self.source.guiShader.setMat4(b'projection', previousProjection)
        # Restore the saved GL state.
        gl.glUseProgram(previousShader)
        gl.glBindVertexArray(previousVAO)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, previousVBO)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, previousFBO)
        gl.glViewport(*previousViewport)
        gl.glDepthMask(previousDepthMask)
        self.renderPass = False

    def saveImg(self, path):
        """Save the current framebuffer contents as an image file at `path`."""
        previousFBO = gl.glGetIntegerv(gl.GL_DRAW_FRAMEBUFFER_BINDING)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.framebuffer)
        data = gl.glReadPixels(0, 0, *self.size, gl.GL_RGB, gl.GL_UNSIGNED_BYTE)
        im = Image.frombytes('RGB', tuple(self.size), data)
        # GL rows are bottom-up; flip so the saved image is upright.
        im.transpose(Image.Transpose.FLIP_TOP_BOTTOM).save(path)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, previousFBO)

    def genBuffers(self, force=False):
        """Create the framebuffer, color texture, and depth renderbuffer.

        Idempotent unless `force` is True. Raises PyUnityException when the
        framebuffer does not reach complete status.
        """
        if (self.setup and (not force)):
            return
        self.framebuffer = gl.glGenFramebuffers(1)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.framebuffer)
        self.texID = gl.glGenTextures(1)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.texID)
        # Allocate at screen size initially; setSize reallocates on demand.
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, *Screen.size, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, None)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
        self.texture = Texture2D.FromOpenGL(self.texID)
        self.renderbuffer = gl.glGenRenderbuffers(1)
        gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.renderbuffer)
        gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_DEPTH_COMPONENT, *Screen.size)
        gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_RENDERBUFFER, self.renderbuffer)
        gl.glFramebufferTexture(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, self.texID, 0)
        gl.glDrawBuffers(1, convert(ctypes.c_int, [gl.GL_COLOR_ATTACHMENT0]))
        if (gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE):
            raise PyUnityException('Framebuffer setup failed')
        self.setup = True

    def setSize(self, size):
        """Reallocate the color texture and depth buffer for a new size."""
        self.size = round(size)
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.texID)
        gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, *self.size, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, None)
        gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.renderbuffer)
        gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_DEPTH_COMPONENT, *self.size)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
    """Train a legacy masked LM for one epoch on *data_dir* using *arch*.

    *extra_args* is appended to the fixed argument list before parsing.
    """
    base_args = [
        '--task', 'cross_lingual_lm',
        data_dir,
        '--arch', arch,
        '--optimizer', 'adam',
        '--lr-scheduler', 'reduce_lr_on_plateau',
        '--lr-shrink', '0.5',
        '--lr', '0.0001',
        '--stop-min-lr', '1e-09',
        '--dropout', '0.1',
        '--attention-dropout', '0.1',
        '--criterion', 'legacy_masked_lm_loss',
        '--masked-lm-only',
        '--monolingual-langs', 'in,out',
        '--num-segment', '5',
        '--encoder-layers', '1',
        '--encoder-embed-dim', '32',
        '--encoder-attention-heads', '1',
        '--encoder-ffn-embed-dim', '32',
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--dataset-impl', 'raw',
        '--num-workers', '0',
    ]
    parser = options.get_training_parser()
    parsed_args = options.parse_args_and_arch(parser, base_args + list(extra_args))
    train.main(parsed_args)
@_kernel_api(params={'p': POINTER, 'getattrlistbulk_args': POINTER, 'retval': POINTER})
def hook__getattrlistbulk(ql, address, params):
    """Emulated XNU ``getattrlistbulk``: enumerate the directory behind
    ``dirfd`` and pack one ``vnode_attr`` per entry into the caller's
    attribute buffer via the kernel's ``_vfs_attr_pack``, called through
    generated stub code pushed onto the guest stack.

    NOTE(review): the original file had a bare ``_kernel_api(...)`` statement
    above the def — the '@' decorator marker appears to have been lost and is
    restored here; confirm against upstream.
    """
    getattrlistbulk_args = getattrlistbulk_args_t(ql, params['getattrlistbulk_args']).loadFromMem()
    dirfd = ql.os.ev_manager.map_fd[getattrlistbulk_args.dirfd]
    vfs_attr_pack = ql.loader.kernel_extrn_symbols_detail[b'_vfs_attr_pack']['n_value']
    # Guest-side iovec describing the caller's attribute buffer.
    uiovp_addr = ql.os.heap.alloc(ctypes.sizeof(user_iovec_t))
    uiovp = user_iovec_t(ql, uiovp_addr)
    uiovp.iov_base = getattrlistbulk_args.attributeBuffer
    uiovp.iov_len = getattrlistbulk_args.bufferSize
    uiovp.updateToMem()
    # uio wrapping that single iovec, as _vfs_attr_pack expects.
    uio_addr = ql.os.heap.alloc(ctypes.sizeof(uio_t))
    uio = uio_t(ql, uio_addr)
    uio.uio_iovs = iovecs_t(kiovp=POINTER64(uiovp_addr), uiovp=POINTER64(uiovp_addr))
    uio.uio_iovcnt = 1
    uio.uio_offset = 0
    uio.uio_segflg = 8
    uio.uio_rw = 0
    uio.uio_resid_64 = getattrlistbulk_args.bufferSize
    uio.uio_size = 72
    uio.uio_max_iovs = 1
    uio.uio_flags = 1
    uio.updateToMem()
    # Report the number of directory entries through *retval first.
    result = sum(1 for _ in dirfd.iterdir())
    ql.mem.write(params['retval'], struct.pack('<Q', result))
    for path in dirfd.iterdir():
        info = path.stat()
        vap_addr = ql.os.heap.alloc(ctypes.sizeof(vnode_attr_t))
        vap = vnode_attr_t(ql, vap_addr)
        # FIXME(review): the right-hand sides of these two assignments were
        # missing in the original source (syntax error). They should be the
        # ATTR_* capability bitmasks consumed by vfs_attr_pack; restore the
        # real values from upstream. 0 is only a parseable placeholder.
        vap.va_supported = 0
        vap.va_active = 0
        vap.va_nlink = info.st_nlink
        vap.va_total_size = info.st_size
        vap.va_data_size = info.st_size
        vap.va_uid = info.st_uid
        vap.va_gid = info.st_gid
        vap.va_mode = info.st_mode
        vap.va_fileid = info.st_ino
        vap.va_devid = info.st_dev
        # st_*time_ns is total nanoseconds since the epoch; the sub-second
        # component is mod 10**9 (the original used % 1000000, which is wrong).
        vap.va_create_time = timespec_t(tv_sec=int(info.st_ctime), tv_nsec=(info.st_ctime_ns % 1000000000))
        vap.va_access_time = timespec_t(tv_sec=int(info.st_atime), tv_nsec=(info.st_atime_ns % 1000000000))
        vap.va_modify_time = timespec_t(tv_sec=int(info.st_mtime), tv_nsec=(info.st_mtime_ns % 1000000000))
        # NUL-terminated entry name in guest memory.
        truename = (path.name + '\x00')
        vap.va_name = POINTER64(ql.os.heap.alloc(1024))
        ql.mem.write(vap.va_name.value, truename.encode())
        vap.updateToMem()
        code = gen_stub_code(ql, [0, uio.base, getattrlistbulk_args.alist, getattrlistbulk_args.options, vap.base, 0, params['p']], vfs_attr_pack)
        print(('Trampoline created at 0x%x for %s (0x%x) and 0x%x' % (code, truename, vap.va_name.value, vap.base)))
        ql.stack_push(code)
    return
class InstanceNormModel(torch.nn.Module):
    """Small Conv2d -> InstanceNorm2d -> ReLU pipeline (10 -> 20 channels, 3x3)."""

    def __init__(self):
        super(InstanceNormModel, self).__init__()
        # 3x3 convolution expanding 10 input channels to 20.
        self.conv1 = torch.nn.Conv2d(10, 20, 3)
        # Per-instance, per-channel normalization of the conv output.
        self.in1 = torch.nn.InstanceNorm2d(20)
        self.relu1 = torch.nn.ReLU()

    def forward(self, x):
        """Apply conv, instance norm, and ReLU in sequence."""
        return self.relu1(self.in1(self.conv1(x)))
class TimeInstanceNorm(nn.Module):
    """Normalize a (batch, channels, time) tensor over the channel axis,
    independently at every (batch, time) position."""

    def __init__(self, eps=1e-05):
        super().__init__()
        # Added to the variance before the square root for numerical stability.
        self.eps = eps

    def cal_stats(self, x):
        """Return per-(batch, time) channel mean and std, each shaped (b, 1, t)."""
        batch, _, steps = x.shape
        mu = x.mean(1).view(batch, 1, steps)
        sigma = (x.var(1) + self.eps).sqrt().view(batch, 1, steps)
        return (mu, sigma)

    def forward(self, x, return_stats=False):
        """Normalize *x*; optionally also return the (mean, std) used."""
        mu, sigma = self.cal_stats(x)
        normed = (x - mu) / sigma
        return (normed, mu, sigma) if return_stats else normed
def build_dataset(cfg, default_args=None):
    """Build a dataset from *cfg*, recursing through RepeatDataset wrappers."""
    if cfg['type'] != 'RepeatDataset':
        return build_from_cfg(cfg, DATASETS, default_args)
    # Wrapper type: build the inner dataset first, then repeat it.
    from .dataset_wrappers import RepeatDataset
    inner = build_dataset(cfg['dataset'], default_args)
    return RepeatDataset(inner, cfg['times'])
def available_readers(as_dict=False, yaml_loader=UnsafeLoader):
    """Return the names (or, with *as_dict*, the full info dicts) of all
    readers whose config files can be loaded, sorted by name."""
    found = []
    for reader_configs in configs_for_reader():
        try:
            info = read_reader_config(reader_configs, loader=yaml_loader)
        except (KeyError, IOError, yaml.YAMLError):
            # Unloadable config: log and move on to the next reader.
            LOG.debug('Could not import reader config from: %s', reader_configs)
            LOG.debug('Error loading YAML', exc_info=True)
            continue
        if as_dict:
            found.append(info)
        else:
            found.append(info['name'])
    if as_dict:
        return sorted(found, key=(lambda info: info['name']))
    return sorted(found)
@_criterion('model', dataclass=ModelCriterionConfig)
class ModelCriterion(FairseqCriterion):
    """Criterion that delegates loss computation to the model itself.

    The model's output must provide a 'sample_size' entry and either a
    `get_losses(net_output, sample)` method or a 'losses' dict; each named
    loss is scaled by `loss_weights` (weight 1.0 for all when the mapping is
    empty) and summed.

    NOTE(review): the original source had `_criterion('model', ...)` as a bare
    statement before the class — the '@' (and possibly a longer name such as
    fairseq's `register_criterion`) was lost; restored as a decorator.
    Confirm against upstream.
    """

    def __init__(self, task, loss_weights=None, log_keys=None):
        super().__init__(task)
        # Normalize None to empty containers: forward() unconditionally calls
        # len() on loss_weights and iterates log_keys, which raises on None.
        self.loss_weights = loss_weights if (loss_weights is not None) else {}
        self.log_keys = log_keys if (log_keys is not None) else []

    def forward(self, model, sample, reduce=True):
        """Compute the weighted sum of model-reported losses.

        Returns ``(loss, sample_size, logging_output)`` per the fairseq
        criterion contract.
        """
        net_output = model(**sample['net_input'])
        sample_size = net_output['sample_size']
        scaled_losses = {}
        # Prefer an explicit get_losses() hook; fall back to a 'losses' dict.
        if hasattr(model, 'get_losses'):
            losses = model.get_losses(net_output, sample)
        elif (isinstance(net_output, dict) and ('losses' in net_output)):
            losses = net_output['losses']
        else:
            raise Exception('Could not retrieve losses')
        for (lk, p) in losses.items():
            try:
                # Empty mapping means "weight everything 1.0"; otherwise each
                # reported loss must have an explicit weight.
                coef = (1.0 if (len(self.loss_weights) == 0) else self.loss_weights[lk])
            except KeyError:
                logger.error(f'weight for loss {lk} is not in loss_weights ({self.loss_weights})')
                raise
            # Skip zero-weighted and missing losses entirely.
            if ((coef != 0) and (p is not None)):
                scaled_losses[lk] = (coef * p.float())
        loss = sum(scaled_losses.values())
        if (reduce and (loss.numel() > 1)):
            loss = loss.sum()
        logging_output = {'loss': loss.data, 'ntokens': sample_size, 'nsentences': sample['id'].numel(), 'sample_size': sample_size, '_world_size': 1}
        for lk in self.log_keys:
            if (lk in net_output):
                logging_output[lk] = float(net_output[lk])
        # With several component losses, log each one separately too.
        if (len(scaled_losses) > 1):
            for (lk, l) in scaled_losses.items():
                logging_output[f'loss_{lk}'] = l.item()
        return (loss, sample_size, logging_output)

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from (distributed) workers.

        NOTE(review): declared without `self` in the original; restored as
        @staticmethod per the fairseq criterion API so instance access also
        works — confirm against upstream.
        """
        loss_sum = utils.item(sum((log.get('loss', 0) for log in logging_outputs)))
        ntokens = utils.item(sum((log.get('ntokens', 0) for log in logging_outputs)))
        nsentences = utils.item(sum((log.get('nsentences', 0) for log in logging_outputs)))
        sample_size = utils.item(sum((log.get('sample_size', 0) for log in logging_outputs)))
        metrics.log_scalar('loss', (loss_sum / sample_size), sample_size, round=3)
        metrics.log_scalar('ntokens', ntokens)
        metrics.log_scalar('nsentences', nsentences)
        builtin_keys = {'loss', 'ntokens', 'nsentences', 'sample_size', '_world_size'}
        world_size = utils.item(sum((log.get('_world_size', 0) for log in logging_outputs)))
        for k in logging_outputs[0]:
            if (k not in builtin_keys):
                val = sum((log.get(k, 0) for log in logging_outputs))
                if k.startswith('loss_'):
                    # Component losses normalized like the main loss.
                    metrics.log_scalar(k, (val / sample_size), sample_size, round=3)
                else:
                    # Other logged values are averaged across workers.
                    metrics.log_scalar(k, (val / world_size), round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Summing logging outputs across workers is valid for this criterion."""
        return True
def problem_checks():
    """Return the registry of client "problem" checks, building it on first use.

    Each entry maps a problem slug to a dict that may contain:
      * 'spec' -- MongoDB query matching clients exhibiting the problem
      * 'grace-period' -- how long the condition may persist before it counts
      * 'alert-frequency' -- minimum interval between repeat alerts
      * 'filter' -- extra Python-side filter callable

    Entries without a 'spec' ('deprecated-port', 'pending-patches',
    'expiring-certificate') only carry timing settings here; presumably their
    detection happens elsewhere — confirm against the callers.
    """
    global _problem_checks
    # Memoized in the module-level cache; built only once per process.
    if _problem_checks:
        return _problem_checks
    _problem_checks = {'not-reporting': {'grace-period': datetime.timedelta(hours=4), 'alert-frequency': datetime.timedelta(days=1), 'spec': {'submitted_at': {'$lt': business_days_ago(1)}}, 'filter': not_reporting_filter}, 'no-location': {'grace-period': datetime.timedelta(days=1), 'spec': {'plugins.geolocation': 'unknown'}}, 'ssh-password-authentication': {'spec': {'$and': [{'plugins.sshd.status': 'running'}, {'plugins.sshd.config.passwordauthentication': 'yes'}]}}, 'ssh-root-password-authentication': {'spec': {'$and': [{'plugins.sshd.status': 'running'}, {'plugins.sshd.config.permitrootlogin': 'yes'}]}}, 'eraagent-absent': {'spec': {'plugins.eraagent.installed': {'$not': {'$eq': True}}}}, 'eraagent-stopped': {'grace-period': datetime.timedelta(hours=4), 'spec': {'plugins.eraagent.running': {'$not': {'$eq': True}}}}, 'eset-absent': {'spec': {'plugins.eset.installed': {'$not': {'$eq': True}}}}, 'eset-out-of-date': {'grace-period': datetime.timedelta(hours=4), 'spec': {'plugins.eset.recent': {'$not': {'$eq': True}}}}, 'eset-stopped': {'grace-period': datetime.timedelta(hours=4), 'spec': {'plugins.eset.running': {'$not': {'$eq': True}}}}, 'os-update-available': {'grace-period': datetime.timedelta(days=90), 'spec': {'plugins.os_updates.release': {'$not': {'$eq': False}}}}, 'os-security-patches-available': {'grace-period': datetime.timedelta(days=1), 'alert-frequency': datetime.timedelta(days=1), 'spec': {'plugins.os_updates.security_patches': {'$not': {'$eq': False}}}}, 'guest-session-enabled': {'spec': {'plugins.guest_session.enabled': {'$not': {'$eq': False}}}}, 'unencrypted-hard-drive': {'spec': {'$or': [{'plugins.hd_encryption.encrypted': {'$eq': False}}, {'plugins.hd_encryption.encrypted': {'$exists': False}}]}}, 'firewall-disabled': {'spec': {'plugins.firewall.status': {'$not': {'$eq': 'on'}}}}, 'screenlock-disabled': {'grace-period': datetime.timedelta(hours=4), 'spec': {'plugins.screenlock.users': {'$exists': True}, 'plugins.screenlock.users.enabled': {'$not': {'$eq': True}}}}, 'deprecated-port': {'grace-period': datetime.timedelta(hours=1)}, 'pending-patches': {'grace-period': datetime.timedelta(hours=4)}, 'expiring-certificate': {}}
    return _problem_checks
def single_proc_playground(local_rank, port, world_size, cfg):
    """Run playground(cfg) as rank *local_rank* of an NCCL process group
    rendezvousing on localhost:*port*, tearing the group down afterwards."""
    init_url = f'tcp://localhost:{port}'
    torch.distributed.init_process_group(backend='nccl', init_method=init_url, world_size=world_size, rank=local_rank)
    # Pin this process to its own GPU before any CUDA work happens.
    torch.cuda.set_device(local_rank)
    playground(cfg)
    torch.distributed.destroy_process_group()
@pytest.mark.parametrize('rich, higher, expected_format', [(True, True, Qt.TextFormat.RichText), (False, False, Qt.TextFormat.PlainText), (None, False, Qt.TextFormat.PlainText)])
@pytest.mark.parametrize('replace', ['test', None])
def test_rich_text(view, qtbot, rich, higher, expected_format, replace):
    """Rich text rendering is applied only when requested via `rich=True`.

    NOTE(review): the parametrize lines were bare `.parametrize(...)` calls in
    the original (a syntax error) — the `@pytest.mark` prefix appears to have
    been lost and is restored here; confirm against upstream.
    """
    level = usertypes.MessageLevel.info
    text = 'with <h1>markup</h1>'
    text2 = 'with <h1>markup</h1> 2'
    info1 = message.MessageInfo(level, text, replace=replace)
    info2 = message.MessageInfo(level, text2, replace=replace, rich=rich)
    # Without a replace key the first message must be cleared by the timer
    # before the second is shown; wait for that signal in that case.
    ctx = (qtbot.wait_signal(view._clear_timer.timeout) if (replace is None) else contextlib.nullcontext())
    with ctx:
        view.show_message(info1)
    assert (len(view._messages) == 1)
    view._messages[0].setWordWrap(False)
    height1 = view.sizeHint().height()
    assert (height1 > 0)
    # The first message never requests rich text, so it is always plain.
    assert (view._messages[0].textFormat() == Qt.TextFormat.PlainText)
    view.show_message(info2)
    assert (len(view._messages) == 1)
    view._messages[0].setWordWrap(False)
    height2 = view.sizeHint().height()
    assert (height2 > 0)
    assert (view._messages[0].textFormat() == expected_format)
    # Rendering the <h1> markup as rich text makes the widget taller.
    if higher:
        assert (height2 > height1)
    else:
        assert (height2 == height1)
def save_atten(imgpath, atten, num_classes=20, base_dir='../save_bins/', idx_base=0):
    """Save one normalized attention map per class as a PNG.

    Each map in *atten* (indexed at cls_idx + idx_base) is resized to the
    source image's dimensions, normalized, and written to
    ``base_dir/<category>/<image id>.png``.
    """
    atten = np.squeeze(atten)
    # The image read and its shape depend only on imgpath — hoisted out of the
    # per-class loop instead of re-reading the file num_classes times.
    img = cv2.imread(imgpath)
    (h, w, _) = np.shape(img)
    for cls_idx in range(num_classes):
        cat_dir = os.path.join(base_dir, idx2catename['voc20'][cls_idx])
        # makedirs(exist_ok=True): os.mkdir raised if base_dir was missing and
        # the exists-check was racy between parallel writers.
        os.makedirs(cat_dir, exist_ok=True)
        cat_map = atten[(cls_idx + idx_base)]
        cat_map = cv2.resize(cat_map, dsize=(w, h))
        cat_map = norm_atten_map(cat_map)
        save_path = os.path.join(cat_dir, (get_imgId(imgpath) + '.png'))
        cv2.imwrite(save_path, cat_map)
def get_dataloader(tokenizer, examples, label_list, tag):
    """Build a DataLoader of segment-level features for *examples*.

    Features are loaded from the on-disk pickle cache when possible and
    otherwise recomputed and re-cached. *tag* selects the cache file name and
    the sampler ('train' -> random shuffling, anything else -> sequential).
    """
    logger.info('start prepare input data')
    cached_train_features_file = os.path.join(args.input_cache_dir, (tag + 'input.pkl'))
    try:
        with open(cached_train_features_file, 'rb') as reader:
            features = pickle.load(reader)
    except Exception:
        # Any cache failure (missing file, corrupt/stale pickle) falls back to
        # recomputation. The original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, which must propagate.
        logger.info('start prepare features_res_lab')
        features = convert_examples_to_features_Segres(examples, label_list, max_seg_num=args.max_segment_num, max_seq_length=args.max_seq_length, tokenizer=tokenizer)
        logger.info(' Saving train features into cached file %s', cached_train_features_file)
        with open(cached_train_features_file, 'wb') as writer:
            pickle.dump(features, writer)
    logger.info(' Num examples = %d', len(examples))
    # Tensorize the per-feature fields for TensorDataset.
    seg_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    seg_token_type_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    seg_attention_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    cls_sep_pos = torch.tensor([f.cls_sep_pos for f in features], dtype=torch.long)
    true_len = torch.tensor([f.true_len for f in features], dtype=torch.long)
    labels = torch.FloatTensor([f.label_id for f in features])
    train_data = TensorDataset(seg_input_ids, seg_token_type_ids, seg_attention_mask, cls_sep_pos, true_len, labels)
    # Shuffle only the training split.
    if (tag == 'train'):
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = SequentialSampler(train_data)
    # NOTE(review): non-train splits also use args.train_batch_size — confirm
    # a separate eval batch size is not intended.
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    return train_dataloader
def main():
    """CLI entry point: compute a Grad-CAM heatmap for one input image and
    optionally score it with insertion/deletion causal metrics, saving the
    per-step frames as videos."""
    args = parse_args()
    # Load the image and normalize it to 224x224 float32 in [0, 1].
    raw_img = cv2.imread(args.input, 1)
    raw_img = cv2.resize(raw_img, (224, 224), interpolation=cv2.INTER_LINEAR)
    raw_img = (np.float32(raw_img) / 255)
    (image, norm_image) = preprocess_img(raw_img)
    # Pretrained torchvision model selected by name, eval mode, on the GPU.
    model = models.__dict__[args.arch](pretrained=True).eval()
    model = model.cuda()
    gc = GradCAM(model, target_layer=args.target_layer)
    heatmap = gc(norm_image.cuda(), class_idx=args.cls_idx).cpu().data
    cam = show_cam(image, heatmap, args.output)
    if args.ins_del:
        # Insertion metric reveals pixels from a blurred substrate; deletion
        # removes them toward a zero image. Step budget is 224*2 per metric.
        blur = (lambda x: gaussian_blur2d(x, kernel_size=(51, 51), sigma=(50.0, 50.0)))
        insertion = CausalMetric(model, 'ins', (224 * 2), substrate_fn=blur)
        deletion = CausalMetric(model, 'del', (224 * 2), substrate_fn=torch.zeros_like)
        out_video_path = './VIDEO'
        check_path_exist(out_video_path)
        ins_path = os.path.join(os.path.join(out_video_path, 'ins'))
        del_path = os.path.join(os.path.join(out_video_path, 'del'))
        check_path_exist(ins_path)
        check_path_exist(del_path)
        norm_image = norm_image.cpu()
        heatmap = heatmap.cpu().numpy()
        ins_score = insertion.evaluate(norm_image, mask=heatmap, cls_idx=None, save_to=ins_path)
        del_score = deletion.evaluate(norm_image, mask=heatmap, cls_idx=None, save_to=del_path)
        print('\nDeletion - {:.5f}\nInsertion - {:.5f}'.format(auc(del_score), auc(ins_score)))
        # Assemble the saved per-step JPEG frames into .avi videos via ffmpeg;
        # output name is the input file's basename without extension.
        video_ins = os.path.join(ins_path, (args.input.split('/')[(- 1)].split('.')[0] + '.avi'))
        video_del = os.path.join(del_path, (args.input.split('/')[(- 1)].split('.')[0] + '.avi'))
        cmd_str_ins = 'ffmpeg -f image2 -i {}/%06d.jpg -b 5000k -r 30 -c:v mpeg4 {} -y'.format(ins_path, video_ins)
        cmd_str_del = 'ffmpeg -f image2 -i {}/%06d.jpg -b 5000k -r 30 -c:v mpeg4 {} -y'.format(del_path, video_del)
        os.system(cmd_str_ins)
        os.system(cmd_str_del)
# NOTE(review): the original line was a bare parenthesized expression
# `('chaperone-procedure*', arity=Arity.geq(2))`, which is not valid Python
# (keyword argument inside a parenthesized expression). It matches the shape
# of a pycket `@expose(...)` primitive registration whose decorator prefix was
# lost — restored below; confirm the decorator name against upstream.
@expose('chaperone-procedure*', arity=Arity.geq(2))
def chaperone_procedure_star(args):
    """Racket `chaperone-procedure*`: wrap *proc* with *check* plus
    impersonator properties unpacked from *args*."""
    (proc, check, keys, vals) = unpack_procedure_args(args, 'chaperone-procedure*')
    # With no check procedure and no properties the chaperone is a no-op:
    # return the original procedure unchanged.
    if ((check is values.w_false) and (not keys)):
        return proc
    return imp.make_interpose_procedure(imp.W_ChpProcedureStar, proc, check, keys, vals)
def test_smoketest_defaults(cli_runner):
    """The smoketest CLI command passes its default parameter values (from
    click's DEFAULT source) through to `_smoketest` unchanged."""
    command = 'raiden smoketest'
    expected = {
        'debug': (ParameterSource.DEFAULT, False),
        'eth_client': (ParameterSource.DEFAULT, EthClient.GETH),
    }
    _, kwargs = get_invoked_kwargs(command, cli_runner, 'raiden.ui.cli._smoketest')
    assert_invoked_kwargs(kwargs, expected)
class TestClickThroughRate(unittest.TestCase):
    """Unit tests for the click_through_rate metric function."""

    def test_click_through_rate_with_valid_input(self) -> None:
        """Valid 1-D and 2-D (multi-task) inputs, unweighted and weighted."""
        # `input` shadows the builtin; kept as-is in this doc-only pass.
        input = torch.tensor([0, 1, 0, 1, 1, 0, 0, 1])
        weights = torch.tensor([1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
        # Unweighted CTR: 4 clicks / 8 impressions = 0.5.
        torch.testing.assert_close(click_through_rate(input), torch.tensor(0.5))
        # NOTE(review): the weighted CTR of this data is sum(w*x)/sum(w)
        # = 7/12 ≈ 0.5833, yet the expected literal reads 0. — this looks like
        # a truncated constant (e.g. 0.5833333); confirm against the metric's
        # reference values.
        torch.testing.assert_close(click_through_rate(input, weights), torch.tensor(0.))
        input = torch.tensor([[0, 1, 0, 1], [1, 0, 0, 1]])
        weights = torch.tensor([[1.0, 2.0, 1.0, 2.0], [1.0, 2.0, 1.0, 1.0]])
        torch.testing.assert_close(click_through_rate(input, num_tasks=2), torch.tensor([0.5, 0.5]))
        # NOTE(review): per-task weighted CTRs here are [4/6, 2/5] ≈
        # [0.6667, 0.4]; the first expected entry 0. also looks truncated —
        # verify.
        torch.testing.assert_close(click_through_rate(input, weights, num_tasks=2), torch.tensor([0., 0.4]))

    def test_click_through_rate_with_invalid_input(self) -> None:
        """Dimension and shape mismatches raise ValueError with clear messages."""
        # 3-D input is rejected outright.
        with self.assertRaisesRegex(ValueError, '^`input` should be a one or two dimensional tensor'):
            click_through_rate(torch.rand(3, 2, 2))
        # Weights must match the input's shape.
        with self.assertRaisesRegex(ValueError, '^tensor `weights` should have the same shape as tensor `input`'):
            click_through_rate(torch.rand(4, 2), torch.rand(3))
        # num_tasks and input dimensionality must agree, in both directions.
        with self.assertRaisesRegex(ValueError, '`num_tasks = 1`, `input` is expected to be one-dimensional tensor,'):
            click_through_rate(torch.tensor([[1, 1], [0, 1]]))
        with self.assertRaisesRegex(ValueError, "`num_tasks = 2`, `input`'s shape is expected to be"):
            click_through_rate(torch.tensor([1, 0, 0, 1]), num_tasks=2)
class AppleScriptLexer(RegexLexer):
    """Lexer for AppleScript source, including the AppleScript Studio
    classes, events, commands, and properties.

    NOTE(review): the original dump lost all non-ASCII characters (the url
    string was left unterminated; '¬', '«', '»', '≥', '≤', '÷' vanished and
    '≠' decomposed to '='). The affected literals below are restored from
    the Pygments upstream lexer — verify against it.
    """
    name = 'AppleScript'
    url = 'https://developer.apple.com/library/archive/documentation/AppleScript/Conceptual/AppleScriptLangGuide'
    aliases = ['applescript']
    filenames = ['*.applescript']
    version_added = '1.0'
    flags = (re.MULTILINE | re.DOTALL)
    Identifiers = '[a-zA-Z]\\w*'
    Literals = ('AppleScript', 'current application', 'false', 'linefeed', 'missing value', 'pi', 'quote', 'result', 'return', 'space', 'tab', 'text item delimiters', 'true', 'version')
    Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ', 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', 'real ', 'record ', 'reference ', 'RGB color ', 'script ', 'text ', 'unit types', '(?:Unicode )?text', 'string')
    BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month', 'paragraph', 'word', 'year')
    HandlerParams = ('about', 'above', 'against', 'apart from', 'around', 'aside from', 'at', 'below', 'beneath', 'beside', 'between', 'for', 'given', 'instead of', 'on', 'onto', 'out of', 'over', 'since')
    Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL', 'choose application', 'choose color', 'choose file( name)?', 'choose folder', 'choose from list', 'choose remote application', 'clipboard info', 'close( access)?', 'copy', 'count', 'current date', 'delay', 'delete', 'display (alert|dialog)', 'do shell script', 'duplicate', 'exists', 'get eof', 'get volume settings', 'info for', 'launch', 'list (disks|folder)', 'load script', 'log', 'make', 'mount volume', 'new', 'offset', 'open( (for access|location))?', 'path to', 'print', 'quit', 'random number', 'read', 'round', 'run( script)?', 'say', 'scripting components', 'set (eof|the clipboard to|volume)', 'store script', 'summarize', 'system attribute', 'system info', 'the clipboard', 'time to GMT', 'write', 'quoted form')
    References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', 'before', 'behind', 'every', 'front', 'index', 'last', 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose')
    Operators = ('and', 'or', 'is equal', 'equals', '(is )?equal to', 'is not', "isn't", "isn't equal( to)?", 'is not equal( to)?', "doesn't equal", 'does not equal', '(is )?greater than', 'comes after', 'is not less than or equal( to)?', "isn't less than or equal( to)?", '(is )?less than', 'comes before', 'is not greater than or equal( to)?', "isn't greater than or equal( to)?", '(is )?greater than or equal( to)?', 'is not less than', "isn't less than", 'does not come before', "doesn't come before", '(is )?less than or equal( to)?', 'is not greater than', "isn't greater than", 'does not come after', "doesn't come after", 'starts? with', 'begins? with', 'ends? with', 'contains?', 'does not contain', "doesn't contain", 'is in', 'is contained by', 'is not in', 'is not contained by', "isn't contained by", 'div', 'mod', 'not', '(a )?(ref( to)?|reference to)', 'is', 'does')
    # NOTE(review): 'whith' below is preserved from the original word list.
    Control = ('considering', 'else', 'error', 'exit', 'from', 'if', 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', 'try', 'until', 'using terms from', 'while', 'whith', 'with timeout( of)?', 'with transaction', 'by', 'continue', 'end', 'its?', 'me', 'my', 'return', 'of', 'as')
    Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get')
    Reserved = ('but', 'put', 'returning', 'the')
    StudioClasses = ('action cell', 'alert reply', 'application', 'box', 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', 'clip view', 'color well', 'color-panel', 'combo box( item)?', 'control', 'data( (cell|column|item|row|source))?', 'default entry', 'dialog reply', 'document', 'drag info', 'drawer', 'event', 'font(-panel)?', 'formatter', 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', 'movie( view)?', 'open-panel', 'outline view', 'panel', 'pasteboard', 'plugin', 'popup button', 'progress indicator', 'responder', 'save-panel', 'scroll view', 'secure text field( cell)?', 'slider', 'sound', 'split view', 'stepper', 'tab view( item)?', 'table( (column|header cell|header view|view))', 'text( (field( cell)?|view))?', 'toolbar( item)?', 'user-defaults', 'view', 'window')
    StudioEvents = ('accept outline drop', 'accept table drop', 'action', 'activated', 'alert ended', 'awake from nib', 'became key', 'became main', 'begin editing', 'bounds changed', 'cell value', 'cell value changed', 'change cell value', 'change item value', 'changed', 'child of item', 'choose menu item', 'clicked', 'clicked toolbar item', 'closed', 'column clicked', 'column moved', 'column resized', 'conclude drop', 'data representation', 'deminiaturized', 'dialog ended', 'document nib name', 'double clicked', 'drag( (entered|exited|updated))?', 'drop', 'end editing', 'exposed', 'idle', 'item expandable', 'item value', 'item value changed', 'items changed', 'keyboard down', 'keyboard up', 'launched', 'load data representation', 'miniaturized', 'mouse down', 'mouse dragged', 'mouse entered', 'mouse exited', 'mouse moved', 'mouse up', 'moved', 'number of browser rows', 'number of items', 'number of rows', 'open untitled', 'opened', 'panel ended', 'parameters updated', 'plugin loaded', 'prepare drop', 'prepare outline drag', 'prepare outline drop', 'prepare table drag', 'prepare table drop', 'read from file', 'resigned active', 'resigned key', 'resigned main', 'resized( sub views)?', 'right mouse down', 'right mouse dragged', 'right mouse up', 'rows changed', 'scroll wheel', 'selected tab view item', 'selection changed', 'selection changing', 'should begin editing', 'should close', 'should collapse item', 'should end editing', 'should expand item', 'should open( untitled)?', 'should quit( after last window closed)?', 'should select column', 'should select item', 'should select row', 'should select tab view item', 'should selection change', 'should zoom', 'shown', 'update menu item', 'update parameters', 'update toolbar item', 'was hidden', 'was miniaturized', 'will become active', 'will close', 'will dismiss', 'will display browser cell', 'will display cell', 'will display item cell', 'will display outline cell', 'will finish launching', 'will hide', 'will miniaturize', 'will move', 'will open', 'will pop up', 'will quit', 'will resign active', 'will resize( sub views)?', 'will select tab view item', 'will show', 'will zoom', 'write to file', 'zoomed')
    StudioCommands = ('animate', 'append', 'call method', 'center', 'close drawer', 'close panel', 'display', 'display alert', 'display dialog', 'display panel', 'go', 'hide', 'highlight', 'increment', 'item for', 'load image', 'load movie', 'load nib', 'load panel', 'load sound', 'localized string', 'lock focus', 'log', 'open drawer', 'path for', 'pause', 'perform action', 'play', 'register', 'resume', 'scroll', 'select( all)?', 'show', 'size to fit', 'start', 'step back', 'step forward', 'stop', 'synchronize', 'unlock focus', 'update')
    StudioProperties = ('accepts arrow key', 'action method', 'active', 'alignment', 'allowed identifiers', 'allows branch selection', 'allows column reordering', 'allows column resizing', 'allows column selection', 'allows customization', 'allows editing text attributes', 'allows empty selection', 'allows mixed state', 'allows multiple selection', 'allows reordering', 'allows undo', 'alpha( value)?', 'alternate image', 'alternate increment value', 'alternate title', 'animation delay', 'associated file name', 'associated object', 'auto completes', 'auto display', 'auto enables items', 'auto repeat', 'auto resizes( outline column)?', 'auto save expanded items', 'auto save name', 'auto save table columns', 'auto saves configuration', 'auto scroll', 'auto sizes all columns to fit', 'auto sizes cells', 'background color', 'bezel state', 'bezel style', 'bezeled', 'border rect', 'border type', 'bordered', 'bounds( rotation)?', 'box type', 'button returned', 'button type', 'can choose directories', 'can choose files', 'can draw', 'can hide', 'cell( (background color|size|type))?', 'characters', 'class', 'click count', 'clicked( data)? column', 'clicked data item', 'clicked( data)? row', 'closeable', 'collating', 'color( (mode|panel))', 'command key down', 'configuration', 'content(s| (size|view( margins)?))?', 'context', 'continuous', 'control key down', 'control size', 'control tint', 'control view', 'controller visible', 'coordinate system', 'copies( on scroll)?', 'corner view', 'current cell', 'current column', 'current( field)? editor', 'current( menu)? item', 'current row', 'current tab view item', 'data source', 'default identifiers', 'delta (x|y|z)', 'destination window', 'directory', 'display mode', 'displayed cell', 'document( (edited|rect|view))?', 'double value', 'dragged column', 'dragged distance', 'dragged items', 'draws( cell)? background', 'draws grid', 'dynamically scrolls', 'echos bullets', 'edge', 'editable', 'edited( data)? column', 'edited data item', 'edited( data)? row', 'enabled', 'enclosing scroll view', 'ending page', 'error handling', 'event number', 'event type', 'excluded from windows menu', 'executable path', 'expanded', 'fax number', 'field editor', 'file kind', 'file name', 'file type', 'first responder', 'first visible column', 'flipped', 'floating', 'font( panel)?', 'formatter', 'frameworks path', 'frontmost', 'gave up', 'grid color', 'has data items', 'has horizontal ruler', 'has horizontal scroller', 'has parent data item', 'has resize indicator', 'has shadow', 'has sub menu', 'has vertical ruler', 'has vertical scroller', 'header cell', 'header view', 'hidden', 'hides when deactivated', 'highlights by', 'horizontal line scroll', 'horizontal page scroll', 'horizontal ruler view', 'horizontally resizable', 'icon image', 'id', 'identifier', 'ignores multiple clicks', 'image( (alignment|dims when disabled|frame style|scaling))?', 'imports graphics', 'increment value', 'indentation per level', 'indeterminate', 'index', 'integer value', 'intercell spacing', 'item height', 'key( (code|equivalent( modifier)?|window))?', 'knob thickness', 'label', 'last( visible)? column', 'leading offset', 'leaf', 'level', 'line scroll', 'loaded', 'localized sort', 'location', 'loop mode', 'main( (bunde|menu|window))?', 'marker follows cell', 'matrix mode', 'maximum( content)? size', 'maximum visible columns', 'menu( form representation)?', 'miniaturizable', 'miniaturized', 'minimized image', 'minimized title', 'minimum column width', 'minimum( content)? size', 'modal', 'modified', 'mouse down state', 'movie( (controller|file|rect))?', 'muted', 'name', 'needs display', 'next state', 'next text', 'number of tick marks', 'only tick mark values', 'opaque', 'open panel', 'option key down', 'outline table column', 'page scroll', 'pages across', 'pages down', 'palette label', 'pane splitter', 'parent data item', 'parent window', 'pasteboard', 'path( (names|separator))?', 'playing', 'plays every frame', 'plays selection only', 'position', 'preferred edge', 'preferred type', 'pressure', 'previous text', 'prompt', 'properties', 'prototype cell', 'pulls down', 'rate', 'released when closed', 'repeated', 'requested print time', 'required file type', 'resizable', 'resized column', 'resource path', 'returns records', 'reuses columns', 'rich text', 'roll over', 'row height', 'rulers visible', 'save panel', 'scripts path', 'scrollable', 'selectable( identifiers)?', 'selected cell', 'selected( data)? columns?', 'selected data items?', 'selected( data)? rows?', 'selected item identifier', 'selection by rect', 'send action on arrow key', 'sends action when done editing', 'separates columns', 'separator item', 'sequence number', 'services menu', 'shared frameworks path', 'shared support path', 'sheet', 'shift key down', 'shows alpha', 'shows state by', 'size( mode)?', 'smart insert delete enabled', 'sort case sensitivity', 'sort column', 'sort order', 'sort type', 'sorted( data rows)?', 'sound', 'source( mask)?', 'spell checking enabled', 'starting page', 'state', 'string value', 'sub menu', 'super menu', 'super view', 'tab key traverses cells', 'tab state', 'tab type', 'tab view', 'table view', 'tag', 'target( printer)?', 'text color', 'text container insert', 'text container origin', 'text returned', 'tick mark position', 'time stamp', 'title(d| (cell|font|height|position|rect))?', 'tool tip', 'toolbar', 'trailing offset', 'transparent', 'treat packages as directories', 'truncated labels', 'types', 'unmodified characters', 'update views', 'use sort indicator', 'user defaults', 'uses data source', 'uses ruler', 'uses threaded animation', 'uses title from previous column', 'value wraps', 'version', 'vertical( (line scroll|page scroll|ruler view))?', 'vertically resizable', 'view', 'visible( document rect)?', 'volume', 'width', 'window', 'windows menu', 'wraps', 'zoomable', 'zoomed')
    tokens = {'root': [
        ('\\s+', Text),
        # '¬' continues a line in AppleScript (character restored).
        ('¬\\n', String.Escape),
        ("'s\\s+", Text),
        ('(--|#).*?$', Comment),
        ('\\(\\*', Comment.Multiline, 'comment'),
        ('[(){}!,.:]', Punctuation),
        # «raw event» chevron literals; the dump had an invalid empty class.
        ('(«)([^»]+)(»)', bygroups(Text, Name.Builtin, Text)),
        ('\\b((?:considering|ignoring)\\s*)(application responses|case|diacriticals|hyphens|numeric strings|punctuation|white space)', bygroups(Keyword, Name.Builtin)),
        # Operator set with the Unicode operators ≠ ≥ ≤ ÷ restored.
        ('(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator),
        (('\\b(%s)\\b' % '|'.join(Operators)), Operator.Word),
        (('^(\\s*(?:on|end)\\s+)(%s)' % '|'.join(StudioEvents[::(- 1)])), bygroups(Keyword, Name.Function)),
        ('^(\\s*)(in|on|script|to)(\\s+)', bygroups(Text, Keyword, Text)),
        (('\\b(as )(%s)\\b' % '|'.join(Classes)), bygroups(Keyword, Name.Class)),
        (('\\b(%s)\\b' % '|'.join(Literals)), Name.Constant),
        (('\\b(%s)\\b' % '|'.join(Commands)), Name.Builtin),
        (('\\b(%s)\\b' % '|'.join(Control)), Keyword),
        (('\\b(%s)\\b' % '|'.join(Declarations)), Keyword),
        (('\\b(%s)\\b' % '|'.join(Reserved)), Name.Builtin),
        (('\\b(%s)s?\\b' % '|'.join(BuiltIn)), Name.Builtin),
        (('\\b(%s)\\b' % '|'.join(HandlerParams)), Name.Builtin),
        (('\\b(%s)\\b' % '|'.join(StudioProperties)), Name.Attribute),
        (('\\b(%s)s?\\b' % '|'.join(StudioClasses)), Name.Builtin),
        (('\\b(%s)\\b' % '|'.join(StudioCommands)), Name.Builtin),
        (('\\b(%s)\\b' % '|'.join(References)), Name.Builtin),
        # Double-quoted string: escaped backslash, backslash escape, or any
        # other non-quote char. The first alternative was empty in the dump;
        # restored as an escaped backslash (NOTE(review): verify upstream).
        ('"(\\\\\\\\|\\\\[^\\\\]|[^"\\\\])*"', String.Double),
        (('\\b(%s)\\b' % Identifiers), Name.Variable),
        ('[-+]?(\\d+\\.\\d*|\\d*\\.\\d+)(E[-+][0-9]+)?', Number.Float),
        ('[-+]?\\d+', Number.Integer)],
     'comment': [
        ('\\(\\*', Comment.Multiline, '#push'),
        ('\\*\\)', Comment.Multiline, '#pop'),
        ('[^*(]+', Comment.Multiline),
        ('[*(]', Comment.Multiline)]}
@pytest.mark.end_to_end()
def test_collect_task_with_expressions(runner, tmp_path):
    """`pytask collect -k` selects only the tasks matching the expression,
    and `--nodes` additionally lists their dependencies and products.

    NOTE(review): the decorator prefixes were missing in the original — both
    on this function (bare `.end_to_end()`) and inside the generated task
    module (bare `.depends_on(...)`, which cannot parse). Restored as
    `@pytest.mark...` / `@pytask.mark...`; confirm against upstream.
    """
    source = """
    import pytask

    @pytask.mark.depends_on("in_1.txt")
    @pytask.mark.produces("out_1.txt")
    def task_example_1():
        pass

    @pytask.mark.depends_on("in_2.txt")
    @pytask.mark.produces("out_2.txt")
    def task_example_2():
        pass
    """
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    tmp_path.joinpath('in_1.txt').touch()
    tmp_path.joinpath('in_2.txt').touch()
    # Only task_example_1 matches the '-k _1' expression.
    result = runner.invoke(cli, ['collect', tmp_path.as_posix(), '-k', '_1'])
    assert (result.exit_code == ExitCode.OK)
    # Strip whitespace so assertions are robust to terminal-width wrapping.
    captured = result.output.replace('\n', '').replace(' ', '')
    assert ('<Module' in captured)
    assert ('task_module.py>' in captured)
    assert ('<Function' in captured)
    assert ('task_example_1>' in captured)
    assert ('<Function' in captured)
    assert ('task_example_2>' not in captured)
    # With --nodes the dependency and product files are shown as well.
    result = runner.invoke(cli, ['collect', tmp_path.as_posix(), '-k', '_1', '--nodes'])
    assert (result.exit_code == ExitCode.OK)
    captured = result.output.replace('\n', '').replace(' ', '')
    assert ('<Module' in captured)
    assert ('task_module.py>' in captured)
    assert ('<Function' in captured)
    assert ('task_example_1>' in captured)
    assert ('<Dependency' in captured)
    assert ('in_1.txt>' in captured)
    assert ('<Product' in captured)
    assert ('out_1.txt>' in captured)
def setup_distributed():
    """Configure this process for (possibly) distributed training.

    Reads LOCAL_RANK / WORLD_SIZE from the environment. When more than one
    process participates, pins this process to its GPU and joins the NCCL
    process group, then synchronizes all workers.

    Returns:
        bool: True when running distributed (WORLD_SIZE > 1), else False.
    """
    rank = int(os.environ.get('LOCAL_RANK', 0))
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    distributed = world_size > 1
    if distributed:
        # Bind this process to its assigned device before creating the group.
        torch.cuda.set_device(rank)
        dist.init_process_group(backend='nccl', init_method='env://')
        synchronize()
    return distributed
# NOTE(review): the bare '.parametrize(...)' looks like a mangled
# '@pytest.mark.parametrize' decorator -- confirm against upstream.
.parametrize('vuln_count, pkg_count, skip_count, print_format', [(1, 1, 0, True), (2, 1, 0, True), (2, 2, 0, True), (0, 0, 0, False), (0, 1, 0, False), (0, 0, 1, True)])
def test_print_format(monkeypatch, vuln_count, pkg_count, skip_count, print_format):
    """ColumnsFormat.format must run iff there is something worth printing.

    Stubs the dependency source, formatter, auditor and fix resolution, then
    checks that the formatter was invoked exactly when the audit produced
    vulnerable or skipped packages (the print_format parameter).
    """
    # Dependency source whose fix() is a no-op.
    dummysource = pretend.stub(fix=(lambda a: None))
    monkeypatch.setattr(pip_audit._cli, 'PipSource', (lambda *a, **kw: dummysource))
    # Record formatter calls so we can assert on them at the end.
    dummyformat = pretend.stub(format=pretend.call_recorder((lambda _result, _fixes: None)), is_manifest=False)
    monkeypatch.setattr(pip_audit._cli, 'ColumnsFormat', (lambda *a, **kw: dummyformat))
    parser = pip_audit._cli._parser()
    monkeypatch.setattr(pip_audit._cli, '_parse_args', (lambda x: parser.parse_args([])))
    # pkg_count non-skipped packages, each with vuln_count // pkg_count findings.
    result = [(pretend.stub(is_skipped=(lambda : False), name=('something' + str(i)), canonical_name=('something' + str(i)), version=1), ([pretend.stub(fix_versions=[2], id='foo', aliases=set(), has_any_id=(lambda x: False))] * (vuln_count // pkg_count))) for i in range(pkg_count)]
    # Plus skip_count packages that were skipped with a reason.
    result.extend(((pretend.stub(is_skipped=(lambda : True), name=('skipped ' + str(i)), canonical_name=('skipped ' + str(i)), version=1, skip_reason=('reason ' + str(i))), []) for i in range(skip_count)))
    auditor = pretend.stub(audit=(lambda a: result))
    monkeypatch.setattr(pip_audit._cli, 'Auditor', (lambda *a, **kw: auditor))
    resolve_fix_versions = [pretend.stub(is_skipped=(lambda : False), dep=spec, version=2) for (spec, _) in result]
    monkeypatch.setattr(pip_audit._cli, 'resolve_fix_versions', (lambda *a: resolve_fix_versions))
    try:
        pip_audit._cli.audit()
    except SystemExit:
        # audit() exits via sys.exit when findings are present.
        pass
    assert (bool(dummyformat.format.calls) == print_format)
class Migration(migrations.Migration):
    """Auto-generated schema migration for the 'domain' app."""

    # Applied on top of the previous domain migration.
    dependencies = [('domain', '0039_meta')]
    # Re-declares attribute.uri: an optional URL up to 640 characters.
    operations = [migrations.AlterField(model_name='attribute', name='uri', field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this attribute (auto-generated).', max_length=640, null=True, verbose_name='URI'))]
class VanillaBlock(nn.Module):
    """Plain block: two 3x3 conv -> norm -> ReLU stages, no extras.

    The bm/gw/se_r arguments exist only for signature compatibility with
    other block types and must be left as None.
    """

    def __init__(self, w_in, w_out, stride, bn_norm, bm=None, gw=None, se_r=None):
        assert ((bm is None) and (gw is None) and (se_r is None)), 'Vanilla block does not support bm, gw, and se_r options'
        super(VanillaBlock, self).__init__()
        self.construct(w_in, w_out, stride, bn_norm)

    def construct(self, w_in, w_out, stride, bn_norm):
        """Create the layers in the order they run in forward()."""
        inplace = regnet_cfg.MEM.RELU_INPLACE
        # Stage one: strided 3x3 conv.
        self.a = nn.Conv2d(w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False)
        self.a_bn = get_norm(bn_norm, w_out)
        self.a_relu = nn.ReLU(inplace=inplace)
        # Stage two: stride-1 3x3 conv.
        self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)
        self.b_bn = get_norm(bn_norm, w_out)
        self.b_relu = nn.ReLU(inplace=inplace)

    def forward(self, x):
        """Run the input through every child module in registration order."""
        out = x
        for module in self.children():
            out = module(out)
        return out
class DownSample(nn.Module):
    """ConvModule plus 3D max-pool, pooled 'before' or 'after' the conv."""

    def __init__(self, in_channels, out_channels, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), groups=1, bias=False, conv_cfg=dict(type='Conv3d'), norm_cfg=None, act_cfg=None, downsample_position='after', downsample_scale=(1, 2, 2)):
        super().__init__()
        self.conv = ConvModule(in_channels, out_channels, kernel_size, stride, padding, groups=groups, bias=bias, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        assert (downsample_position in ['before', 'after'])
        self.downsample_position = downsample_position
        # Kernel == stride == downsample_scale; ceil mode keeps edge frames.
        self.pool = nn.MaxPool3d(downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)

    def forward(self, x):
        """Apply pooling and convolution in the configured order."""
        if self.downsample_position == 'before':
            return self.conv(self.pool(x))
        return self.pool(self.conv(x))
def apply_across_args(*fns):
    """Lift each function in *fns* so it can be mapped across its arguments.

    Each returned wrapper ``f2`` supports:
      * ``f2(n)`` with an int ``n``: call ``f`` with no arguments; returns a
        single result when ``n == 1``, otherwise a list of ``n`` results.
      * ``f2(a, b, ...)``: call ``f`` once per argument, returning a list.
      * ``f2(x)`` with a single non-int argument: if ``x`` itself has length
        1 it is passed to ``f`` directly, otherwise ``f`` is mapped over the
        elements of ``x`` (note: a multi-character string is therefore
        mapped per character).

    Returns:
        The single wrapper when one function is given, else a list of
        wrappers, one per function, in order.
    """
    def f2(f, *names):
        if names and isinstance(names[0], int):
            # BUG FIX: the requested count lives in names[0]; the original
            # compared the whole *names* tuple to 1 (always False), so
            # f2(1) wrongly returned a one-element list instead of f().
            if names[0] == 1:
                return f()
            return [f() for _ in range(names[0])]
        # (The original also guarded on isinstance(names, tuple), which is
        # always True for a *args tuple; the check has been dropped.)
        if len(names) == 1:
            # Unwrap the single positional argument.
            names = names[0]
        if len(names) == 1:
            return f(names)
        return [f(name) for name in names]

    if len(fns) == 1:
        return partial(f2, fns[0])
    return [partial(f2, f) for f in fns]
def test_frd_indexing():
    """stability_margins(returnall=True) must report all crossings of an FRD.

    Builds three synthetic frequency responses with features at w = 1 and
    w = 2 rad/s and checks both the margin values and the frequencies at
    which they occur.
    """
    # Gain margins: constant |G| = 0.6, phase sweeping through -180 deg
    # at w = 1 and w = 2 (p = -180 and -540).
    w = np.linspace(0.99, 2.01, 11)
    m = 0.6
    p = ((- 180) * ((2 * w) - 1))
    d = (m * np.exp((((1j * np.pi) / 180) * p)))
    frd_gm = FrequencyResponseData(d, w)
    (gm, _, _, wg, _, _) = stability_margins(frd_gm, returnall=True)
    assert_allclose(gm, [(1 / m), (1 / m)], atol=0.01)
    assert_allclose(wg, [1.0, 2.0], atol=0.01)
    # Phase margins: phase fixed at -90 deg, |G| crossing 1 at w = 1 and 2.
    m = ((- (((2 * w) - 3) ** 4)) + 2)
    p = (- 90.0)
    d = (m * np.exp((((1j * np.pi) / 180) * p)))
    frd_pm = FrequencyResponseData(d, w)
    (_, pm, _, _, wp, _) = stability_margins(frd_pm, returnall=True)
    assert_allclose(pm, [90.0, 90.0], atol=0.01)
    assert_allclose(wp, [1.0, 2.0], atol=0.01)
    # Stability margins: distance from -1 is (1 - m) at the -180 crossings.
    w = np.arange(0.9, 2.1, 0.1)
    m = 0.6
    p = ((- 180) * ((2 * w) - 1))
    d = (m * np.exp((((1j * np.pi) / 180) * p)))
    frd_sm = FrequencyResponseData(d, w)
    (_, _, sm, _, _, ws) = stability_margins(frd_sm, returnall=True)
    assert_allclose(sm, [(1 - m), (1 - m)], atol=0.01)
    assert_allclose(ws, [1.0, 2.0], atol=0.01)
# NOTE(review): the bare '_model' looks like a mangled '@register_model'
# decorator -- confirm against the original (timm-style) source.
_model
def resmlp_24_distilled_224(pretrained=False, **kwargs):
    """ResMLP-24 distilled @ 224: patch 16, 24 blocks, embed dim 384.

    Uses Affine normalization and ResBlock layers with small init values;
    extra kwargs are forwarded into the model arguments.
    """
    model_args = dict(patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-05), norm_layer=Affine, **kwargs)
    model = _create_mixer('resmlp_24_distilled_224', pretrained=pretrained, **model_args)
    return model
class NomineeList(NominationMixin, ListView):
    """List nominees for an election, gated by the election's state."""

    template_name = 'nominations/nominee_list.html'

    def get_queryset(self, *args, **kwargs):
        """Return the nominees the current user may see (None when none)."""
        election = Election.objects.get(slug=self.kwargs['election'])
        # Full results only once nominations close; superusers always may look.
        everyone_may_view = election.nominations_complete or self.request.user.is_superuser
        if everyone_may_view:
            return Nominee.objects.filter(accepted=True, approved=True, election=election).exclude(user=None)
        if self.request.user.is_authenticated:
            # Otherwise a signed-in user only sees their own nomination.
            return Nominee.objects.filter(user=self.request.user)
def get_next_file(directory, pattern, templates):
    """Pick the filename for the next file in a numbered series.

    Args:
        directory: directory to scan for existing series members.
        pattern: regex with named groups 'pre', 'index' and 'post' matching
            series filenames; 'index' is the (possibly empty) number.
        templates: candidate filenames by position; templates[0] is used for
            an empty series, later ones get their index rewritten.

    Returns:
        (filename, index) for the first gap in the numbering, or for one
        past the highest existing index when there is no gap.
    """
    matching = [name for name in os.listdir(directory) if re.match(pattern, name)]
    if not matching:
        return (templates[0], 0)

    def index_of(name):
        # An empty index group counts as 0.
        raw = re.match(pattern, name).group('index')
        return int(raw) if raw else 0

    def render(slot, value):
        # Rewrite the chosen template's index to *value*.
        return re.sub(pattern, (lambda m: f"{m.group('pre')}{value}{m.group('post')}"), templates[slot])

    matching.sort(key=index_of)
    last = len(templates) - 1
    for pos, name in enumerate(matching):
        if index_of(name) != pos:
            # First hole in the numbering: reuse that slot.
            if pos == 0:
                return (templates[0], 0)
            return (render(min(pos, last), pos), pos)
    # No holes: continue one past the end of the series.
    return (render(min(pos, last), pos + 1), pos + 1)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper built on the libyaml C emitter (PyYAML 'CDumper' family).

    Emission/layout options go to the C emitter; Python-object
    representation and tag resolution are mixed in from the
    representer/resolver classes.
    """

    def __init__(self, stream, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None):
        # All emission and layout options are handled by the C emitter.
        CEmitter.__init__(self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags)
        # NOTE(review): the bases are BaseRepresenter/BaseResolver, yet the
        # richer Representer/Resolver are initialized here (this mirrors a
        # long-standing quirk in PyYAML's cyaml module) -- confirm intended.
        Representer.__init__(self, default_style=default_style, default_flow_style=default_flow_style)
        Resolver.__init__(self)
def build_model(model_version, quantize, model_path, device):
    """Instantiate the requested model variant and load its weights.

    Args:
        model_version: 1 or 2, selecting the architecture family.
        quantize: when True, use the quantized variant of the model.
        model_path: checkpoint path handed to load_model.
        device: device the model is created on / moved to.

    Returns:
        The network with weights restored from model_path.

    Raises:
        Exception: if model_version is neither 1 nor 2.
    """
    if model_version == 1:
        factory = quantized_modelv1 if quantize else modelv1
    elif model_version == 2:
        factory = quantized_modelv2 if quantize else modelv2
    else:
        raise Exception('[!] Unexpected model version')
    net = factory(pretrained=True, device=device).to(device)
    return load_model(net, model_path, device)
def _interpolate(raw, input, size=None, scale_factor=None, mode='nearest', align_corners=None):
    """Trace F.interpolate into the caffe net being recorded.

    'bilinear' with align_corners=True maps to a caffe 'Interp' layer;
    plain 'nearest' maps to an 'Upsample' layer. Everything else is
    unsupported.

    Args:
        raw: the original (un-patched) F.interpolate callable.
        input: the tensor being resized.
        size/scale_factor/mode/align_corners: as in F.interpolate.

    Returns:
        The tensor produced by the original F.interpolate call.

    Raises:
        NotImplementedError: for unsupported mode/align_corners combinations.
    """
    if mode == 'bilinear' and align_corners is True:
        x = raw(input, size, scale_factor, mode)
        name = log.add_layer(name='interp')
        log.add_blobs([x], name='interp_blob')
        layer = caffe_net.Layer_param(name=name, type='Interp', bottom=[log.blobs(input)], top=[log.blobs(x)])
        layer.interp_param(size=size, scale_factor=scale_factor)
        log.cnet.add_layer(layer)
        return x
    if (mode != 'nearest') or (align_corners is not None):
        # BUG FIX: the original message was misspelled ('totoaly').
        raise NotImplementedError('F.interpolate is only implemented for nearest and bilinear modes')
    x = raw(input, size, scale_factor, mode)
    layer_name = log.add_layer(name='upsample')
    # The original applied a no-op str.format to this constant blob name.
    top_blobs = log.add_blobs([x], name='upsample_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Upsample', bottom=[log.blobs(input)], top=top_blobs)
    # NOTE(review): scale_factor is taken from size[0] and fails when size is
    # None -- presumably callers always pass an explicit size; confirm.
    layer.upsample_param(size=None, scale_factor=size[0])
    log.cnet.add_layer(layer)
    return x
def test_load_kasvs_ecdh_kdf_vectors():
    """load_kasvs_ecdh_vectors must parse a KASVS ECDH-with-KDF vector file.

    Feeds a single expected-failure COUNT block ('Result = F', errno 12: tag
    changed) from a CAVS-style file and checks the parsed record: hex fields
    become ints, the '[Curve selected: P-224]' header maps to 'secp224r1',
    and the failure flag and errno are preserved.
    """
    vector_data = textwrap.dedent('\n    # Parameter set(s) supported: EB EC ED EE\n    # CAVSid: CAVSid (in hex: )\n    # IUTid: In hex: a1b2c3d4e5\n    [EB]\n\n    [Curve selected: P-224]\n    [SHA(s) supported (Used in the KDF function): SHA224 SHA256 SHA384 SHA512]\n    [MAC algorithm supported: HMAC]\n    [HMAC SHAs supported: SHA512]\n    [HMACKeySize(in bits): 112]\n    [HMAC Tag length(in bits): 64]\n\n    # Generated on Mon Dec 22 11:45:18 2014\n\n\n\n    [EB - SHA224]\n\n\n    COUNT = 50\n    dsCAVS = 540904b67b3716823dd621ed72ad3dbc615887b4f56f910b78a57199\n    QsCAVSx = 28e5f3a72d8f6b8499dd1bcdfceafcecec68a0d715789bcf4b55fe15\n    QsCAVSy = 8c8006a7da7c1a19f5328d7e865522b0c0dfb9a29b2c46dc96590d2a\n    Nonce = 4eefb2a29a0e89c3898a7affdfa60dd7\n    dsIUT = 5e717ae889fc8d67be11c2ebe1a7dd68a040b2dee8e327\n    QsIUTx = ae7f3db340b647d61713f5374c019f1be2b28573cb6219bb7b747223\n    QsIUTy = 800e6bffcf97c15864ec6e5673fb83359b45f89b8a26a27f6f3dfbff\n    NonceDKMIUT = bb7f1b40d14ebdb57\n    OI = a1b2c3d4e5bb7f1b40d14ebdbb1582daab9cc6c30d61fdcf1cdfc7e9a304651e0fdb\n    CAVSTag = 84de198c3a958c62\n    Z = 43f23b2c760d686fc99cc008b63aea92f866e224265af60d2d8ae540\n    MacData = 5374616edeefb2a29a0e89c3898a7affdfa60dd7\n    DKM = ad65fa2d12541c3a21f3cd223efb\n    Result = F (12 - Tag changed )\n    ').splitlines()
    expected = [{'errno': 12, 'fail': True, 'COUNT': 50, 'CAVS': {'d': int('540904b67b3716823dd621ed72ad3dbc615887b4f56f910b78a57199', 16), 'x': int('28e5f3a72d8f6b8499dd1bcdfceafcecec68a0d715789bcf4b55fe15', 16), 'y': int('8c8006a7da7c1a19f5328d7e865522b0c0dfb9a29b2c46dc96590d2a', 16)}, 'IUT': {'d': int('5e717ae889fc8d67be11c2ebe1a7dd68a040b2dee8e327', 16), 'x': int('ae7f3db340b647d61713f5374c019f1be2b28573cb6219bb7b747223', 16), 'y': int('800e6bffcf97c15864ec6e5673fb83359b45f89b8a26a27f6f3dfbff', 16)}, 'OI': int('a1b2c3d4e5bb7f1b40d14ebdbb1582daab9cc6c30d61fdcf1cdfc7e9a304651e0fdb', 16), 'Z': int('43f23b2c760d686fc99cc008b63aea92f866e224265af60d2d8ae540', 16), 'DKM': int('ad65fa2d12541c3a21f3cd223efb', 16), 'curve': 'secp224r1'}]
    assert (expected == load_kasvs_ecdh_vectors(vector_data))
def built(path, version_string=None):
    """Check whether *path* holds a completed '.built' marker file.

    Without a version_string, any '.built' file counts as built. With one,
    the marker's second line must equal version_string.

    Returns:
        bool: True when the marker (and, if requested, the version) matches.
    """
    marker = os.path.join(path, '.built')
    if not version_string:
        return os.path.isfile(marker)
    if not os.path.isfile(marker):
        return False
    with open(marker, 'r') as handle:
        lines = handle.read().split('\n')
    # Line 0 is typically a timestamp; line 1 carries the version tag.
    return (len(lines) > 1) and (lines[1] == version_string)
class Logger(object):
    """Append-only training logger that mirrors messages to stdout and a file."""

    def __init__(self, log_file, command):
        # Path of the log file; (re)opened in append mode on every write.
        self.log_file = log_file
        if command:
            # Record the launching command at the top of the log.
            self._write(command)

    def output(self, epoch, enc_losses, dec_losses, training_samples, testing_samples, enc_mAP, dec_mAP, running_time, debug=True, log=''):
        """Format one epoch summary, print it and append it to the log file."""
        parts = [log]
        parts.append('Epoch: {:2} | train enc_loss: {:.5f} dec_loss: {:.5f} | '.format(epoch, (enc_losses['train'] / training_samples), (dec_losses['train'] / training_samples)))
        if debug:
            # Test-set metrics are only reported in debug mode.
            parts.append('test enc_loss: {:.5f} dec_loss: {:.5f} enc_mAP: {:.5f} dec_mAP: {:.5f} | '.format((enc_losses['test'] / testing_samples), (dec_losses['test'] / testing_samples), enc_mAP, dec_mAP))
        parts.append('running time: {:.2f} sec'.format(running_time))
        message = ''.join(parts)
        self._print(message)
        self._write(message)

    def _print(self, log):
        print(log)

    def _write(self, log):
        with open(self.log_file, 'a+') as f:
            f.write(log + '\n')
def remove_embed_floats(root, paper_id):
    """Replace figure/table float bodies with links to pre-rendered PNGs.

    For LaTeXML output: inside every <figure class="ltx_figure"> keep only
    <img>/<figcaption> children; inside every <figure class="ltx_table">
    keep only the <figcaption>. Floats left with just a caption get an
    <img> pointing at /static/img/<paper_id>/<figure id>.png prepended.
    """
    from lxml.html.builder import IMG

    def _png_img(elem):
        # One rendered image per float, keyed by its (dot-sanitized) id.
        img = IMG()
        src = '/static/img/{}/{}.png'.format(paper_id, elem.attrib['id'].replace('.', '_'))
        img.attrib['src'] = src
        img.attrib['alt'] = src
        return img

    # BUG FIX: the xpath predicates were missing the '@class' attribute test
    # ('[="ltx_figure"]' is invalid XPath).
    for e in root.xpath('//figure[@class="ltx_figure"]'):
        # BUG FIX: iterate a snapshot -- removing children while iterating
        # the live element skips the sibling after each removal.
        for c in list(e):
            if (c.tag != 'img') and (c.tag != 'figcaption'):
                e.remove(c)
        if [c.tag for c in e] == ['figcaption']:
            e.insert(0, _png_img(e))
    for e in root.xpath('//figure[@class="ltx_table"]'):
        for c in list(e):
            if c.tag != 'figcaption':
                e.remove(c)
        e.insert(0, _png_img(e))
# NOTE(review): "_optics(name='BL')" looks like a mangled
# "@register_optics(name='BL')" decorator -- confirm against upstream.
_optics(name='BL')
def solve_beer_lambert(solar_cell: SolarCell, wavelength: NDArray, **kwargs) -> None:
    """Solve the cell optics with a simple Beer-Lambert absorption model.

    Side effects on *solar_cell*: stores 'wavelength'; per junction/layer
    'alpha', 'reflected', 'diff_absorption', 'absorbed' (bound method) and
    'layer_absorption'; and cell-level 'transmitted' and 'absorbed'.
    """
    solar_cell.wavelength = wavelength
    # Unit incident flux, reduced by shading and front-surface reflection.
    fraction = np.ones(wavelength.shape)
    if hasattr(solar_cell, 'shading'):
        fraction *= (1 - solar_cell.shading)
    if (hasattr(solar_cell, 'reflectivity') and (solar_cell.reflectivity is not None)):
        solar_cell.reflected = solar_cell.reflectivity(wavelength)
        fraction *= (1 - solar_cell.reflected)
    else:
        solar_cell.reflected = np.zeros(fraction.shape)
    # Flatten the stack into parallel per-layer widths and absorption
    # coefficients, recording how many layers each stack entry contributes.
    widths = []
    alphas = []
    n_layers_junction = []
    for (j, layer_object) in enumerate(solar_cell):
        if (type(layer_object) is Layer):
            widths.append(layer_object.width)
            alphas.append(layer_object.material.alpha(wavelength))
            n_layers_junction.append(1)
        elif (type(layer_object) is TunnelJunction):
            n_layers_junction.append(len(layer_object))
            for (i, layer) in enumerate(layer_object):
                widths.append(layer.width)
                alphas.append(layer.material.alpha(wavelength))
        elif (type(layer_object) is Junction):
            n_layers_junction.append(len(layer_object))
            kind = (solar_cell[j].kind if hasattr(solar_cell[j], 'kind') else None)
            if (kind == '2D'):
                if (hasattr(solar_cell[j], 'jsc') or hasattr(solar_cell[j], 'eqe')):
                    # 2D junctions with a preset jsc/eqe take no part in the
                    # optical calculation: zero absorption coefficient.
                    print("Warning: A junction of kind '2D' found. Junction ignored in the optics calculation!")
                    w = layer_object.width
                    def alf(x):
                        return (0.0 * x)
                    solar_cell[j].alpha = alf
                    solar_cell[j].reflected = interp1d(wavelength, solar_cell.reflected, bounds_error=False, fill_value=(0, 0))
                    widths.append(w)
                    alphas.append(alf(wavelength))
                else:
                    # Derive an effective alpha from the detailed-balance
                    # absorptance of the junction.
                    ASC.absorptance_detailed_balance(solar_cell[j])
                    w = layer_object.width
                    # NOTE(review): alf closes over the loop-scoped names
                    # 'layer_object' and 'w'; with several such junctions the
                    # earlier junctions' alpha would see the *last* values
                    # (late-binding closure) -- confirm this is intended.
                    def alf(x):
                        val = np.maximum((1 - layer_object.absorptance(x)), 0.001)
                        return (((- 1) / w) * np.log(val))
                    solar_cell[j].alpha = alf
                    solar_cell[j].reflected = interp1d(wavelength, solar_cell.reflected, bounds_error=False, fill_value=(0, 0))
                    widths.append(w)
                    alphas.append(alf(wavelength))
            elif (kind == 'DB'):
                # Same detailed-balance treatment as the non-preset 2D case.
                ASC.absorptance_detailed_balance(solar_cell[j])
                w = layer_object.width
                def alf(x):
                    val = np.maximum((1 - layer_object.absorptance(x)), 0.001)
                    return (((- 1) / w) * np.log(val))
                solar_cell[j].alpha = alf
                solar_cell[j].reflected = interp1d(wavelength, solar_cell.reflected, bounds_error=False, fill_value=(0, 0))
                widths.append(w)
                alphas.append(alf(wavelength))
            else:
                # Ordinary junction: use each layer's own material data.
                for (i, layer) in enumerate(layer_object):
                    widths.append(layer.width)
                    alphas.append(layer.material.alpha(wavelength))
    (diff_absorption, transmitted, all_absorbed) = calculate_absorption_beer_lambert(widths, alphas, fraction)
    # Walk the stack again, attenuating the incoming flux I0 layer by layer
    # and accumulating per-junction absorption.
    I0 = (1 * fraction)
    layers_above_offset = np.cumsum(([0] + n_layers_junction))
    for j in range(len(solar_cell)):
        solar_cell[j].diff_absorption = diff_absorption
        solar_cell[j].absorbed = types.MethodType(absorbed, solar_cell[j])
        A_junc = np.zeros_like(wavelength)
        for k in range(n_layers_junction[j]):
            ilayer = (layers_above_offset[j] + k)
            A_layer = (I0 * (1 - np.exp(((- alphas[ilayer]) * widths[ilayer]))))
            A_junc += A_layer
            I0 -= A_layer
        solar_cell[j].layer_absorption = A_junc
    solar_cell.transmitted = transmitted
    solar_cell.absorbed = all_absorbed
# NOTE(review): the bare '_test' looks like a mangled decorator (e.g.
# '@keras_test') -- confirm against the original test suite.
_test
def test_maxpooling1d_legacy_interface():
    """Legacy MaxPool1D kwargs must produce the same config as current ones.

    'pool_length'/'border_mode' should be remapped to 'pool_size'/'padding',
    and a positional pool size should behave like the keyword form.
    """
    old_layer = keras.layers.MaxPool1D(pool_length=2, border_mode='valid', name='maxpool1d')
    new_layer = keras.layers.MaxPool1D(pool_size=2, padding='valid', name='maxpool1d')
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
    # Positional pool_size with new-style padding.
    old_layer = keras.layers.MaxPool1D(2, padding='valid', name='maxpool1d')
    new_layer = keras.layers.MaxPool1D(pool_size=2, padding='valid', name='maxpool1d')
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.