@with_fixtures(WebFixture, ConstraintRenderingFixture)
def test_required_constraint_js(web_fixture, constraint_rendering_fixture):
fixture = constraint_rendering_fixture
constraint = RequiredConstraint()
class MyForm(Form):
def __init__(self, view, name):
super().__init__(view, name)
self.use_layout(FormLayout())
field = fixture.model_object.fields.an_attribute.with_validation_constraint(constraint)
self.layout.add_input(TextInput(self, field))
wsgi_app = web_fixture.new_wsgi_app(child_factory=MyForm.factory('myform'), enable_js=True)
web_fixture.reahl_server.set_app(wsgi_app)
web_fixture.driver_browser.open('/')
web_fixture.driver_browser.type(XPath.input_labelled('an attribute'), 'something', trigger_blur=False, wait_for_ajax=False)
web_fixture.driver_browser.press_tab()
web_fixture.driver_browser.wait_for_element_not_visible(fixture.error_xpath)
web_fixture.driver_browser.type(XPath.input_labelled('an attribute'), '')
web_fixture.driver_browser.press_keys(web_fixture.driver_browser.Keys.BACK_SPACE, locator=XPath.input_labelled('an attribute'))
web_fixture.driver_browser.wait_for_element_visible(fixture.error_xpath)
@pytest.mark.parametrize('case_name', POSITIVE_HOOK_CASES.keys())
def test_hook_positive_examples(case_name, run_line):
rcase = ResolvedCase.load_positive(case_name)
hook_id = POSITIVE_HOOK_CASES[case_name]
ret = run_line(((HOOK_CONFIG[hook_id] + [rcase.path]) + rcase.add_args))
assert (ret.exit_code == 0), _format_cli_result(rcase, ret)
class webvision_dataloader():
def __init__(self, batch_size, num_batches, num_class, num_workers, root_dir, root_imagenet_dir, log):
self.batch_size = batch_size
self.num_class = num_class
self.num_samples = (None if (num_batches is None) else (self.batch_size * num_batches))
self.num_workers = num_workers
self.root_dir = root_dir
self.root_imagenet_dir = root_imagenet_dir
self.log = log
self.transform_train = transforms.Compose([transforms.RandomCrop(227), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
self.transform_test = transforms.Compose([transforms.CenterCrop(227), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
self.transform_imagenet = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(227), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
def run(self, mode, pred=[], prob=[], paths=[]):
if (mode == 'warmup'):
all_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='all', num_class=self.num_class, num_samples=(self.num_samples * 2))
trainloader = DataLoader(dataset=all_dataset, batch_size=(self.batch_size * 2), shuffle=True, num_workers=self.num_workers, pin_memory=True)
return trainloader
elif (mode == 'train'):
labeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='labeled', num_class=self.num_class, pred=pred, probability=prob, paths=paths, log=self.log)
labeled_trainloader = DataLoader(dataset=labeled_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
unlabeled_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_train, mode='unlabeled', num_class=self.num_class, pred=pred, paths=paths, log=self.log)
unlabeled_trainloader = DataLoader(dataset=unlabeled_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
return (labeled_trainloader, unlabeled_trainloader)
elif (mode == 'test'):
test_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='test', num_class=self.num_class)
test_loader = DataLoader(dataset=test_dataset, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
return test_loader
elif (mode == 'eval_train'):
eval_dataset = webvision_dataset(root_dir=self.root_dir, transform=self.transform_test, mode='all', num_class=self.num_class, num_samples=(self.num_samples * 2))
eval_loader = DataLoader(dataset=eval_dataset, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
return eval_loader
elif (mode == 'imagenet'):
imagenet_val = imagenet_dataset(root_dir=self.root_imagenet_dir, web_root=self.root_dir, transform=self.transform_imagenet, num_class=self.num_class)
imagenet_loader = DataLoader(dataset=imagenet_val, batch_size=(self.batch_size * 20), shuffle=False, num_workers=self.num_workers, pin_memory=True)
return imagenet_loader
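# Hedged usage sketch (paths, class count, and the log handle below are placeholders;
# webvision_dataset and imagenet_dataset must come from the same module for this to run):
# loader = webvision_dataloader(batch_size=32, num_batches=1000, num_class=50,
#                               num_workers=4, root_dir='./webvision/',
#                               root_imagenet_dir='./imagenet/', log=open('acc.txt', 'w'))
# warmup_loader = loader.run('warmup')
# labeled_loader, unlabeled_loader = loader.run('train', pred=pred, prob=prob, paths=paths)
# imagenet_loader = loader.run('imagenet')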
class RStripTokenDataset(BaseWrapperDataset):
def __init__(self, dataset, id_to_strip):
super().__init__(dataset)
self.id_to_strip = id_to_strip
def __getitem__(self, index):
item = self.dataset[index]
while ((len(item) > 0) and (item[(- 1)] == self.id_to_strip)):
item = item[:(- 1)]
return item
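# A minimal behavioural sketch of the trailing-strip loop above, kept independent of
# fairseq (the helper name below is hypothetical; RStripTokenDataset itself also needs
# BaseWrapperDataset and a wrapped dataset of token sequences).
def _rstrip_tokens(item, id_to_strip):
    # Drop trailing entries equal to id_to_strip, mirroring __getitem__ above.
    while len(item) > 0 and item[-1] == id_to_strip:
        item = item[:-1]
    return item

assert _rstrip_tokens([5, 3, 2, 2], id_to_strip=2) == [5, 3]
assert _rstrip_tokens([2, 2], id_to_strip=2) == []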
def prediction_loss(train_loss, test_loss, directory):
plt.figure()
plt.plot(train_loss, color='red')
plt.plot(test_loss, color='blue')
plt.title('Prediction loss: training (red), test (blue)')
plt.xlabel('Epochs')
plt.ylabel('Loss')
name = (directory + '/predictionloss_test&train')
plt.savefig(name)
closefig()
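# Hedged usage sketch: `directory` must already exist and `closefig` (presumably a small
# wrapper around plt.close) must be defined alongside matplotlib's pyplot import.
# prediction_loss(train_loss=[0.9, 0.6, 0.4], test_loss=[1.0, 0.8, 0.7], directory='plots')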
def _prepare(line):
while True:
positions = _find_separators(line, "'", "'")
if (positions is None):
break
(left, right) = positions
value = _global_value_of(line[(left + 1):right])
if value:
line = ((line[:left] + value) + line[(right + 1):])
else:
break
positions = _find_separators(line, 'tag(', ')')
if positions:
line = (line[:positions[0]] + line[(positions[1] + 1):])
return (None if ((line == '') or (line.strip() == '#')) else line.strip()[1:].strip())
class GenDAGPass(BasePass):
def __call__(self, top):
top.check()
top._dag = PassMetadata()
placeholders = [x for x in top._dsl.all_named_objects if isinstance(x, Placeholder)]
if placeholders:
raise LeftoverPlaceholderError(placeholders)
self._generate_net_blocks(top)
self._process_value_constraints(top)
self._process_methods(top)
def _generate_net_blocks(self, top):
top._dag.genblks = set()
top._dag.genblk_hostobj = {}
top._dag.genblk_reads = {}
top._dag.genblk_writes = {}
def compile_net_blk(_globals, src, writer):
_locals = {}
fname = f'Net (writer is {writer!r})'
custom_exec(compile(src, filename=fname, mode='exec'), _globals, _locals)
line_cache[fname] = (len(src), None, src.splitlines(), fname)
return list(_locals.values())[0]
for (writer, signals) in top.get_all_value_nets():
if (len(signals) == 1):
continue
all_readers = [x for x in signals if (x is not writer)]
all_fanout = len(all_readers)
readers = []
if (isinstance(writer, Const) or writer.is_top_level_signal()):
for x in all_readers:
if (not x.is_top_level_signal()):
readers.append(x)
else:
residence = None
for x in all_readers:
if x.is_top_level_signal():
if (residence is None):
residence = x
readers.append(x)
else:
readers.append(x)
fanout = len(readers)
genblk_name = f'{writer!r}__{all_fanout}_{fanout}'.replace(' ', '').replace('.', '_').replace(':', '_').replace('[', '_').replace(']', '_').replace('(', '_').replace(')', '_').replace(',', '_')
if (fanout == 0):
blk = compile_net_blk({}, f'def {genblk_name}(): pass', writer)
top._dag.genblks.add(blk)
if writer.is_signal():
top._dag.genblk_reads[blk] = [writer]
top._dag.genblk_writes[blk] = all_readers
continue
wr_lca = writer.get_host_component()
rd_lcas = [x.get_host_component() for x in readers]
mindep = min(wr_lca.get_component_level(), min([x.get_component_level() for x in rd_lcas]))
for i in range(mindep, wr_lca.get_component_level()):
wr_lca = wr_lca.get_parent_object()
for (i, x) in enumerate(rd_lcas):
for j in range(mindep, x.get_component_level()):
x = x.get_parent_object()
rd_lcas[i] = x
while (wr_lca is not top):
succeed = True
for x in rd_lcas:
if (x is not wr_lca):
succeed = False
break
if succeed:
break
wr_lca = wr_lca.get_parent_object()
for i in range(fanout):
rd_lcas[i] = rd_lcas[i].get_parent_object()
lca_len = len(repr(wr_lca))
_globals = {'s': wr_lca}
if (isinstance(writer, Const) and (type(writer._dsl.const) is not int)):
types = get_bitstruct_inst_all_classes(writer._dsl.const)
for t in types:
if (t.__name__ in _globals):
assert (t is _globals[t.__name__]), 'Cannot handle two subfields with the same struct name but different structs'
_globals[t.__name__] = t
wstr = repr(writer)
else:
wstr = f's.{repr(writer)[(lca_len + 1):]}'
rstrs = [f's.{repr(x)[(lca_len + 1):]}' for x in readers]
gen_src = '\ndef {}():\n x = {}\n {}'.format(genblk_name, wstr, '\n '.join([f'{rstr} = x' for rstr in rstrs]))
blk = compile_net_blk(_globals, gen_src, writer)
top._dag.genblks.add(blk)
if writer.is_signal():
top._dag.genblk_reads[blk] = [writer]
top._dag.genblk_writes[blk] = all_readers
top._dag.final_upblks = (top.get_all_update_blocks() | top._dag.genblks)
def _process_value_constraints(self, top):
update_ff = top.get_all_update_ff()
(upblk_reads, upblk_writes, _) = top.get_all_upblk_metadata()
(genblk_reads, genblk_writes) = (top._dag.genblk_reads, top._dag.genblk_writes)
(U_U, RD_U, WR_U, U_M) = top.get_all_explicit_constraints()
read_upblks = defaultdict(set)
write_upblks = defaultdict(set)
constraint_objs = defaultdict(set)
expl_constraints = set(top._dsl.all_U_U_constraints)
for data in [upblk_reads, genblk_reads]:
for (blk, reads) in data.items():
for rd in reads:
read_upblks[rd].add(blk)
for data in [upblk_writes, genblk_writes]:
for (blk, writes) in data.items():
for wr in writes:
write_upblks[wr].add(blk)
for typ in ['rd', 'wr']:
if (typ == 'rd'):
constraints = RD_U
equal_blks = read_upblks
else:
constraints = WR_U
equal_blks = write_upblks
for (obj, constrained_blks) in constraints.items():
for (sign, co_blk) in constrained_blks:
for eq_blk in equal_blks[obj]:
if (co_blk != eq_blk):
if (sign == 1):
U_U.add((eq_blk, co_blk))
constraint_objs[(eq_blk, co_blk)].add(obj)
else:
U_U.add((co_blk, eq_blk))
constraint_objs[(co_blk, eq_blk)].add(obj)
impl_constraints = set()
for (obj, rd_blks) in read_upblks.items():
writers = []
x = obj
while x.is_signal():
if (x in write_upblks):
writers.append(x)
x = x.get_parent_object()
if obj.is_signal():
for x in obj.get_sibling_slices():
if (x.slice_overlap(obj) and (x in write_upblks)):
writers.append(x)
for writer in writers:
for wr_blk in write_upblks[writer]:
if (wr_blk not in update_ff):
for rd_blk in rd_blks:
if (wr_blk != rd_blk):
impl_constraints.add((wr_blk, rd_blk))
constraint_objs[(wr_blk, rd_blk)].add(obj)
for (obj, wr_blks) in write_upblks.items():
readers = []
x = obj
while x.is_signal():
if (x in read_upblks):
readers.append(x)
x = x.get_parent_object()
for wr_blk in wr_blks:
if (wr_blk not in update_ff):
for reader in readers:
for rd_blk in read_upblks[reader]:
if (wr_blk != rd_blk):
impl_constraints.add((wr_blk, rd_blk))
constraint_objs[(wr_blk, rd_blk)].add(obj)
top._dag.constraint_objs = constraint_objs
top._dag.all_constraints = {*U_U}
for (x, y) in impl_constraints:
if ((y, x) not in U_U):
top._dag.all_constraints.add((x, y))
def _process_methods(self, top):
(_, _, _, all_M_constraints) = top.get_all_explicit_constraints()
top._dsl.top_level_callee_ports = top.get_all_object_filter((lambda x: (isinstance(x, CalleePort) and (x.get_host_component() is top))))
method_is_top_level_callee = set()
all_method_nets = top.get_all_method_nets()
for (writer, net) in all_method_nets:
if (writer is not None):
for member in net:
if (member is not writer):
assert (member.method is None)
member.method = writer.method
if (member.get_host_component() is top):
method_is_top_level_callee.add(writer.method)
for callee in top._dsl.top_level_callee_ports:
if callee.method:
method_is_top_level_callee.add(callee.method)
method_blks = defaultdict(set)
for (blk, calls) in top._dsl.all_upblk_calls.items():
for call in calls:
if isinstance(call, MethodPort):
method_blks[call.method].add(blk)
elif isinstance(call, (NonBlockingIfc, BlockingIfc)):
method_blks[call.method.method].add(blk)
else:
method_blks[call].add(blk)
pred = defaultdict(set)
succ = defaultdict(set)
top._dag.top_level_callee_constraints = set()
equiv = defaultdict(set)
for (x, y, is_equal) in all_M_constraints:
if is_equal:
if isinstance(x, MethodPort):
xx = x.method
elif isinstance(x, (NonBlockingIfc, BlockingIfc)):
xx = x.method.method
else:
xx = x
if isinstance(y, MethodPort):
yy = y.method
elif isinstance(y, (NonBlockingIfc, BlockingIfc)):
yy = y.method.method
else:
yy = y
equiv[xx].add(yy)
equiv[yy].add(xx)
visited = set()
for x in equiv:
if (x not in visited):
equiv_class = set()
visited.add(x)
Q = deque([x])
while Q:
u = Q.popleft()
equiv_class.add(u)
for v in equiv[u]:
if (v not in visited):
visited.add(v)
Q.append(v)
for u in equiv_class:
equiv[u] = equiv_class
for (x, y, is_equal) in all_M_constraints:
if is_equal:
continue
if isinstance(x, MethodPort):
xx = x.method
elif isinstance(x, (NonBlockingIfc, BlockingIfc)):
xx = x.method.method
else:
xx = x
if isinstance(y, MethodPort):
yy = y.method
elif isinstance(y, (NonBlockingIfc, BlockingIfc)):
yy = y.method.method
else:
yy = y
pred[yy].add(xx)
succ[xx].add(yy)
if (xx in equiv):
for zz in equiv[xx]:
if (zz in method_is_top_level_callee):
top._dag.top_level_callee_constraints.add((zz, yy))
elif (xx in method_is_top_level_callee):
top._dag.top_level_callee_constraints.add((xx, yy))
if (yy in equiv):
for zz in equiv[yy]:
if (zz in method_is_top_level_callee):
top._dag.top_level_callee_constraints.add((xx, zz))
elif (yy in method_is_top_level_callee):
top._dag.top_level_callee_constraints.add((xx, yy))
verbose = False
all_upblks = top.get_all_update_blocks()
for (method, assoc_blks) in method_blks.items():
visited = {(method, 0)}
Q = deque([(method, 0)])
if verbose:
print()
while Q:
(u, w) = Q.pop()
if verbose:
print((u, w))
if (u in equiv):
for v in equiv[u]:
if ((v, w) not in visited):
visited.add((v, w))
Q.append((v, w))
if (w <= 0):
for v in pred[u]:
if (v in all_upblks):
for blk in assoc_blks:
if (blk not in pred[u]):
if (v != blk):
if verbose:
print('w<=0, v is blk'.center(10), v, blk)
if verbose:
print(v.__name__.center(25), ' < ', blk.__name__.center(25))
top._dag.all_constraints.add((v, blk))
else:
if (v in method_blks):
v_blks = method_blks[v]
for vb in v_blks:
if (vb not in succ[u]):
for blk in assoc_blks:
if (blk not in pred[v]):
if (vb != blk):
if verbose:
print('w<=0, v is method'.center(10), v, blk)
if verbose:
print(vb.__name__.center(25), ' < ', blk.__name__.center(25))
top._dag.all_constraints.add((vb, blk))
if ((v, (- 1)) not in visited):
visited.add((v, (- 1)))
Q.append((v, (- 1)))
if (w >= 0):
for v in succ[u]:
if (v in all_upblks):
for blk in assoc_blks:
if (blk not in succ[u]):
if (v != blk):
if verbose:
print('w>=0, v is blk'.center(10), blk, v)
if verbose:
print(blk.__name__.center(25), ' < ', v.__name__.center(25))
top._dag.all_constraints.add((blk, v))
else:
if (v in method_blks):
v_blks = method_blks[v]
for vb in v_blks:
if (not (vb in pred[u])):
for blk in assoc_blks:
if (not (blk in succ[v])):
if (vb != blk):
if verbose:
print('w>=0, v is method'.center(10), blk, v)
if verbose:
print(blk.__name__.center(25), ' < ', vb.__name__.center(25))
top._dag.all_constraints.add((blk, vb))
if ((v, 1) not in visited):
visited.add((v, 1))
Q.append((v, 1))
blocking_ifcs = top.get_all_object_filter((lambda x: isinstance(x, (CalleeIfcFL, CallerIfcFL))))
top._dag.greenlet_upblks = set()
for blocking_method in blocking_ifcs:
for blk in method_blks[blocking_method.method.method]:
top._dag.greenlet_upblks.add(blk)
class TestKazooRetry(unittest.TestCase):
def _makeOne(self, **kw):
from kazoo.retry import KazooRetry
return KazooRetry(**kw)
def test_connection_closed(self):
from kazoo.exceptions import ConnectionClosedError
retry = self._makeOne()
def testit():
raise ConnectionClosedError()
with pytest.raises(ConnectionClosedError):
retry(testit)
def test_session_expired(self):
from kazoo.exceptions import SessionExpiredError
retry = self._makeOne(max_tries=1)
def testit():
raise SessionExpiredError()
with pytest.raises(Exception):
retry(testit)
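# Hedged sketch of the retry behaviour exercised above: with kazoo's defaults a
# SessionExpiredError is retried, and a failure is raised once max_tries runs out.
# from kazoo.retry import KazooRetry
# from kazoo.exceptions import SessionExpiredError
# attempts = {'n': 0}
# def flaky():
#     attempts['n'] += 1
#     if attempts['n'] == 1:
#         raise SessionExpiredError()
#     return 'ok'
# assert KazooRetry(max_tries=2, delay=0)(flaky) == 'ok'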
class EigenstateResult(AlgorithmResult):
def __init__(self) -> None:
super().__init__()
self.eigenvalues: (np.ndarray | None) = None
self.eigenstates: (list[tuple[(QuantumCircuit, (Sequence[float] | None))]] | None) = None
self.aux_operators_evaluated: (list[ListOrDict[complex]] | None) = None
self.raw_result: (AlgorithmResult | None) = None
self.formatting_precision: int = 12
@property
def groundenergy(self) -> (float | None):
energies = self.eigenvalues
if (isinstance(energies, np.ndarray) and energies.size):
return energies[0].real
return None
@property
def groundstate(self) -> (tuple[(QuantumCircuit, (Sequence[float] | None))] | None):
states = self.eigenstates
if states:
return states[0]
return None
@classmethod
def from_result(cls, raw_result: ((EigenstateResult | EigensolverResult) | MinimumEigensolverResult)) -> EigenstateResult:
cls_names = {cls.__name__ for cls in raw_result.__class__.mro()}
if (isinstance(raw_result, EigenstateResult) or ('EigenstateResult' in cls_names)):
return raw_result
if (isinstance(raw_result, EigensolverResult) or ('EigensolverResult' in cls_names)):
return EigenstateResult.from_eigensolver_result(raw_result)
if (isinstance(raw_result, MinimumEigensolverResult) or ('MinimumEigensolverResult' in cls_names)):
return EigenstateResult.from_minimum_eigensolver_result(raw_result)
raise TypeError(f'Cannot construct an EigenstateResult from a result of type, {type(raw_result)}.')
@classmethod
def from_eigensolver_result(cls, raw_result: EigensolverResult) -> EigenstateResult:
result = EigenstateResult()
result.raw_result = raw_result
result.eigenvalues = np.asarray(raw_result.eigenvalues)
if hasattr(raw_result, 'eigenstates'):
result.eigenstates = [(_statevector_to_circuit(Statevector(state)), None) for state in raw_result.eigenstates]
elif (hasattr(raw_result, 'optimal_circuits') and hasattr(raw_result, 'optimal_points')):
result.eigenstates = list(zip(raw_result.optimal_circuits, raw_result.optimal_points))
if (raw_result.aux_operators_evaluated is not None):
result.aux_operators_evaluated = [cls._unwrap_aux_op_values(aux_op_eval) for aux_op_eval in raw_result.aux_operators_evaluated]
return result
@classmethod
def from_minimum_eigensolver_result(cls, raw_result: MinimumEigensolverResult) -> EigenstateResult:
result = EigenstateResult()
result.raw_result = raw_result
result.eigenvalues = np.asarray([raw_result.eigenvalue])
if hasattr(raw_result, 'eigenstate'):
result.eigenstates = [(_statevector_to_circuit(Statevector(raw_result.eigenstate)), None)]
elif (hasattr(raw_result, 'optimal_circuit') and hasattr(raw_result, 'optimal_point')):
result.eigenstates = [(raw_result.optimal_circuit, raw_result.optimal_point)]
if (raw_result.aux_operators_evaluated is not None):
result.aux_operators_evaluated = [cls._unwrap_aux_op_values(raw_result.aux_operators_evaluated)]
return result
@staticmethod
def _unwrap_aux_op_values(aux_operators_evaluated: ListOrDict[tuple[(complex, dict[(str, Any)])]]) -> ListOrDict[complex]:
aux_op_values: ListOrDict[complex]
if isinstance(aux_operators_evaluated, list):
aux_op_values = [val[0] for val in aux_operators_evaluated]
else:
aux_op_values = {key: val[0] for (key, val) in aux_operators_evaluated.items()}
return aux_op_values
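# Hedged usage sketch: populating a result by hand and reading it back through the
# properties above (assumes numpy is already imported as np, as the class body does).
example_result = EigenstateResult()
example_result.eigenvalues = np.asarray([-1.137, -0.48])
assert example_result.groundenergy == -1.137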
class GetCustomEmojiStickers():
async def get_custom_emoji_stickers(self: 'pyrogram.Client', custom_emoji_ids: List[int]) -> List['types.Sticker']:
result = (await self.invoke(raw.functions.messages.GetCustomEmojiDocuments(document_id=custom_emoji_ids)))
stickers = []
for item in result:
attributes = {type(i): i for i in item.attributes}
sticker = (await types.Sticker._parse(self, item, attributes))
stickers.append(sticker)
return pyrogram.types.List(stickers)
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup('order')
group.addoption('--indulgent-ordering', action='store_true', dest='indulgent_ordering', help='Request that the sort order provided by pytest-order be applied before other sorting, allowing the other sorting to have priority')
group.addoption('--order-scope', action='store', dest='order_scope', help="Defines the scope used for ordering. Possible values are: 'session' (default), 'module', and 'class'. Ordering is only done inside a scope.")
group.addoption('--order-scope-level', action='store', type=int, dest='order_scope_level', help='Defines that the given directory level is used as order scope. Cannot be used with --order-scope. The value is a number that defines the hierarchical index of the directories used as order scope, starting with 0 at session scope.')
group.addoption('--order-group-scope', action='store', dest='order_group_scope', help="Defines the scope used for order groups. Possible values are: 'session' (default), 'module', and 'class'. Ordering is first done inside a group, then between groups.")
group.addoption('--sparse-ordering', action='store_true', dest='sparse_ordering', help='If there are gaps between ordinals, they are filled with unordered tests.')
group.addoption('--order-dependencies', action='store_true', dest='order_dependencies', help='If set, dependencies added by pytest-dependency will be ordered if needed.')
group.addoption('--order-marker-prefix', action='store', dest='order_marker_prefix', help='If set, markers starting with the given prefix followed by a number are handled like order markers with an index.')
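# Hedged usage sketch: with the plugin installed, these options are passed on the
# command line, for example
#   pytest --order-scope=module --sparse-ordering
#   pytest --indulgent-ordering --order-marker-prefix=prio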
class ContextAE():
def __init__(self, gf_dim=64, df_dim=64, gfc_dim=1024, dfc_dim=1024, c_dim=3):
self.gf_dim = gf_dim
self.df_dim = df_dim
self.c_dim = c_dim
self.gfc_dim = gfc_dim
self.dfc_dim = dfc_dim
def build(self, image):
imgshape = image.get_shape().as_list()
print(imgshape)
(self.output_height, self.output_width) = imgshape[(- 3):(- 1)]
self.batch_size = imgshape[1]
featsize = 1024
inputimg = image[0]
contextimg = image[1]
outputimg = image[2]
with tf.variable_scope('conv_context') as scope:
h0 = conv2d(contextimg, self.df_dim, name='h0_conv')
c_bn1 = batch_norm(name='c_bn1')
c_bn2 = batch_norm(name='c_bn2')
c_bn3 = batch_norm(name='c_bn3')
c_bn4 = batch_norm(name='c_bn4')
h1 = lrelu(c_bn1(conv2d(h0, (self.df_dim * 2), name='h1_conv')))
h2 = lrelu(c_bn2(conv2d(h1, (self.df_dim * 4), name='h2_conv')))
h3 = lrelu(c_bn3(conv2d(h2, (self.df_dim * 8), name='h3_conv')))
h4 = lrelu(c_bn4(linear(tf.reshape(h3, [self.batch_size, (- 1)]), featsize, 'h4_lin')))
z_ctx = linear(h4, featsize, 'hz_lin')
with tf.variable_scope('conv') as scope:
h0 = conv2d(inputimg, self.df_dim, name='h0_conv')
c_bn1 = batch_norm(name='c_bn1')
c_bn2 = batch_norm(name='c_bn2')
c_bn3 = batch_norm(name='c_bn3')
c_bn4 = batch_norm(name='c_bn4')
h1 = lrelu(c_bn1(conv2d(h0, (self.df_dim * 2), name='h1_conv')))
h2 = lrelu(c_bn2(conv2d(h1, (self.df_dim * 4), name='h2_conv')))
h3 = lrelu(c_bn3(conv2d(h2, (self.df_dim * 8), name='h3_conv')))
h4 = lrelu(c_bn4(linear(tf.reshape(h3, [self.batch_size, (- 1)]), featsize, 'h4_lin')))
z = linear(h4, featsize, 'hz_lin')
self.z = z
print(self.z.get_shape())
with tf.variable_scope('deconv') as scope:
d_bn0 = batch_norm(name='d_bn0')
d_bn1 = batch_norm(name='d_bn1')
d_bn2 = batch_norm(name='d_bn2')
d_bn3 = batch_norm(name='d_bn3')
(s_h, s_w) = (self.output_height, self.output_width)
(s_h2, s_h4, s_h8, s_h16) = (int((s_h / 2)), int((s_h / 4)), int((s_h / 8)), int((s_h / 16)))
(s_w2, s_w4, s_w8, s_w16) = (int((s_w / 2)), int((s_w / 4)), int((s_w / 8)), int((s_w / 16)))
(self.z_, self.h0_w, self.h0_b) = linear(tf.concat([z, z_ctx], 1), (((self.gf_dim * 8) * s_h16) * s_w16), 'd_h0_lin', with_w=True)
self.h0 = tf.reshape(self.z_, [(- 1), s_h16, s_w16, (self.gf_dim * 8)])
h0 = lrelu(d_bn0(self.h0))
(self.h1, self.h1_w, self.h1_b) = deconv2d(h0, [self.batch_size, s_h8, s_w8, (self.gf_dim * 4)], name='d_h1', with_w=True)
h1 = lrelu(d_bn1(self.h1))
(h2, self.h2_w, self.h2_b) = deconv2d(h1, [self.batch_size, s_h4, s_w4, (self.gf_dim * 2)], name='d_h2', with_w=True)
h2 = lrelu(d_bn2(h2))
(h3, self.h3_w, self.h3_b) = deconv2d(h2, [self.batch_size, s_h2, s_w2, (self.gf_dim * 1)], name='d_h3', with_w=True)
h3 = lrelu(d_bn3(h3))
(h4, self.h4_w, self.h4_b) = deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name='d_h4', with_w=True)
self.out = h4
self.loss = tf.nn.l2_loss((outputimg - self.out))
class Range():
def __init__(self, gdf, values, spatial_weights, unique_id, rng=(0, 100), verbose=True, **kwargs):
self.gdf = gdf
self.sw = spatial_weights
self.id = gdf[unique_id]
self.rng = rng
self.kwargs = kwargs
data = gdf.copy()
if ((values is not None) and (not isinstance(values, str))):
data['mm_v'] = values
values = 'mm_v'
self.values = data[values]
data = data.set_index(unique_id)[values]
results_list = []
for index in tqdm(data.index, total=data.shape[0], disable=(not verbose)):
if (index in spatial_weights.neighbors):
neighbours = [index]
neighbours += spatial_weights.neighbors[index]
values_list = data.loc[neighbours]
results_list.append(sp.stats.iqr(values_list, rng=rng, **kwargs))
else:
results_list.append(np.nan)
self.series = pd.Series(results_list, index=gdf.index)
def initialize_uninitialized_vars(sess):
with sess.graph.as_default():
global_vars = tf.compat.v1.global_variables()
is_not_initialized = sess.run([(~ tf.compat.v1.is_variable_initialized(var)) for var in global_vars])
uninitialized_vars = list(compress(global_vars, is_not_initialized))
if uninitialized_vars:
log.info('Initializing uninitialized variables')
sess.run(tf.compat.v1.variables_initializer(uninitialized_vars))
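# Hedged usage sketch (TF1-style graph mode; `log` is assumed to be a module-level logger
# and `compress` to come from itertools, both required by the helper above):
# sess = tf.compat.v1.Session()
# ...build or restore part of the graph...
# initialize_uninitialized_vars(sess)  # only variables not yet initialized are initialized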
def test_nested_while_with_continue() -> None:
src = '\n while n > 10:\n while n > 20:\n continue\n print(n - 1)\n continue\n print(n)\n '
cfg = build_cfg(src)
expected_blocks = [['n > 10'], ['n > 20'], ['continue'], ['print(n - 1)', 'continue'], ['print(n)'], []]
assert (expected_blocks == _extract_blocks(cfg))
expected_edges = [[['n > 10'], ['n > 20']], [['n > 20'], ['continue']], [['continue'], ['n > 20']], [['n > 20'], ['print(n - 1)', 'continue']], [['print(n - 1)', 'continue'], ['n > 10']], [['n > 10'], ['print(n)']], [['print(n)'], []]]
assert (expected_edges == _extract_edges(cfg))
class GeneralTranslationTask(Task):
VERSION = 0
def __init__(self, sacrebleu_dataset, sacrebleu_language_pair=None):
self.sacrebleu_dataset = sacrebleu_dataset
self.sacrebleu_language_pair = sacrebleu_language_pair
self.src_file = self.ref_file = self.src_data = self.ref_data = None
super().__init__()
def download(self, data_dir=None, cache_dir=None, download_mode=None):
(self.src_file, self.ref_file) = sacrebleu.download_test_set(self.sacrebleu_dataset, self.sacrebleu_language_pair)
(self.src_data, self.ref_data) = [[line.rstrip() for line in sacrebleu.smart_open(file)] for file in (self.src_file, self.ref_file)]
def has_training_docs(self):
return False
def has_validation_docs(self):
return False
def has_test_docs(self):
return True
def test_docs(self):
return [{'src': src, 'ref': ref} for (src, ref) in zip(self.src_data, self.ref_data)]
def doc_to_text(self, doc):
language_codes = self.sacrebleu_language_pair.split('-')
src_lang = code_to_language(language_codes[0])
tar_lang = code_to_language(language_codes[1])
return (f'{src_lang} phrase: ' + doc['src']) + f'\n{tar_lang} phrase:'
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc['src']
def doc_to_target(self, doc):
return ((' ' + doc['ref']) if isinstance(doc['ref'], str) else doc['ref'][0])
def construct_requests(self, doc, ctx):
return rf.greedy_until(ctx, ['\n'])
def process_results(self, doc, results):
tar_lang_code = self.sacrebleu_language_pair.split('-')[(- 1)]
if (tar_lang_code in NO_SPACE_LANG):
doc['ref'] = NO_SPACE_LANG[tar_lang_code]([doc['ref']])[0]
results = NO_SPACE_LANG[tar_lang_code](results)
ref_pred = (doc['ref'], results)
return {'bleu': ref_pred, 'chrf': ref_pred, 'ter': ref_pred}
def aggregation(self):
return {'bleu': metrics.bleu, 'chrf': metrics.chrf, 'ter': metrics.ter}
def higher_is_better(self):
return {'bleu': True, 'chrf': True, 'ter': False}
def __str__(self):
language_codes = self.sacrebleu_language_pair.split('-')
src_lang = code_to_language(language_codes[0])
tar_lang = code_to_language(language_codes[1])
return f'{self.sacrebleu_dataset.upper()} {src_lang} to {tar_lang} Task'
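# Hedged usage sketch ('wmt16'/'de-en' are placeholders for a sacrebleu test set and
# language pair; download() fetches the source/reference files through sacrebleu):
# task = GeneralTranslationTask('wmt16', 'de-en')
# task.download()
# doc = task.test_docs()[0]
# prompt = task.doc_to_text(doc)   # e.g. "German phrase: ...\nEnglish phrase:"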
def train(num_epochs, model, optimizers, train_loader, val_loader, fabric):
for epoch in range(num_epochs):
train_acc = torchmetrics.Accuracy(task='multiclass', num_classes=10).to(fabric.device)
model.train()
for (batch_idx, (features, targets)) in enumerate(train_loader):
model.train()
logits = model(features)
loss = F.cross_entropy(logits, targets)
with fsdp_overlap_step_with_backward(optimizers, model):
fabric.backward(loss)
if (not (batch_idx % 50)):
fabric.print(f'Epoch: {(epoch + 1):04d}/{num_epochs:04d} | Batch {batch_idx:04d}/{len(train_loader):04d} | Loss: {loss:.4f}')
model.eval()
with torch.no_grad():
predicted_labels = torch.argmax(logits, 1)
train_acc.update(predicted_labels, targets)
model.eval()
with torch.no_grad():
val_acc = torchmetrics.Accuracy(task='multiclass', num_classes=10).to(fabric.device)
for (features, targets) in val_loader:
outputs = model(features)
predicted_labels = torch.argmax(outputs, 1)
val_acc.update(predicted_labels, targets)
fabric.print(f'Epoch: {(epoch + 1):04d}/{num_epochs:04d} | Train acc.: {(train_acc.compute() * 100):.2f}% | Val acc.: {(val_acc.compute() * 100):.2f}%')
train_acc.reset()
val_acc.reset()
class W_Vector(W_MVector):
_attrs_ = ['strategy', 'storage', 'len']
errorname = 'vector'
import_from_mixin(StrategyVectorMixin)
def __init__(self, strategy, storage, len):
self.strategy = strategy
self.storage = storage
self.len = len
def get_len(self):
return self.len
def set_len(self, new_len):
self.len = new_len
def add1_len(self):
self.len += 1
def get_strategy(self):
return self.strategy
def set_strategy(self, strategy):
if (not config.strategies):
assert (strategy is ObjectVectorStrategy.singleton)
self.strategy = strategy
@staticmethod
def fromelements(elems, immutable=False):
strategy = _find_strategy_class(elems, immutable)
storage = strategy.create_storage_for_elements(elems)
return W_Vector(strategy, storage, len(elems))
@staticmethod
def fromelement(elem, times, immutable=False, strategy=None):
if ((not config.strategies) or (times == 0)):
strategy = ObjectVectorStrategy.singleton
elif (strategy is None):
strategy = ConstantVectorStrategy.singleton
if immutable:
strategy = strategy.immutable_variant()
storage = strategy.create_storage_for_element(elem, times)
return W_Vector(strategy, storage, times)
def tostring(self):
l = self.strategy.ref_all(self)
return ('#(%s)' % ' '.join([obj.tostring() for obj in l]))
def _make_copy(self, immutable=False):
return self.strategy._copy_storage(self, immutable=immutable)
def hash_equal(self, info=None):
raise UnhashableType
def equal(self, other):
if (not isinstance(other, W_MVector)):
return False
if (self is other):
return True
if (self.length() != other.length()):
return False
for i in range(self.length()):
if (not self.ref(i).equal(other.ref(i))):
return False
return True
def add_methods_to_generator_class(builder: IRBuilder, fn_info: FuncInfo, sig: FuncSignature, arg_regs: list[Register], blocks: list[BasicBlock], is_coroutine: bool) -> None:
helper_fn_decl = add_helper_to_generator_class(builder, arg_regs, blocks, sig, fn_info)
add_next_to_generator_class(builder, fn_info, helper_fn_decl, sig)
add_send_to_generator_class(builder, fn_info, helper_fn_decl, sig)
add_iter_to_generator_class(builder, fn_info)
add_throw_to_generator_class(builder, fn_info, helper_fn_decl, sig)
add_close_to_generator_class(builder, fn_info)
if is_coroutine:
add_await_to_generator_class(builder, fn_info)
class LongPressMixin(RequiredServicesMixin):
EVENT_TYPE_LONG_PRESS = 'LongPress'
@property
def _required_services(self) -> list[RequiredService]:
return (super()._required_services + [RequiredService(name='rules', actions=['FetchRules', 'StoreRules'])])
@_type_check
def list_long_press_udns(self) -> frozenset[str]:
devices = []
with rules_db_from_device(self) as rules_db:
for (rule, _) in rules_db.rules_for_device(rule_type=RULE_TYPE_LONG_PRESS):
devices.extend(rules_db.get_target_devices_for_rule(rule))
return frozenset(devices)
@_type_check
def add_long_press_udns(self, device_udns: Iterable[str]) -> None:
with rules_db_from_device(self) as rules_db:
rule = ensure_long_press_rule_exists(rules_db, self.name, self.udn)
for udn in device_udns:
if (not udn):
continue
if (udn not in rules_db.get_target_devices_for_rule(rule)):
rules_db.add_target_device_to_rule(rule, udn)
@_type_check
def remove_long_press_udns(self, device_udns: Iterable[str]) -> None:
with rules_db_from_device(self) as rules_db:
for (rule, _) in rules_db.rules_for_device(rule_type=RULE_TYPE_LONG_PRESS):
for udn in device_udns:
if (udn in rules_db.get_target_devices_for_rule(rule)):
rules_db.remove_target_device_from_rule(rule, udn)
@_type_check
def get_long_press_action(self) -> (ActionType | None):
with rules_db_from_device(self) as rules_db:
for (_, device) in rules_db.rules_for_device(rule_type=RULE_TYPE_LONG_PRESS):
return ActionType(device.StartAction)
return None
@_type_check
def set_long_press_action(self, action: ActionType) -> None:
with rules_db_from_device(self) as rules_db:
ensure_long_press_rule_exists(rules_db, self.name, self.udn)
for (_, device) in rules_db.rules_for_device(rule_type=RULE_TYPE_LONG_PRESS):
device.StartAction = action.value
def ensure_long_press_virtual_device(self) -> None:
self.add_long_press_udns([VIRTUAL_DEVICE_UDN])
def remove_long_press_virtual_device(self) -> None:
self.remove_long_press_udns([VIRTUAL_DEVICE_UDN])
def api_response(result: Any, status_code: HTTPStatus=HTTPStatus.OK) -> Response:
if (status_code == HTTPStatus.NO_CONTENT):
assert (not result), 'Provided 204 response with non-zero length response'
data = ''
else:
data = json.dumps(result)
log.debug('Request successful', response=result, status_code=status_code)
response = make_response((data, status_code, {'mimetype': 'application/json', 'Content-Type': 'application/json'}))
return response
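# Hedged usage sketch inside a Flask view (assumes `app` is the Flask application and
# `log` the structlog-style logger used above):
# @app.route('/ping')
# def ping() -> Response:
#     return api_response({'pong': True})
# A 204 must carry an empty body: api_response(None, HTTPStatus.NO_CONTENT)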
@register_bpe('characters')
class Characters(object):
def __init__(self, args):
pass
@staticmethod
def add_args(parser):
pass
@staticmethod
def encode(x: str) -> str:
escaped = x.replace(SPACE, SPACE_ESCAPE)
return SPACE.join(list(escaped))
@staticmethod
def decode(x: str) -> str:
return x.replace(SPACE, '').replace(SPACE_ESCAPE, SPACE)
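# Worked example, assuming fairseq's usual constants SPACE = chr(32) and
# SPACE_ESCAPE = chr(0x2581) ('▁'):
#   Characters.encode('hi there') == 'h i ▁ t h e r e'
#   Characters.decode('h i ▁ t h e r e') == 'hi there'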
def _concat(prefix, suffix, static=False):
if isinstance(prefix, ops.Tensor):
p = prefix
p_static = tensor_util.constant_value(prefix)
if (p.shape.ndims == 0):
p = array_ops.expand_dims(p, 0)
elif (p.shape.ndims != 1):
raise ValueError(('prefix tensor must be either a scalar or vector, but saw tensor: %s' % p))
else:
p = tensor_shape.as_shape(prefix)
p_static = (p.as_list() if (p.ndims is not None) else None)
p = (constant_op.constant(p.as_list(), dtype=dtypes.int32) if p.is_fully_defined() else None)
if isinstance(suffix, ops.Tensor):
s = suffix
s_static = tensor_util.constant_value(suffix)
if (s.shape.ndims == 0):
s = array_ops.expand_dims(s, 0)
elif (s.shape.ndims != 1):
raise ValueError(('suffix tensor must be either a scalar or vector, but saw tensor: %s' % s))
else:
s = tensor_shape.as_shape(suffix)
s_static = (s.as_list() if (s.ndims is not None) else None)
s = (constant_op.constant(s.as_list(), dtype=dtypes.int32) if s.is_fully_defined() else None)
if static:
shape = tensor_shape.as_shape(p_static).concatenate(s_static)
shape = (shape.as_list() if (shape.ndims is not None) else None)
else:
if ((p is None) or (s is None)):
raise ValueError(('Provided a prefix or suffix of None: %s and %s' % (prefix, suffix)))
shape = array_ops.concat((p, s), 0)
return shape
class EnlightenGANOptions(BaseOptions):
def __init__(self, training):
BaseOptions.__init__(self)
if training:
self.parser.add_argument('--dirA', type=str, required=True, help='Path to training dataset A')
self.parser.add_argument('--dirB', type=str, required=True, help='Path to training dataset B')
else:
self.parser.add_argument('--dir', type=str, required=True, help='Path to test dataset')
self.parser.add_argument('--ngf', type=int, default=32, help='# of filters in first conv. layer of generator')
self.parser.add_argument('--netG', type=str, default='sid_unet_resize', help='Specify generator architecture [resnet_9blocks | resnet_6blocks | sid_unet_resize]')
self.parser.add_argument('--self_attention', action='store_true', help='Adding attention on the input of generator')
self.parser.add_argument('--times_residual', action='store_true', help='output = input + residual*attention')
self.parser.add_argument('--skip', type=float, default=1.0, help='B = G(A) + skip*A')
self.parser.add_argument('--ndf', type=int, default=64, help='# of filters in first conv. layer of discriminator')
self.parser.add_argument('--netD', type=str, default='no_norm_n_layers', help='Specify discriminator architecture [basic | n_layers | no_norm_n_layers | pixel].')
self.parser.add_argument('--n_layers', type=int, default=5, help='# of layers for discriminator. Only used if netD==[n_layers | no_norm_n_layers]')
self.parser.add_argument('--n_layers_patch', type=int, default=4, help='# of layers for patch discriminator. Only used if netD==[n_layers | no_norm_n_layers]')
self.parser.add_argument('--patchD', action='store_true', help='Use patch discriminator')
self.parser.add_argument('--patchD_3', type=int, default=0, help='Choose number of crops for patch discriminator')
self.parser.add_argument('--patch_size', type=int, default=32, help='Size to crop patches to')
self.parser.add_argument('--vgg', action='store_true', help='Use perceptual loss')
self.parser.add_argument('--vgg_choose', type=str, default='block5_conv1', help='Choose layer of VGG')
self.parser.add_argument('--no_vgg_instance', action='store_true', help='Whether to apply instance normalization on extracted features')
self.parser.add_argument('--patch_vgg', action='store_true', help='use vgg loss between each patch')
self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--gan_mode', type=str, default='lsgan', help='Use least square GAN or vanilla GAN. Default is LSGAN.')
self.parser.add_argument('--use_ragan', action='store_true', help='Use ragan')
self.parser.add_argument('--hybrid_loss', action='store_true', help='Use lsgan and ragan separately')
def parse(self):
return self.parser.parse_args()
@pytest.fixture(params=_list_of_kernels, ids=(lambda p: p['kernel'].string_id()))
def kernel(request):
m = request.param['kernel']
d = m.__dict__
for (k, v) in request.param.items():
if (k == 'kernel'):
continue
k = ('test_' + k.replace('-', '_'))
d[k] = v
return m
def add_orders(order_id, price, user_id, product_id, rating=None):
command = 'INSERT INTO orders \n (id, price, user_id, product_id, rating)\n VALUES (%s, %s, %s, %s, %s)'
command_args = (order_id, price, int(user_id), int(product_id), rating)
db.execute_a_data_manipulation(command, command_args)
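# Hedged usage sketch (`db` is the module's database helper; the ids, price, and rating
# below are placeholder values):
# add_orders(order_id=1, price=19.99, user_id='42', product_id='7', rating=5)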
class Scope():
def __init__(self, pycore, pyobject, parent_scope):
self.pycore = pycore
self.pyobject = pyobject
self.parent = parent_scope
def get_names(self):
return self.pyobject.get_attributes()
def get_defined_names(self):
return self.pyobject._get_structural_attributes()
def get_name(self, name):
if (name not in self.get_names()):
raise exceptions.NameNotFoundError(('name %s not found' % name))
return self.get_names()[name]
def __getitem__(self, key):
return self.get_name(key)
def __contains__(self, key):
return (key in self.get_names())
def get_scopes(self):
return self._create_scopes()
def lookup(self, name):
if (name in self.get_names()):
return self.get_names()[name]
if (self.parent is not None):
return self.parent._propagated_lookup(name)
return None
def get_propagated_names(self):
return self.get_names()
def _propagated_lookup(self, name):
if (name in self.get_propagated_names()):
return self.get_propagated_names()[name]
if (self.parent is not None):
return self.parent._propagated_lookup(name)
return None
def _create_scopes(self):
return [pydefined.get_scope() for pydefined in self.pyobject._get_defined_objects()]
def _get_global_scope(self):
current = self
while (current.parent is not None):
current = current.parent
return current
def get_start(self):
return self.pyobject.get_ast().lineno
def get_body_start(self):
body = self.pyobject.get_ast().body
if body:
return body[0].lineno
return self.get_start()
def get_end(self):
pymodule = self._get_global_scope().pyobject
return pymodule.logical_lines.logical_line_in(self.logical_end)[1]
def get_logical_end(self):
global_scope = self._get_global_scope()
return global_scope._scope_finder.find_scope_end(self)
start = property(get_start)
end = property(get_end)
logical_end = property(get_logical_end)
def get_kind(self):
pass
def get_region(self):
self._calculate_scope_regions_for_module()
node = self.pyobject.get_ast()
region = patchedast.node_region(node)
return region
def _calculate_scope_regions_for_module(self):
self._get_global_scope()._calculate_scope_regions()
def in_region(self, offset):
region = self.get_region()
return (region[0] < offset < region[1])
@dataclass(init=False, unsafe_hash=True)
class LineLayout():
size: int
origin: Tuple[(int, int)]
rotation: int
def __init__(self, *, size: int, origin: Tuple[(int, int)]=(0, 0), rotation: int=0) -> None:
(a, b) = origin
self.origin = (a, b)
self.size = size
self.rotation = rotation
self._initialize_layout()
def _initialize_layout(self) -> None:
(up_qubits, down_qubits) = _find_line_qubits(self.size, self.origin, self.rotation)
(up_even_pairs, up_odd_pairs) = _get_even_odd_pairs(up_qubits)
(down_even_pairs, down_odd_pairs) = _get_even_odd_pairs(down_qubits)
self._hop_even_pairs = (down_even_pairs + up_even_pairs)
self._hop_odd_pairs = (down_odd_pairs + up_odd_pairs)
self._up_qubits = up_qubits
self._down_qubits = down_qubits
@classmethod
def cirq_resolvers(cls) -> Dict[(str, Optional[Type])]:
return {cls.__name__: cls, f'recirq.{cls.__name__}': cls}
@classmethod
def _json_namespace_(cls):
return 'recirq'
@property
def up_qubits(self) -> List[cirq.GridQubit]:
return self._up_qubits
@property
def down_qubits(self) -> List[cirq.GridQubit]:
return self._down_qubits
@property
def all_qubits(self) -> List[cirq.GridQubit]:
return (self.up_qubits + self.down_qubits)
@property
def up_even_pairs(self) -> GridQubitPairs:
return _get_even_pairs(self.up_qubits)
@property
def down_even_pairs(self) -> GridQubitPairs:
return _get_even_pairs(self.down_qubits)
@property
def up_odd_pairs(self) -> GridQubitPairs:
return _get_odd_pairs(self.up_qubits)
@property
def down_odd_pairs(self) -> GridQubitPairs:
return _get_odd_pairs(self.down_qubits)
@property
def interaction_pairs(self) -> GridQubitPairs:
return list(zip(self.up_qubits, self._down_qubits))
def default_layout(self) -> 'LineLayout':
return LineLayout(size=self.size)
def text_diagram(self, draw_grid_coords: bool=True) -> str:
return _draw_chains(self.up_qubits, self.down_qubits, self.interaction_pairs, draw_grid_coords)
def _json_dict_(self):
return cirq.dataclass_json_dict(self)
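# Hedged usage sketch (requires cirq plus this module's _find_line_qubits / pair helpers;
# size=4 is an arbitrary choice):
# layout = LineLayout(size=4)
# layout.all_qubits          # up-chain qubits followed by down-chain qubits
# layout.interaction_pairs   # (up, down) pairs zipped across the two chains
# print(layout.text_diagram())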
@patch('pypyr.utils.filesystem.get_glob', autospec=True)
def test_glob_list(mock_glob):
context = Context({'ok1': 'ov1', 'glob': ['./arb/x', './arb/y', './arb/z']})
mock_glob.return_value = ['./f1.1', './f2.1', './f2.2', './f2.3']
with patch_logger('pypyr.steps.glob', logging.INFO) as mock_logger_info:
glob_step.run_step(context)
mock_logger_info.assert_called_once_with('glob checked 3 globs and saved 4 paths to globOut')
assert context, "context shouldn't be None"
assert (len(context) == 3), 'context should have 3 items'
assert (context['ok1'] == 'ov1')
assert (context['glob'] == ['./arb/x', './arb/y', './arb/z'])
assert (context['globOut'] == ['./f1.1', './f2.1', './f2.2', './f2.3'])
mock_glob.assert_called_once_with(['./arb/x', './arb/y', './arb/z'])
class LDAPControl(RequestControl, ResponseControl):
def __init__(self, controlType=None, criticality=False, controlValue=None, encodedControlValue=None):
self.controlType = controlType
self.criticality = criticality
self.controlValue = controlValue
self.encodedControlValue = encodedControlValue
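# Hedged usage sketch: a request control identified by OID and marked critical (the OID
# shown is the RFC 2696 paged-results control, used here only as an example value):
# ctrl = LDAPControl(controlType='1.2.840.113556.1.4.319', criticality=True)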
class CocoStuff164k(BaseDataSet):
def __init__(self, **kwargs):
self.num_classes = 182
self.palette = palette.COCO_palette
super(CocoStuff164k, self).__init__(**kwargs)
def _set_files(self):
if (self.split in ['train2017', 'val2017']):
file_list = sorted(glob(os.path.join(self.root, 'images', (self.split + '/*.jpg'))))
self.files = [os.path.basename(f).split('.')[0] for f in file_list]
else:
raise ValueError(f'Invalid split name {self.split}, either train2017 or val2017')
def _load_data(self, index):
image_id = self.files[index]
image_path = os.path.join(self.root, 'images', self.split, (image_id + '.jpg'))
label_path = os.path.join(self.root, 'annotations', self.split, (image_id + '.png'))
image = np.asarray(Image.open(image_path).convert('RGB'), dtype=np.float32)
label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
return (image, label, image_id)
class Trainer(object):
def __init__(self, train_learner, eval_learner, is_training, train_dataset_list, eval_dataset_list, restrict_classes, restrict_num_per_class, checkpoint_dir, summary_dir, records_root_dir, eval_finegrainedness, eval_finegrainedness_split, eval_imbalance_dataset, omit_from_saving_and_reloading, train_episode_config, eval_episode_config, learn_config, learner_config, data_config):
self.train_learner_class = train_learner
self.eval_learner_class = eval_learner
self.is_training = is_training
self.train_dataset_list = train_dataset_list
self.eval_dataset_list = eval_dataset_list
self.restrict_classes = restrict_classes
self.restrict_num_per_class = restrict_num_per_class
self.checkpoint_dir = checkpoint_dir
self.summary_dir = summary_dir
self.records_root_dir = records_root_dir
self.eval_finegrainedness = eval_finegrainedness
self.eval_finegrainedness_split = eval_finegrainedness_split
self.eval_imbalance_dataset = eval_imbalance_dataset
self.omit_from_saving_and_reloading = omit_from_saving_and_reloading
self.eval_split = (VALID_SPLIT if is_training else TEST_SPLIT)
if eval_finegrainedness:
self.eval_split = eval_finegrainedness_split
if (eval_finegrainedness or eval_imbalance_dataset):
logging.info('Forcing the number of %s classes to be 2, since the finegrainedness analysis is applied on binary classification tasks only.', eval_finegrainedness_split)
if (eval_finegrainedness and (eval_finegrainedness_split == TRAIN_SPLIT)):
train_episode_config.num_ways = 2
else:
eval_episode_config.num_ways = 2
self.num_train_classes = train_episode_config.num_ways
self.num_test_classes = eval_episode_config.num_ways
self.num_support_train = train_episode_config.num_support
self.num_query_train = train_episode_config.num_query
self.num_support_eval = eval_episode_config.num_support
self.num_query_eval = eval_episode_config.num_query
self.learn_config = learn_config
self.learner_config = learner_config
self.train_episode_config = train_episode_config
self.eval_episode_config = eval_episode_config
if self.learn_config.transductive_batch_norm:
logging.warn('Using transductive batch norm!')
self.backprop_through_moments = True
self.data_config = data_config
self.image_shape = (([data_config.image_height] * 2) + [3])
self.benchmark_spec = self.get_benchmark_specification()
self.required_splits = ([TRAIN_SPLIT] if self.is_training else [])
self.required_splits += [self.eval_split]
self.split_episode_or_batch_specs = {}
self.next_data = {}
self.ema_object = None
self.learners = {}
self.embedding_fn = learner.NAME_TO_EMBEDDING_NETWORK[self.learner_config.embedding_network]
for split in self.required_splits:
if (split == TRAIN_SPLIT):
self.split_episode_or_batch_specs[split] = self._create_train_specification()
self.next_data[split] = self.build_data(split)
self.learners[split] = self.create_train_learner(self.train_learner_class, self.get_next(split))
elif (split in [VALID_SPLIT, TEST_SPLIT]):
self.split_episode_or_batch_specs[split] = self._create_held_out_specification(split)
self.next_data[split] = self.build_data(split)
self.learners[split] = self.create_eval_learner(self.eval_learner_class, self.get_next(split))
else:
raise UnexpectedSplitError(split)
self.losses = dict(zip(self.required_splits, [self.learners[split].compute_loss() for split in self.required_splits]))
self.accs = dict(zip(self.required_splits, [self.learners[split].compute_accuracy() for split in self.required_splits]))
self.set_way_shots_classes_logits_targets()
self.train_op = None
if self.is_training:
global_step = tf.train.get_or_create_global_step()
learning_rate = self.learner_config.learning_rate
if self.learner_config.decay_learning_rate:
learning_rate = tf.train.exponential_decay(self.learner_config.learning_rate, global_step, decay_steps=self.learner_config.decay_every, decay_rate=self.learner_config.decay_rate, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
self.optimizer = tf.train.AdamOptimizer(learning_rate)
self.train_op = self.get_train_op(global_step)
vars_to_restore = []
logging.info('Omitting from saving / reloading any variable that contains any of the following substrings: %s', omit_from_saving_and_reloading)
for var in tf.global_variables():
if (not any([(substring in var.name) for substring in omit_from_saving_and_reloading])):
vars_to_restore.append(var)
else:
logging.info('Omitting variable %s', var.name)
self.saver = tf.train.Saver(var_list=vars_to_restore, max_to_keep=500)
if (self.checkpoint_dir is not None):
if (not tf.io.gfile.exists(self.checkpoint_dir)):
tf.io.gfile.makedirs(self.checkpoint_dir)
self.initialize_session()
self.create_summary_writer()
def set_way_shots_classes_logits_targets(self):
raise NotImplementedError('Abstract Method.')
def maybe_set_way_shots_classes_logits_targets(self, skip_train=False):
(way, shots, class_props, class_ids, test_logits, test_targets) = ([], [], [], [], [], [])
for split in self.required_splits:
if ((split == TRAIN_SPLIT) and skip_train):
(way_, shots_, class_props_, class_ids_, test_logits_, test_targets_) = ([None] * 6)
else:
(way_, shots_, class_ids_) = compute_episode_stats(self.next_data[split])
class_props_ = None
if self.eval_imbalance_dataset:
class_props_ = compute_train_class_proportions(self.next_data[split], shots_, self.eval_imbalance_dataset_spec)
test_logits_ = self.learners[split].test_logits
test_targets_ = self.learners[split].test_targets
way.append(way_)
shots.append(shots_)
class_props.append(class_props_)
class_ids.append(class_ids_)
test_logits.append(test_logits_)
test_targets.append(test_targets_)
self.way = dict(zip(self.required_splits, way))
self.shots = dict(zip(self.required_splits, shots))
self.class_props = dict(zip(self.required_splits, class_props))
self.class_ids = dict(zip(self.required_splits, class_ids))
self.test_logits = dict(zip(self.required_splits, test_logits))
self.test_targets = dict(zip(self.required_splits, test_targets))
def create_summary_writer(self):
standard_summaries = []
for split in self.required_splits:
loss_summary = tf.summary.scalar(('%s_loss' % split), self.losses[split])
acc_summary = tf.summary.scalar(('%s_acc' % split), self.accs[split])
standard_summaries.append(loss_summary)
standard_summaries.append(acc_summary)
evaluation_summaries = self.add_eval_summaries()
self.standard_summaries = tf.summary.merge(standard_summaries)
self.evaluation_summaries = tf.summary.merge(evaluation_summaries)
self.summary_writer = None
if (self.summary_dir is not None):
self.summary_writer = tf.summary.FileWriter(self.summary_dir)
if (not tf.io.gfile.exists(self.summary_dir)):
tf.io.gfile.makedirs(self.summary_dir)
def create_train_learner(self, train_learner_class, episode_or_batch):
raise NotImplementedError('Abstract Method.')
def create_eval_learner(self, eval_learner_class, episode):
raise NotImplementedError('Abstract Method.')
def get_benchmark_specification(self, records_root_dir=None):
(data_spec_list, has_dag_ontology, has_bilevel_ontology, splits_to_contribute) = ([], [], [], [])
seen_datasets = set()
if self.is_training:
benchmark_datasets = (self.train_dataset_list + self.eval_dataset_list)
else:
benchmark_datasets = self.eval_dataset_list
if isinstance(records_root_dir, list):
if (len(records_root_dir) != len(benchmark_datasets)):
raise ValueError('The given records_root_dir is a list whose length is not the same as the number of benchmark datasets. Found datasets {} (for the {} phase) but len(records_root_dir) is {}. Expected their lengths to match or records_path to be a string.'.format(benchmark_datasets, ('training' if self.is_training else 'evaluation'), len(records_root_dir)))
records_roots_for_datasets = records_root_dir
elif isinstance(records_root_dir, six.text_type):
records_roots_for_datasets = ([records_root_dir] * len(benchmark_datasets))
elif (records_root_dir is None):
records_roots_for_datasets = ([self.records_root_dir] * len(benchmark_datasets))
for (dataset_name, dataset_records_root) in zip(benchmark_datasets, records_roots_for_datasets):
if (dataset_name in seen_datasets):
continue
dataset_records_path = os.path.join(dataset_records_root, dataset_name)
data_spec = dataset_spec_lib.load_dataset_spec(dataset_records_path)
has_dag = (dataset_name == 'ilsvrc_2012')
is_bilevel = (dataset_name == 'omniglot')
if (not self.is_training):
splits = {self.eval_split}
else:
splits = set()
if (dataset_name in self.train_dataset_list):
splits.add(TRAIN_SPLIT)
if (dataset_name in self.eval_dataset_list):
splits.add(VALID_SPLIT)
restricted_classes_per_split = {}
if (dataset_name in self.restrict_classes):
classes_per_split = self.restrict_classes[dataset_name]
for (split, num_classes) in classes_per_split.items():
episode_descr_config = (self.train_episode_config if (split == TRAIN_SPLIT) else self.eval_episode_config)
if (has_dag and (not episode_descr_config.ignore_dag_ontology)):
raise ValueError('Restrictions on the class set of a dataset with a DAG ontology are not supported when ignore_dag_ontology is False.')
if (is_bilevel and (not episode_descr_config.ignore_bilevel_ontology)):
raise ValueError('Restrictions on the class set of a dataset with a bilevel ontology are not supported when ignore_bilevel_ontology is False.')
restricted_classes_per_split[get_split_enum(split)] = num_classes
data_spec.initialize(restricted_classes_per_split)
tf.logging.info('Restrictions for dataset {}:'.format(dataset_name))
for split in list(splits):
num_classes = data_spec.get_classes(get_split_enum(split))
tf.logging.info('\t split {} is restricted to {} classes'.format(split, num_classes))
logging.info('Adding dataset %s', data_spec.name)
data_spec_list.append(data_spec)
has_dag_ontology.append(has_dag)
has_bilevel_ontology.append(is_bilevel)
splits_to_contribute.append(splits)
seen_datasets.add(dataset_name)
if self.eval_imbalance_dataset:
self.eval_imbalance_dataset_spec = data_spec
assert (len(data_spec_list) == 1), 'Imbalance analysis is only supported on one dataset at a time.'
benchmark_spec = dataset_spec_lib.BenchmarkSpecification('benchmark', self.image_shape, data_spec_list, has_dag_ontology, has_bilevel_ontology, splits_to_contribute)
splits_to_datasets = collections.defaultdict(list)
for (dataset_spec, splits_to_contribute) in zip(data_spec_list, splits_to_contribute):
for split in splits_to_contribute:
splits_to_datasets[split].append(dataset_spec.name)
for (split, datasets) in splits_to_datasets.items():
logging.info('Episodes for split %s will be created from %s', split, datasets)
return benchmark_spec
def initialize_session(self):
if ENABLE_TF_OPTIMIZATIONS:
self.sess = tf.Session()
else:
session_config = tf.ConfigProto()
rewrite_options = session_config.graph_options.rewrite_options
rewrite_options.disable_model_pruning = True
rewrite_options.constant_folding = rewrite_options.OFF
rewrite_options.arithmetic_optimization = rewrite_options.OFF
rewrite_options.remapping = rewrite_options.OFF
rewrite_options.shape_optimization = rewrite_options.OFF
rewrite_options.dependency_optimization = rewrite_options.OFF
rewrite_options.function_optimization = rewrite_options.OFF
rewrite_options.layout_optimizer = rewrite_options.OFF
rewrite_options.loop_optimization = rewrite_options.OFF
rewrite_options.memory_optimization = rewrite_options.NO_MEM_OPT
self.sess = tf.Session(config=session_config)
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
if self.learner_config.checkpoint_for_eval:
self.saver.restore(self.sess, self.learner_config.checkpoint_for_eval)
logging.info('Restored checkpoint: %s', self.learner_config.checkpoint_for_eval)
else:
latest_checkpoint = None
if (self.checkpoint_dir is not None):
latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir)
if latest_checkpoint:
self.saver.restore(self.sess, latest_checkpoint)
logging.info('Restored checkpoint: %s', latest_checkpoint)
else:
logging.info('No previous checkpoint.')
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
if (self.learner_config.pretrained_checkpoint and (not self.sess.run(tf.train.get_global_step()))):
baselinelearner_embed_vars_to_reload = []
for var in tf.global_variables():
is_relationnet_var = var.name.startswith('relationnet')
requested_to_omit = any([(substring in var.name) for substring in self.omit_from_saving_and_reloading])
is_embedding_var = any(((keyword in var.name) for keyword in EMBEDDING_KEYWORDS))
is_adam_var = (('Adam:' in var.name) or ('Adam_1:' in var.name))
if ((not is_relationnet_var) and (not requested_to_omit) and is_embedding_var and (not is_adam_var)):
if ('adam' in var.name.lower()):
logging.error('Variable name unexpectedly indicates it is both related to an embedding, and to ADAM: %s', var.name)
continue
baselinelearner_embed_vars_to_reload.append(var)
backbone_saver = tf.train.Saver(var_list=baselinelearner_embed_vars_to_reload, max_to_keep=1)
backbone_saver.restore(self.sess, self.learner_config.pretrained_checkpoint)
logging.info('Restored only vars %s from checkpoint: %s', [var.name for var in baselinelearner_embed_vars_to_reload], self.learner_config.pretrained_checkpoint)
def _create_held_out_specification(self, split=TEST_SPLIT):
split_enum = get_split_enum(split)
return learning_spec.EpisodeSpecification(split_enum, self.num_test_classes, self.num_support_eval, self.num_query_eval)
def _create_train_specification(self):
raise NotImplementedError('Abstract Method.')
def build_data(self, split):
raise NotImplementedError('Abstract method.')
def _restrict_dataset_list_for_split(self, split, splits_to_contribute, dataset_list):
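# Keep only the entries of dataset_list whose dataset contributes examples to the given split, preserving order.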
updated_list = []
for (dataset_num, dataset_splits) in enumerate(splits_to_contribute):
if (split in dataset_splits):
updated_list.append(dataset_list[dataset_num])
return updated_list
def get_num_to_take(self, dataset_name, split):
num_to_take = (- 1)
if (dataset_name in self.restrict_num_per_class):
dataset_restrict_num_per_class = self.restrict_num_per_class[dataset_name]
if (split in dataset_restrict_num_per_class):
num_to_take = dataset_restrict_num_per_class[split]
return num_to_take
def build_episode(self, split):
shuffle_buffer_size = self.data_config.shuffle_buffer_size
read_buffer_size_bytes = self.data_config.read_buffer_size_bytes
num_prefetch = self.data_config.num_prefetch
(_, image_shape, dataset_spec_list, has_dag_ontology, has_bilevel_ontology, splits_to_contribute) = self.benchmark_spec
dataset_spec_list = self._restrict_dataset_list_for_split(split, splits_to_contribute, dataset_spec_list)
has_dag_ontology = self._restrict_dataset_list_for_split(split, splits_to_contribute, has_dag_ontology)
has_bilevel_ontology = self._restrict_dataset_list_for_split(split, splits_to_contribute, has_bilevel_ontology)
episode_spec = self.split_episode_or_batch_specs[split]
dataset_split = episode_spec[0]
image_size = image_shape[0]
if (image_shape[1] != image_size):
raise ValueError('Expected a square image shape, not {}'.format(image_shape))
if (split == TRAIN_SPLIT):
episode_descr_config = self.train_episode_config
elif (split in (VALID_SPLIT, TEST_SPLIT)):
episode_descr_config = self.eval_episode_config
else:
raise UnexpectedSplitError(split)
num_per_class = []
for dataset_spec in dataset_spec_list:
num_per_class.append(self.get_num_to_take(dataset_spec.name, split))
if (len(dataset_spec_list) == 1):
use_dag_ontology = has_dag_ontology[0]
if (self.eval_finegrainedness or self.eval_imbalance_dataset):
use_dag_ontology = False
data_pipeline = pipeline.make_one_source_episode_pipeline(dataset_spec_list[0], use_dag_ontology=use_dag_ontology, use_bilevel_ontology=has_bilevel_ontology[0], split=dataset_split, episode_descr_config=episode_descr_config, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_size, num_to_take=num_per_class[0])
else:
data_pipeline = pipeline.make_multisource_episode_pipeline(dataset_spec_list, use_dag_ontology_list=has_dag_ontology, use_bilevel_ontology_list=has_bilevel_ontology, split=dataset_split, episode_descr_config=episode_descr_config, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_size, num_to_take=num_per_class)
data_pipeline = apply_dataset_options(data_pipeline)
iterator = data_pipeline.make_one_shot_iterator()
(episode, _) = iterator.get_next()
(support_images, support_labels, support_class_ids, query_images, query_labels, query_class_ids) = episode
return providers.EpisodeDataset(train_images=support_images, test_images=query_images, train_labels=support_labels, test_labels=query_labels, train_class_ids=support_class_ids, test_class_ids=query_class_ids)
def build_batch(self, split):
shuffle_buffer_size = self.data_config.shuffle_buffer_size
read_buffer_size_bytes = self.data_config.read_buffer_size_bytes
num_prefetch = self.data_config.num_prefetch
(_, image_shape, dataset_spec_list, _, _, splits_to_contribute) = self.benchmark_spec
dataset_spec_list = self._restrict_dataset_list_for_split(split, splits_to_contribute, dataset_spec_list)
num_per_class = []
for dataset_spec in dataset_spec_list:
num_per_class.append(self.get_num_to_take(dataset_spec.name, split))
(dataset_split, batch_size) = self.split_episode_or_batch_specs[split]
for dataset_spec in dataset_spec_list:
if (dataset_spec.name in DATASETS_WITH_EXAMPLE_SPLITS):
raise ValueError('Batch pipeline is used only at meta-train time, and does not handle datasets with example splits, which should only be used at meta-test (evaluation) time.')
if (len(dataset_spec_list) == 1):
data_pipeline = pipeline.make_one_source_batch_pipeline(dataset_spec_list[0], split=dataset_split, batch_size=batch_size, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_shape[0], num_to_take=num_per_class[0])
else:
data_pipeline = pipeline.make_multisource_batch_pipeline(dataset_spec_list, split=dataset_split, batch_size=batch_size, shuffle_buffer_size=shuffle_buffer_size, read_buffer_size_bytes=read_buffer_size_bytes, num_prefetch=num_prefetch, image_size=image_shape[0], num_to_take=num_per_class)
data_pipeline = apply_dataset_options(data_pipeline)
iterator = data_pipeline.make_one_shot_iterator()
((images, class_ids), _) = iterator.get_next()
return providers.Batch(images=images, labels=class_ids)
def get_next(self, split):
if (split not in [TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT]):
raise ValueError('Invalid split. Expected one of "train", "valid", or "test".')
return self.next_data[split]
def get_train_op(self, global_step):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
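# Run the collected update ops (e.g. batch-norm moving-average updates) before each optimizer step.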
with tf.control_dependencies(update_ops):
train_op = self.optimizer.minimize(self.losses[TRAIN_SPLIT], global_step=global_step)
return train_op
def get_updated_global_step(self):
with tf.control_dependencies([self.train_op]):
global_step = tf.identity(tf.train.get_global_step())
return global_step
def train(self):
global_step = self.sess.run(tf.train.get_global_step())
logging.info('Starting training from global_step: %d', global_step)
updated_global_step = self.get_updated_global_step()
self.valid_acc = np.nan
self.valid_ci = np.nan
self.maybe_evaluate(global_step)
while (global_step < self.learn_config.num_updates):
(_, train_loss, train_acc, global_step) = self.sess.run([self.train_op, self.losses[TRAIN_SPLIT], self.accs[TRAIN_SPLIT], updated_global_step])
self.maybe_evaluate(global_step)
if (not (global_step % self.learn_config.log_every)):
message = ('Update %d. Train loss: %f, Train accuracy: %f, Valid accuracy %f +/- %f.\n' % (global_step, train_loss, train_acc, self.valid_acc, self.valid_ci))
logging.info(message)
summaries = self.sess.run(self.standard_summaries)
if self.summary_writer:
self.summary_writer.add_summary(summaries, global_step)
should_save = (self.checkpoint_dir is not None)
if (should_save and ((global_step % self.learn_config.checkpoint_every) == 0)):
save_path = self.saver.save(self.sess, os.path.join(self.checkpoint_dir, ('model_%d.ckpt' % global_step)))
logging.info('Model checkpoint saved: %s', save_path)
def maybe_evaluate(self, global_step):
if (not (global_step % self.learn_config.validate_every)):
(valid_acc, valid_ci, valid_acc_summary, valid_ci_summary) = self.evaluate(VALID_SPLIT)
if self.summary_writer:
self.summary_writer.add_summary(valid_acc_summary, global_step)
self.summary_writer.add_summary(valid_ci_summary, global_step)
self.valid_acc = valid_acc
self.valid_ci = valid_ci
def evaluate(self, split):
num_eval_trials = self.learn_config.num_eval_episodes
logging.info('Performing evaluation of the %s split using %d episodes...', split, num_eval_trials)
accuracies = []
for eval_trial_num in range(num_eval_trials):
(acc, summaries) = self.sess.run([self.accs[split], self.evaluation_summaries])
accuracies.append(acc)
if ((not self.is_training) and self.summary_writer):
self.summary_writer.add_summary(summaries, eval_trial_num)
logging.info('Done.')
mean_acc = np.mean(accuracies)
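# Half-width of the 95% confidence interval: 1.96 standard errors of the mean accuracy.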
ci_acc = ((np.std(accuracies) * 1.96) / np.sqrt(len(accuracies)))
if (split == TEST_SPLIT):
logging.info('Test accuracy: %f, +/- %f.\n', mean_acc, ci_acc)
mean_acc_summary = tf.Summary()
mean_acc_summary.value.add(tag=('mean %s acc' % split), simple_value=mean_acc)
ci_acc_summary = tf.Summary()
ci_acc_summary.value.add(tag=('%s acc CI' % split), simple_value=ci_acc)
return (mean_acc, ci_acc, mean_acc_summary, ci_acc_summary)
def add_eval_summaries_split(self, split):
split_eval_summaries = []
way_summary = tf.summary.scalar(('%s_way' % split), self.way[split])
shots_summary = tf.summary.tensor_summary(('%s_shots' % split), self.shots[split])
classes_summary = tf.summary.tensor_summary(('%s_class_ids' % split), self.class_ids[split])
logits_summary = tf.summary.tensor_summary(('%s_test_logits' % split), self.test_logits[split])
targets_summary = tf.summary.tensor_summary(('%s_test_targets' % split), self.test_targets[split])
if self.eval_imbalance_dataset:
class_props_summary = tf.summary.tensor_summary(('%s_class_props' % split), self.class_props[split])
split_eval_summaries.append(class_props_summary)
split_eval_summaries.append(way_summary)
split_eval_summaries.append(shots_summary)
split_eval_summaries.append(classes_summary)
split_eval_summaries.append(logits_summary)
split_eval_summaries.append(targets_summary)
return split_eval_summaries |
class DQN(object):
def __init__(self, hps, name_variable):
self._hps = hps
self._name_variable = name_variable
def variable_summaries(self, var_name, var):
with tf.name_scope('summaries_{}'.format(var_name)):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square((var - mean))))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def _add_placeholders(self):
self._x = tf.placeholder(tf.float32, [None, self._hps.dqn_input_feature_len], name='x')
self._y = tf.placeholder(tf.float32, [None, self._hps.vocab_size], name='y')
self._train_step = tf.placeholder(tf.int32, None, name='train_step')
def _make_feed_dict(self, batch):
feed_dict = {}
feed_dict[self._x] = batch._x
feed_dict[self._y] = batch._y
return feed_dict
def _add_tf_layers(self):
h = tf.layers.dense(self._x, units=self._hps.dqn_input_feature_len, activation=tf.nn.relu, name='{}_input_layer'.format(self._name_variable))
for (i, layer) in enumerate(self._hps.dqn_layers.split(',')):
h = tf.layers.dense(h, units=int(layer), activation=tf.nn.relu, name='{}_h_{}'.format(self._name_variable, i))
self.advantage_layer = tf.layers.dense(h, units=self._hps.vocab_size, activation=tf.nn.softmax, name='{}_advantage'.format(self._name_variable))
if self._hps.dueling_net:
self.value_layer = tf.layers.dense(h, units=1, activation=tf.identity, name='{}_value'.format(self._name_variable))
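# Dueling head: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); the mean-centered advantages are added to the broadcast state value.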
normalized_al = (self.advantage_layer - tf.reshape(tf.reduce_mean(self.advantage_layer, axis=1), [(- 1), 1]))
value_extended = tf.concat(([self.value_layer] * self._hps.vocab_size), axis=1)
self.output = (value_extended + normalized_al)
else:
self.output = self.advantage_layer
def _add_train_op(self):
self.loss = tf.losses.mean_squared_error(labels=self._y, predictions=self.output)
tvars = tf.trainable_variables()
gradients = tf.gradients(self.loss, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
with tf.device('/gpu:{}'.format(self._hps.dqn_gpu_num)):
(grads, global_norm) = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)
tf.summary.scalar('global_norm', global_norm)
optimizer = tf.train.AdamOptimizer(self._hps.lr)
with tf.device('/gpu:{}'.format(self._hps.dqn_gpu_num)):
self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')
self.variable_summaries('dqn_loss', self.loss)
def _add_update_weights_op(self):
self.model_trainables = tf.trainable_variables(scope='{}_relay_network'.format(self._name_variable))
self._new_trainables = [tf.placeholder(tf.float32, None, name='trainables_{}'.format(i)) for i in range(len(self.model_trainables))]
self.assign_ops = []
if self._hps.dqn_polyak_averaging:
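# Soft (Polyak-style) target update: tau ramps with the step count modulo the update period, blending the current target weights with the newly supplied ones.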
tau = ((tf.cast(self._train_step, tf.float32) % self._hps.dqn_target_update) / float(self._hps.dqn_target_update))
for (i, mt) in enumerate(self.model_trainables):
nt = self._new_trainables[i]
self.assign_ops.append(mt.assign(((tau * mt) + ((1 - tau) * nt))))
elif ((self._train_step % self._hps.dqn_target_update) == 0):
for (i, mt) in enumerate(self.model_trainables):
nt = self._new_trainables[i]
self.assign_ops.append(mt.assign(nt))
def build_graph(self):
with tf.variable_scope('{}_relay_network'.format(self._name_variable)), tf.device('/gpu:{}'.format(self._hps.dqn_gpu_num)):
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self._add_placeholders()
self._add_tf_layers()
self._add_train_op()
self._add_update_weights_op()
self._summaries = tf.summary.merge_all()
def run_train_steps(self, sess, batch):
feed_dict = self._make_feed_dict(batch)
to_return = {'train_op': self.train_op, 'summaries': self._summaries, 'loss': self.loss, 'global_step': self.global_step}
return sess.run(to_return, feed_dict)
def run_test_steps(self, sess, x, y=None, return_loss=False, return_best_action=False):
feed_dict = {self._x: x}
to_return = {'estimates': self.output}
if return_loss:
feed_dict.update({self._y: y})
to_return.update({'loss': self.loss})
output = sess.run(to_return, feed_dict)
if return_best_action:
output['best_action'] = np.argmax(output['estimates'], axis=1)
return output
def run_update_weights(self, sess, train_step, weights):
feed_dict = {self._train_step: train_step}
for (i, w) in enumerate(weights):
feed_dict.update({self._new_trainables[i]: w})
_ = sess.run(self.assign_ops, feed_dict) |
class TestSelectionNotify(EndianTest):
def setUp(self):
self.evt_args_0 = {'property': , 'requestor': , 'selection': , 'sequence_number': 25394, 'target': , 'time': , 'type': 165}
self.evt_bin_0 = b'\xa5\x00c2\x18f\xeb\xaav\x00\xc6\x8aL\xb9g\xb0A\x0f\t\x9b_\x87\x83\x9e\x00\x00\x00\x00\x00\x00\x00\x00'
def testPack0(self):
bin = event.SelectionNotify._fields.to_binary(*(), **self.evt_args_0)
self.assertBinaryEqual(bin, self.evt_bin_0)
def testUnpack0(self):
(args, remain) = event.SelectionNotify._fields.parse_binary(self.evt_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.evt_args_0) |
class TensorVariable(_tensor_py_operators, Variable[(_TensorTypeType, OptionalApplyType)]):
def __init__(self, type: _TensorTypeType, owner: OptionalApplyType, index=None, name=None):
super().__init__(type, owner, index=index, name=name)
if ((config.warn_float64 != 'ignore') and (type.dtype == 'float64')):
msg = 'You are creating a TensorVariable with float64 dtype. You requested an action via the PyTensor flag warn_float64={ignore,warn,raise,pdb}.'
if (config.warn_float64 == 'warn'):
x = tb.extract_stack()
nb_rm = 0
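# Drop traceback frames that live inside pytensor itself so the warning's stacklevel points at user code.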
while x:
file_path = x[(- 1)][0]
rm = False
for p in ['pytensor/tensor/', 'pytensor\\tensor\\', 'pytensor/graph/', 'pytensor\\graph\\']:
if (p in file_path):
x = x[:(- 1)]
nb_rm += 1
rm = True
break
if (not rm):
break
warnings.warn(msg, stacklevel=(1 + nb_rm))
elif (config.warn_float64 == 'raise'):
raise Exception(msg)
elif (config.warn_float64 == 'pdb'):
import pdb
pdb.set_trace() |
class TestLoadNetCDFXArray(TestLoadNetCDF):
def setup_method(self):
if (sys.version_info.minor >= 10):
self.tempdir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)
else:
self.tempdir = tempfile.TemporaryDirectory()
self.saved_path = pysat.params['data_dirs']
pysat.params['data_dirs'] = self.tempdir.name
self.testInst = pysat.Instrument(platform='pysat', name='ndtesting', update_files=True, num_samples=100, use_header=True)
self.stime = pysat.instruments.pysat_ndtesting._test_dates['']['']
self.epoch_name = 'time'
self.loaded_inst = None
return
def teardown_method(self):
pysat.params['data_dirs'] = self.saved_path
del self.loaded_inst, self.testInst, self.stime, self.epoch_name
try:
self.tempdir.cleanup()
except Exception:
pass
del self.tempdir, self.saved_path
return
@pytest.mark.parametrize('kwargs,target', [({}, False), ({'decode_timedelta': False}, False), ({'decode_timedelta': True}, True)])
def test_read_netcdf4_with_time_meta_labels(self, kwargs, target):
outfile = os.path.join(self.tempdir.name, 'pysat_test_ncdf.nc')
self.testInst.load(date=self.stime, use_header=True)
self.testInst.meta['uts'] = {'units': 'seconds'}
self.testInst.meta['mlt'] = {'units': 'minutes'}
self.testInst.meta['slt'] = {'units': 'hours'}
io.inst_to_netcdf(self.testInst, fname=outfile)
tkwargs = decode_times_val(self.testInst.pandas_format)
(self.loaded_inst, meta) = io.load_netcdf(outfile, pandas_format=self.testInst.pandas_format, **kwargs, **tkwargs)
vars = ['uts', 'mlt', 'slt']
for var in vars:
val = self.loaded_inst[var].values[0]
assert (isinstance(val, np.timedelta64) == target), 'Variable {:} not loaded correctly'.format(var)
return
def test_load_netcdf_pandas_3d_error(self):
outfile = os.path.join(self.tempdir.name, 'pysat_test_ncdf.nc')
self.testInst.load(date=self.stime, use_header=True)
io.inst_to_netcdf(self.testInst, fname=outfile)
testing.eval_bad_input(io.load_netcdf, ValueError, 'only supports 1D and 2D data in pandas', input_args=[outfile], input_kwargs={'epoch_name': 'time', 'pandas_format': True})
return |
class FastConsumerFactory(_BaseKafkaQueueConsumerFactory):
@staticmethod
def _commit_callback(err: confluent_kafka.KafkaError, topic_partition_list: List[confluent_kafka.TopicPartition]) -> None:
for topic_partition in topic_partition_list:
topic = topic_partition.topic
partition = topic_partition.partition
offset = topic_partition.offset
if topic_partition.error:
logger.error('commit error topic %s partition %s offset %s', topic, partition, offset)
elif (offset == confluent_kafka.OFFSET_INVALID):
pass
else:
logger.debug('commit success topic %s partition %s offset %s', topic, partition, offset)
@classmethod
def _consumer_config(cls) -> Dict[(str, Any)]:
return {'heartbeat.interval.ms': 3000, 'session.timeout.ms': 10000, 'max.poll.interval.ms': 300000, 'enable.auto.commit': 'true', 'auto.commit.interval.ms': 5000, 'enable.auto.offset.store': 'true', 'on_commit': cls._commit_callback} |
class ClassNodeTest(ModuleLoader, unittest.TestCase):
def test_dict_interface(self) -> None:
_test_dict_interface(self, self.module['YOUPI'], 'method')
def test_cls_special_attributes_1(self) -> None:
cls = self.module['YO']
self.assertEqual(len(cls.getattr('__bases__')), 1)
self.assertEqual(len(cls.getattr('__name__')), 1)
self.assertIsInstance(cls.getattr('__name__')[0], nodes.Const)
self.assertEqual(cls.getattr('__name__')[0].value, 'YO')
self.assertEqual(len(cls.getattr('__doc__')), 1)
self.assertIsInstance(cls.getattr('__doc__')[0], nodes.Const)
self.assertEqual(cls.getattr('__doc__')[0].value, 'hehe\n haha')
module_attr_num = 4
self.assertEqual(len(cls.getattr('__module__')), module_attr_num)
self.assertIsInstance(cls.getattr('__module__')[0], nodes.Const)
self.assertEqual(cls.getattr('__module__')[0].value, 'data.module')
self.assertEqual(len(cls.getattr('__dict__')), 1)
if (not cls.newstyle):
self.assertRaises(AttributeInferenceError, cls.getattr, '__mro__')
for cls in (nodes.List._proxied, nodes.Const(1)._proxied):
self.assertEqual(len(cls.getattr('__bases__')), 1)
self.assertEqual(len(cls.getattr('__name__')), 1)
self.assertEqual(len(cls.getattr('__doc__')), 1, (cls, cls.getattr('__doc__')))
self.assertEqual(cls.getattr('__doc__')[0].value, cls.doc_node.value)
self.assertEqual(len(cls.getattr('__module__')), 4)
self.assertEqual(len(cls.getattr('__dict__')), 1)
self.assertEqual(len(cls.getattr('__mro__')), 1)
def test__mro__attribute(self) -> None:
node = builder.extract_node('\n class A(object): pass\n class B(object): pass\n class C(A, B): pass\n ')
assert isinstance(node, nodes.ClassDef)
mro = node.getattr('__mro__')[0]
self.assertIsInstance(mro, nodes.Tuple)
self.assertEqual(mro.elts, node.mro())
def test__bases__attribute(self) -> None:
node = builder.extract_node('\n class A(object): pass\n class B(object): pass\n class C(A, B): pass\n class D(C): pass\n ')
assert isinstance(node, nodes.ClassDef)
bases = node.getattr('__bases__')[0]
self.assertIsInstance(bases, nodes.Tuple)
self.assertEqual(len(bases.elts), 1)
self.assertIsInstance(bases.elts[0], nodes.ClassDef)
self.assertEqual(bases.elts[0].name, 'C')
def test_cls_special_attributes_2(self) -> None:
astroid = builder.parse('\n class A(object): pass\n class B(object): pass\n\n A.__bases__ += (B,)\n ', __name__)
self.assertEqual(len(astroid['A'].getattr('__bases__')), 2)
self.assertIsInstance(astroid['A'].getattr('__bases__')[1], nodes.Tuple)
self.assertIsInstance(astroid['A'].getattr('__bases__')[0], nodes.AssignAttr)
def test_instance_special_attributes(self) -> None:
for inst in (Instance(self.module['YO']), nodes.List(), nodes.Const(1)):
self.assertRaises(AttributeInferenceError, inst.getattr, '__mro__')
self.assertRaises(AttributeInferenceError, inst.getattr, '__bases__')
self.assertRaises(AttributeInferenceError, inst.getattr, '__name__')
self.assertEqual(len(inst.getattr('__dict__')), 1)
self.assertEqual(len(inst.getattr('__doc__')), 1)
def test_navigation(self) -> None:
klass = self.module['YO']
self.assertEqual(klass.statement(), klass)
self.assertEqual(klass.statement(), klass)
l_sibling = klass.previous_sibling()
self.assertTrue(isinstance(l_sibling, nodes.FunctionDef), l_sibling)
self.assertEqual(l_sibling.name, 'global_access')
r_sibling = klass.next_sibling()
self.assertIsInstance(r_sibling, nodes.ClassDef)
self.assertEqual(r_sibling.name, 'YOUPI')
def test_local_attr_ancestors(self) -> None:
module = builder.parse('\n class A():\n def __init__(self): pass\n class B(A): pass\n class C(B): pass\n class D(object): pass\n class F(): pass\n class E(F, D): pass\n ')
klass2 = module['C']
it = klass2.local_attr_ancestors('__init__')
anc_klass = next(it)
self.assertIsInstance(anc_klass, nodes.ClassDef)
self.assertEqual(anc_klass.name, 'A')
anc_klass = next(it)
self.assertIsInstance(anc_klass, nodes.ClassDef)
self.assertEqual(anc_klass.name, 'object')
self.assertRaises(StopIteration, partial(next, it))
it = klass2.local_attr_ancestors('method')
self.assertRaises(StopIteration, partial(next, it))
klass2 = module['E']
it = klass2.local_attr_ancestors('__init__')
anc_klass = next(it)
self.assertIsInstance(anc_klass, nodes.ClassDef)
self.assertEqual(anc_klass.name, 'object')
self.assertRaises(StopIteration, partial(next, it))
def test_local_attr_mro(self) -> None:
module = builder.parse('\n class A(object):\n def __init__(self): pass\n class B(A):\n def __init__(self, arg, arg2): pass\n class C(A): pass\n class D(C, B): pass\n ')
dclass = module['D']
init = dclass.local_attr('__init__')[0]
self.assertIsInstance(init, nodes.FunctionDef)
self.assertEqual(init.parent.name, 'B')
cclass = module['C']
init = cclass.local_attr('__init__')[0]
self.assertIsInstance(init, nodes.FunctionDef)
self.assertEqual(init.parent.name, 'A')
ancestors = list(dclass.local_attr_ancestors('__init__'))
self.assertEqual([node.name for node in ancestors], ['B', 'A', 'object'])
def test_instance_attr_ancestors(self) -> None:
klass2 = self.module['YOUPI']
it = klass2.instance_attr_ancestors('yo')
anc_klass = next(it)
self.assertIsInstance(anc_klass, nodes.ClassDef)
self.assertEqual(anc_klass.name, 'YO')
self.assertRaises(StopIteration, partial(next, it))
klass2 = self.module['YOUPI']
it = klass2.instance_attr_ancestors('member')
self.assertRaises(StopIteration, partial(next, it))
def test_methods(self) -> None:
expected_methods = {'__init__', 'class_method', 'method', 'static_method'}
klass2 = self.module['YOUPI']
methods = {m.name for m in klass2.methods()}
self.assertTrue(methods.issuperset(expected_methods))
methods = {m.name for m in klass2.mymethods()}
self.assertSetEqual(expected_methods, methods)
klass2 = self.module2['Specialization']
methods = {m.name for m in klass2.mymethods()}
self.assertSetEqual(set(), methods)
method_locals = klass2.local_attr('method')
self.assertEqual(len(method_locals), 1)
self.assertEqual(method_locals[0].name, 'method')
self.assertRaises(AttributeInferenceError, klass2.local_attr, 'nonexistent')
methods = {m.name for m in klass2.methods()}
self.assertTrue(methods.issuperset(expected_methods))
def test_ancestors(self) -> None:
klass = self.module['YOUPI']
self.assertEqual(['YO', 'object'], [a.name for a in klass.ancestors()])
klass = self.module2['Specialization']
self.assertEqual(['YOUPI', 'YO', 'object'], [a.name for a in klass.ancestors()])
def test_type(self) -> None:
klass = self.module['YOUPI']
self.assertEqual(klass.type, 'class')
klass = self.module2['Metaclass']
self.assertEqual(klass.type, 'metaclass')
klass = self.module2['MyException']
self.assertEqual(klass.type, 'exception')
klass = self.module2['MyError']
self.assertEqual(klass.type, 'exception')
klass = self.module2['NotMetaclass']
self.assertEqual(klass.type, 'class')
def test_inner_classes(self) -> None:
eee = self.nonregr['Ccc']['Eee']
self.assertEqual([n.name for n in eee.ancestors()], ['Ddd', 'Aaa', 'object'])
def test_classmethod_attributes(self) -> None:
data = '\n class WebAppObject(object):\n def registered(cls, application):\n cls.appli = application\n cls.schema = application.schema\n cls.config = application.config\n return cls\n registered = classmethod(registered)\n '
astroid = builder.parse(data, __name__)
cls = astroid['WebAppObject']
assert_keys = ['__module__', '__qualname__', 'appli', 'config', 'registered', 'schema']
self.assertEqual(sorted(cls.locals.keys()), assert_keys)
def test_class_getattr(self) -> None:
data = '\n class WebAppObject(object):\n appli = application\n appli += 2\n del self.appli\n '
astroid = builder.parse(data, __name__)
cls = astroid['WebAppObject']
self.assertEqual(len(cls.getattr('appli')), 2)
def test_instance_getattr(self) -> None:
data = '\n class WebAppObject(object):\n def __init__(self, application):\n self.appli = application\n self.appli += 2\n del self.appli\n '
astroid = builder.parse(data)
inst = Instance(astroid['WebAppObject'])
self.assertEqual(len(inst.getattr('appli')), 2)
def test_instance_getattr_with_class_attr(self) -> None:
data = '\n class Parent:\n aa = 1\n cc = 1\n\n class Klass(Parent):\n aa = 0\n bb = 0\n\n def incr(self, val):\n self.cc = self.aa\n if val > self.aa:\n val = self.aa\n if val < self.bb:\n val = self.bb\n self.aa += val\n '
astroid = builder.parse(data)
inst = Instance(astroid['Klass'])
self.assertEqual(len(inst.getattr('aa')), 3, inst.getattr('aa'))
self.assertEqual(len(inst.getattr('bb')), 1, inst.getattr('bb'))
self.assertEqual(len(inst.getattr('cc')), 2, inst.getattr('cc'))
def test_getattr_method_transform(self) -> None:
data = '\n class Clazz(object):\n\n def m1(self, value):\n self.value = value\n m2 = m1\n\n def func(arg1, arg2):\n "function that will be used as a method"\n return arg1.value + arg2\n\n Clazz.m3 = func\n inst = Clazz()\n inst.m4 = func\n '
astroid = builder.parse(data)
cls = astroid['Clazz']
for method in ('m1', 'm2', 'm3'):
inferred = list(cls.igetattr(method))
self.assertEqual(len(inferred), 1)
self.assertIsInstance(inferred[0], UnboundMethod)
inferred = list(Instance(cls).igetattr(method))
self.assertEqual(len(inferred), 1)
self.assertIsInstance(inferred[0], BoundMethod)
inferred = list(Instance(cls).igetattr('m4'))
self.assertEqual(len(inferred), 1)
self.assertIsInstance(inferred[0], nodes.FunctionDef)
def test_getattr_from_grandpa(self) -> None:
data = '\n class Future:\n attr = 1\n\n class Present(Future):\n pass\n\n class Past(Present):\n pass\n '
astroid = builder.parse(data)
past = astroid['Past']
attr = past.getattr('attr')
self.assertEqual(len(attr), 1)
attr1 = attr[0]
self.assertIsInstance(attr1, nodes.AssignName)
self.assertEqual(attr1.name, 'attr')
def test_getattr_with_enpty_annassign() -> None:
code = '\n class Parent:\n attr: int = 2\n\n class Child(Parent): #\n attr: int\n '
child = extract_node(code)
attr = child.getattr('attr')
assert (len(attr) == 1)
assert isinstance(attr[0], nodes.AssignName)
assert (attr[0].name == 'attr')
assert (attr[0].lineno == 3)
def test_function_with_decorator_lineno(self) -> None:
data = '\n (a=2,\n b=3)\n def g1(x):\n print(x)\n\n (a=2,\n b=3,\n )\n def g2():\n pass\n '
astroid = builder.parse(data)
self.assertEqual(astroid['g1'].fromlineno, 4)
self.assertEqual(astroid['g1'].tolineno, 5)
if (PY38 and IS_PYPY):
self.assertEqual(astroid['g2'].fromlineno, 9)
else:
self.assertEqual(astroid['g2'].fromlineno, 10)
self.assertEqual(astroid['g2'].tolineno, 11)
def test_metaclass_error(self) -> None:
astroid = builder.parse('\n class Test(object):\n __metaclass__ = typ\n ')
klass = astroid['Test']
self.assertFalse(klass.metaclass())
def test_metaclass_yes_leak(self) -> None:
astroid = builder.parse('\n # notice `ab` instead of `abc`\n from ab import ABCMeta\n\n class Meta(object):\n __metaclass__ = ABCMeta\n ')
klass = astroid['Meta']
self.assertIsNone(klass.metaclass())
def test_metaclass_type(self) -> None:
klass = builder.extract_node('\n def with_metaclass(meta, base=object):\n return meta("NewBase", (base, ), {})\n\n class ClassWithMeta(with_metaclass(type)): #\n pass\n ')
assert isinstance(klass, nodes.ClassDef)
self.assertEqual(['NewBase', 'object'], [base.name for base in klass.ancestors()])
def test_no_infinite_metaclass_loop(self) -> None:
klass = builder.extract_node("\n class SSS(object):\n\n class JJJ(object):\n pass\n\n \n def Init(cls):\n cls.JJJ = type('JJJ', (cls.JJJ,), {})\n\n class AAA(SSS):\n pass\n\n class BBB(AAA.JJJ):\n pass\n ")
assert isinstance(klass, nodes.ClassDef)
self.assertFalse(_is_metaclass(klass))
ancestors = [base.name for base in klass.ancestors()]
self.assertIn('object', ancestors)
self.assertIn('JJJ', ancestors)
def test_no_infinite_metaclass_loop_with_redefine(self) -> None:
ast_nodes = builder.extract_node('\n import datetime\n\n class A(datetime.date): #\n \n def now(cls):\n return cls()\n\n class B(datetime.date): #\n pass\n\n datetime.date = A\n datetime.date = B\n ')
for klass in ast_nodes:
self.assertEqual(None, klass.metaclass())
@unittest.skipUnless(HAS_SIX, 'These tests require the six library')
def test_metaclass_generator_hack(self):
klass = builder.extract_node('\n import six\n\n class WithMeta(six.with_metaclass(type, object)): #\n pass\n ')
assert isinstance(klass, nodes.ClassDef)
self.assertEqual(['object'], [base.name for base in klass.ancestors()])
self.assertEqual('type', klass.metaclass().name)
@unittest.skipUnless(HAS_SIX, 'These tests require the six library')
def test_metaclass_generator_hack_enum_base(self):
'Regression test for'
klass = builder.extract_node('\n import six\n from enum import Enum, EnumMeta\n\n class PetEnumPy2Metaclass(six.with_metaclass(EnumMeta, Enum)): #\n DOG = "dog"\n ')
self.assertEqual(list(klass.local_attr_ancestors('DOG')), [])
def test_add_metaclass(self) -> None:
klass = builder.extract_node('\n import abc\n\n class WithMeta(object, metaclass=abc.ABCMeta):\n pass\n ')
assert isinstance(klass, nodes.ClassDef)
inferred = next(klass.infer())
metaclass = inferred.metaclass()
self.assertIsInstance(metaclass, nodes.ClassDef)
self.assertIn(metaclass.qname(), ('abc.ABCMeta', '_py_abc.ABCMeta'))
@unittest.skipUnless(HAS_SIX, 'These tests require the six library')
def test_using_invalid_six_add_metaclass_call(self):
klass = builder.extract_node('\n import six\n _metaclass()\n class Invalid(object):\n pass\n ')
inferred = next(klass.infer())
self.assertIsNone(inferred.metaclass())
def test_with_invalid_metaclass():
klass = extract_node('\n class InvalidAsMetaclass: ...\n\n class Invalid(metaclass=InvalidAsMetaclass()): #\n pass\n ')
inferred = next(klass.infer())
metaclass = inferred.metaclass()
assert isinstance(metaclass, Instance)
def test_nonregr_infer_callresult(self) -> None:
astroid = builder.parse('\n class Delegate(object):\n def __get__(self, obj, cls):\n return getattr(obj._subject, self.attribute)\n\n class CompositeBuilder(object):\n __call__ = Delegate()\n\n builder = CompositeBuilder(result, composite)\n tgts = builder()\n ')
instance = astroid['tgts']
self.assertEqual(list(instance.infer()), [util.Uninferable])
def test_slots(self) -> None:
astroid = builder.parse('\n from collections import deque\n from textwrap import dedent\n\n class First(object): #\n __slots__ = ("a", "b", 1)\n class Second(object): #\n __slots__ = "a"\n class Third(object): #\n __slots__ = deque(["a", "b", "c"])\n class Fourth(object): #\n __slots__ = {"a": "a", "b": "b"}\n class Fifth(object): #\n __slots__ = list\n class Sixth(object): #\n __slots__ = ""\n class Seventh(object): #\n __slots__ = dedent.__name__\n class Eight(object): #\n __slots__ = ("parens")\n class Ninth(object): #\n pass\n class Ten(object): #\n __slots__ = dict({"a": "b", "c": "d"})\n ')
expected = [('First', ('a', 'b')), ('Second', ('a',)), ('Third', None), ('Fourth', ('a', 'b')), ('Fifth', None), ('Sixth', None), ('Seventh', ('dedent',)), ('Eight', ('parens',)), ('Ninth', None), ('Ten', ('a', 'c'))]
for (cls, expected_value) in expected:
slots = astroid[cls].slots()
if (expected_value is None):
self.assertIsNone(slots)
else:
self.assertEqual(list(expected_value), [node.value for node in slots])
def test_slots_for_dict_keys(self) -> None:
module = builder.parse("\n class Issue(object):\n SlotDefaults = {'id': 0, 'id1':1}\n __slots__ = SlotDefaults.keys()\n ")
cls = module['Issue']
slots = cls.slots()
self.assertEqual(len(slots), 2)
self.assertEqual(slots[0].value, 'id')
self.assertEqual(slots[1].value, 'id1')
def test_slots_empty_list_of_slots(self) -> None:
module = builder.parse('\n class Klass(object):\n __slots__ = ()\n ')
cls = module['Klass']
self.assertEqual(cls.slots(), [])
def test_slots_taken_from_parents(self) -> None:
module = builder.parse("\n class FirstParent(object):\n __slots__ = ('a', 'b', 'c')\n class SecondParent(FirstParent):\n __slots__ = ('d', 'e')\n class Third(SecondParent):\n __slots__ = ('d', )\n ")
cls = module['Third']
slots = cls.slots()
self.assertEqual(sorted({slot.value for slot in slots}), ['a', 'b', 'c', 'd', 'e'])
def test_all_ancestors_need_slots(self) -> None:
module = builder.parse("\n class A(object):\n __slots__ = ('a', )\n class B(A): pass\n class C(B):\n __slots__ = ('a', )\n ")
cls = module['C']
self.assertIsNone(cls.slots())
cls = module['B']
self.assertIsNone(cls.slots())
def test_slots_added_dynamically_still_inferred(self) -> None:
code = '\n class NodeBase(object):\n __slots__ = "a", "b"\n\n if Options.isFullCompat():\n __slots__ += ("c",)\n\n '
node = builder.extract_node(code)
inferred = next(node.infer())
slots = inferred.slots()
assert (len(slots) == 3), slots
assert ([slot.value for slot in slots] == ['a', 'b', 'c'])
def assertEqualMro(self, klass: nodes.ClassDef, expected_mro: list[str]) -> None:
self.assertEqual([member.name for member in klass.mro()], expected_mro)
def assertEqualMroQName(self, klass: nodes.ClassDef, expected_mro: list[str]) -> None:
self.assertEqual([member.qname() for member in klass.mro()], expected_mro)
@unittest.skipUnless(HAS_SIX, 'These tests require the six library')
def test_with_metaclass_mro(self):
astroid = builder.parse('\n import six\n\n class C(object):\n pass\n class B(C):\n pass\n class A(six.with_metaclass(type, B)):\n pass\n ')
self.assertEqualMro(astroid['A'], ['A', 'B', 'C', 'object'])
def test_mro(self) -> None:
astroid = builder.parse('\n class C(object): pass\n class D(dict, C): pass\n\n class A1(object): pass\n class B1(A1): pass\n class C1(A1): pass\n class D1(B1, C1): pass\n class E1(C1, B1): pass\n class F1(D1, E1): pass\n class G1(E1, D1): pass\n\n class Boat(object): pass\n class DayBoat(Boat): pass\n class WheelBoat(Boat): pass\n class EngineLess(DayBoat): pass\n class SmallMultihull(DayBoat): pass\n class PedalWheelBoat(EngineLess, WheelBoat): pass\n class SmallCatamaran(SmallMultihull): pass\n class Pedalo(PedalWheelBoat, SmallCatamaran): pass\n\n class OuterA(object):\n class Inner(object):\n pass\n class OuterB(OuterA):\n class Inner(OuterA.Inner):\n pass\n class OuterC(OuterA):\n class Inner(OuterA.Inner):\n pass\n class OuterD(OuterC):\n class Inner(OuterC.Inner, OuterB.Inner):\n pass\n class Duplicates(str, str): pass\n\n ')
self.assertEqualMro(astroid['D'], ['D', 'dict', 'C', 'object'])
self.assertEqualMro(astroid['D1'], ['D1', 'B1', 'C1', 'A1', 'object'])
self.assertEqualMro(astroid['E1'], ['E1', 'C1', 'B1', 'A1', 'object'])
with self.assertRaises(InconsistentMroError) as cm:
astroid['F1'].mro()
A1 = astroid.getattr('A1')[0]
B1 = astroid.getattr('B1')[0]
C1 = astroid.getattr('C1')[0]
object_ = MANAGER.astroid_cache['builtins'].getattr('object')[0]
self.assertEqual(cm.exception.mros, [[B1, C1, A1, object_], [C1, B1, A1, object_]])
with self.assertRaises(InconsistentMroError) as cm:
astroid['G1'].mro()
self.assertEqual(cm.exception.mros, [[C1, B1, A1, object_], [B1, C1, A1, object_]])
self.assertEqualMro(astroid['PedalWheelBoat'], ['PedalWheelBoat', 'EngineLess', 'DayBoat', 'WheelBoat', 'Boat', 'object'])
self.assertEqualMro(astroid['SmallCatamaran'], ['SmallCatamaran', 'SmallMultihull', 'DayBoat', 'Boat', 'object'])
self.assertEqualMro(astroid['Pedalo'], ['Pedalo', 'PedalWheelBoat', 'EngineLess', 'SmallCatamaran', 'SmallMultihull', 'DayBoat', 'WheelBoat', 'Boat', 'object'])
self.assertEqualMro(astroid['OuterD']['Inner'], ['Inner', 'Inner', 'Inner', 'Inner', 'object'])
with self.assertRaises(DuplicateBasesError) as cm:
astroid['Duplicates'].mro()
Duplicates = astroid.getattr('Duplicates')[0]
self.assertEqual(cm.exception.cls, Duplicates)
self.assertIsInstance(cm.exception, MroError)
self.assertIsInstance(cm.exception, ResolveError)
def test_mro_with_factories(self) -> None:
cls = builder.extract_node("\n def MixinFactory(cls):\n mixin_name = '{}Mixin'.format(cls.__name__)\n mixin_bases = (object,)\n mixin_attrs = {}\n mixin = type(mixin_name, mixin_bases, mixin_attrs)\n return mixin\n class MixinA(MixinFactory(int)):\n pass\n class MixinB(MixinFactory(str)):\n pass\n class Base(object):\n pass\n class ClassA(MixinA, Base):\n pass\n class ClassB(MixinB, ClassA):\n pass\n class FinalClass(ClassB):\n def __init__(self):\n self.name = 'x'\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMro(cls, ['FinalClass', 'ClassB', 'MixinB', 'strMixin', 'ClassA', 'MixinA', 'intMixin', 'Base', 'object'])
def test_mro_with_attribute_classes(self) -> None:
cls = builder.extract_node('\n class A:\n pass\n class B:\n pass\n class Scope:\n pass\n scope = Scope()\n scope.A = A\n scope.B = B\n class C(scope.A, scope.B):\n pass\n ')
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMro(cls, ['C', 'A', 'B', 'object'])
def test_mro_generic_1(self):
cls = builder.extract_node("\n import typing\n T = typing.TypeVar('T')\n class A(typing.Generic[T]): ...\n class B: ...\n class C(A[T], B): ...\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMroQName(cls, ['.C', '.A', 'typing.Generic', '.B', 'builtins.object'])
def test_mro_generic_2(self):
cls = builder.extract_node("\n from typing import Generic, TypeVar\n T = TypeVar('T')\n class A: ...\n class B(Generic[T]): ...\n class C(Generic[T], A, B[T]): ...\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMroQName(cls, ['.C', '.A', '.B', 'typing.Generic', 'builtins.object'])
def test_mro_generic_3(self):
cls = builder.extract_node("\n from typing import Generic, TypeVar\n T = TypeVar('T')\n class A: ...\n class B(A, Generic[T]): ...\n class C(Generic[T]): ...\n class D(B[T], C[T], Generic[T]): ...\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMroQName(cls, ['.D', '.B', '.A', '.C', 'typing.Generic', 'builtins.object'])
def test_mro_generic_4(self):
cls = builder.extract_node("\n from typing import Generic, TypeVar\n T = TypeVar('T')\n class A: ...\n class B(Generic[T]): ...\n class C(A, Generic[T], B[T]): ...\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMroQName(cls, ['.C', '.A', '.B', 'typing.Generic', 'builtins.object'])
def test_mro_generic_5(self):
cls = builder.extract_node("\n from typing import Generic, TypeVar\n T1 = TypeVar('T1')\n T2 = TypeVar('T2')\n class A(Generic[T1]): ...\n class B(Generic[T2]): ...\n class C(A[T1], B[T2]): ...\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMroQName(cls, ['.C', '.A', '.B', 'typing.Generic', 'builtins.object'])
def test_mro_generic_6(self):
cls = builder.extract_node("\n from typing import Generic as TGeneric, TypeVar\n T = TypeVar('T')\n class Generic: ...\n class A(Generic): ...\n class B(TGeneric[T]): ...\n class C(A, B[T]): ...\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMroQName(cls, ['.C', '.A', '.Generic', '.B', 'typing.Generic', 'builtins.object'])
def test_mro_generic_7(self):
cls = builder.extract_node("\n from typing import Generic, TypeVar\n T = TypeVar('T')\n class A(): ...\n class B(Generic[T]): ...\n class C(A, B[T]): ...\n class D: ...\n class E(C[str], D): ...\n ")
assert isinstance(cls, nodes.ClassDef)
self.assertEqualMroQName(cls, ['.E', '.C', '.A', '.B', 'typing.Generic', '.D', 'builtins.object'])
def test_mro_generic_error_1(self):
cls = builder.extract_node("\n from typing import Generic, TypeVar\n T1 = TypeVar('T1')\n T2 = TypeVar('T2')\n class A(Generic[T1], Generic[T2]): ...\n ")
assert isinstance(cls, nodes.ClassDef)
with self.assertRaises(DuplicateBasesError):
cls.mro()
def test_mro_generic_error_2(self):
cls = builder.extract_node("\n from typing import Generic, TypeVar\n T = TypeVar('T')\n class A(Generic[T]): ...\n class B(A[T], A[T]): ...\n ")
assert isinstance(cls, nodes.ClassDef)
with self.assertRaises(DuplicateBasesError):
cls.mro()
def test_mro_typing_extensions(self):
module = parse('\n import abc\n import typing\n import dataclasses\n from typing import Protocol\n\n T = typing.TypeVar("T")\n\n class MyProtocol(Protocol): pass\n class EarlyBase(typing.Generic[T], MyProtocol): pass\n class Base(EarlyBase[T], abc.ABC): pass\n class Final(Base[object]): pass\n ')
class_names = ['ABC', 'Base', 'EarlyBase', 'Final', 'Generic', 'MyProtocol', 'Protocol', 'object']
final_def = module.body[(- 1)]
self.assertEqual(class_names, sorted((i.name for i in final_def.mro())))
def test_generator_from_infer_call_result_parent(self) -> None:
func = builder.extract_node('\n import contextlib\n\n \n def test(): #\n yield\n ')
assert isinstance(func, nodes.FunctionDef)
result = next(func.infer_call_result(None))
self.assertIsInstance(result, Generator)
self.assertEqual(result.parent, func)
def test_type_three_arguments(self) -> None:
classes = builder.extract_node('\n type(\'A\', (object, ), {"a": 1, "b": 2, missing: 3}) #\n ')
assert isinstance(classes, nodes.Call)
first = next(classes.infer())
self.assertIsInstance(first, nodes.ClassDef)
self.assertEqual(first.name, 'A')
self.assertEqual(first.basenames, ['object'])
self.assertIsInstance(first['a'], nodes.Const)
self.assertEqual(first['a'].value, 1)
self.assertIsInstance(first['b'], nodes.Const)
self.assertEqual(first['b'].value, 2)
with self.assertRaises(AttributeInferenceError):
first.getattr('missing')
def test_implicit_metaclass(self) -> None:
cls = builder.extract_node('\n class A(object):\n pass\n ')
assert isinstance(cls, nodes.ClassDef)
type_cls = nodes.builtin_lookup('type')[1][0]
self.assertEqual(cls.implicit_metaclass(), type_cls)
def test_implicit_metaclass_lookup(self) -> None:
cls = builder.extract_node('\n class A(object):\n pass\n ')
assert isinstance(cls, nodes.ClassDef)
instance = cls.instantiate_class()
func = cls.getattr('mro')
self.assertEqual(len(func), 1)
self.assertRaises(AttributeInferenceError, instance.getattr, 'mro')
def test_metaclass_lookup_using_same_class(self) -> None:
cls = builder.extract_node('\n class A(object): pass\n ')
assert isinstance(cls, nodes.ClassDef)
self.assertEqual(len(cls.getattr('mro')), 1)
def test_metaclass_lookup_inference_errors(self) -> None:
module = builder.parse('\n class Metaclass(type):\n foo = lala\n\n class B(object, metaclass=Metaclass): pass\n ')
cls = module['B']
self.assertEqual(util.Uninferable, next(cls.igetattr('foo')))
def test_metaclass_lookup(self) -> None:
module = builder.parse('\n class Metaclass(type):\n foo = 42\n \n def class_method(cls):\n pass\n def normal_method(cls):\n pass\n \n def meta_property(cls):\n return 42\n \n def static():\n pass\n\n class A(object, metaclass=Metaclass):\n pass\n ')
acls = module['A']
normal_attr = next(acls.igetattr('foo'))
self.assertIsInstance(normal_attr, nodes.Const)
self.assertEqual(normal_attr.value, 42)
class_method = next(acls.igetattr('class_method'))
self.assertIsInstance(class_method, BoundMethod)
self.assertEqual(class_method.bound, module['Metaclass'])
normal_method = next(acls.igetattr('normal_method'))
self.assertIsInstance(normal_method, BoundMethod)
self.assertEqual(normal_method.bound, module['A'])
property_meta = next(module['Metaclass'].igetattr('meta_property'))
self.assertIsInstance(property_meta, objects.Property)
wrapping = nodes.get_wrapping_class(property_meta)
self.assertEqual(wrapping, module['Metaclass'])
property_class = next(acls.igetattr('meta_property'))
self.assertIsInstance(property_class, nodes.Const)
self.assertEqual(property_class.value, 42)
static = next(acls.igetattr('static'))
self.assertIsInstance(static, nodes.FunctionDef)
def test_local_attr_invalid_mro(self) -> None:
cls = builder.extract_node('\n # A has an invalid MRO, local_attr should fallback\n # to using .ancestors.\n class A(object, object):\n test = 42\n class B(A): #\n pass\n ')
assert isinstance(cls, nodes.ClassDef)
local = cls.local_attr('test')[0]
inferred = next(local.infer())
self.assertIsInstance(inferred, nodes.Const)
self.assertEqual(inferred.value, 42)
def test_has_dynamic_getattr(self) -> None:
module = builder.parse('\n class Getattr(object):\n def __getattr__(self, attrname):\n pass\n\n class Getattribute(object):\n def __getattribute__(self, attrname):\n pass\n\n class ParentGetattr(Getattr):\n pass\n ')
self.assertTrue(module['Getattr'].has_dynamic_getattr())
self.assertTrue(module['Getattribute'].has_dynamic_getattr())
self.assertTrue(module['ParentGetattr'].has_dynamic_getattr())
astroid_builder = builder.AstroidBuilder()
module = astroid_builder.module_build(difflib)
self.assertFalse(module['SequenceMatcher'].has_dynamic_getattr())
def test_duplicate_bases_namedtuple(self) -> None:
module = builder.parse("\n import collections\n _A = collections.namedtuple('A', 'a')\n\n class A(_A): pass\n\n class B(A): pass\n ")
names = ['B', 'A', 'A', 'tuple', 'object']
mro = module['B'].mro()
class_names = [i.name for i in mro]
self.assertEqual(names, class_names)
def test_instance_bound_method_lambdas(self) -> None:
ast_nodes = builder.extract_node('\n class Test(object): #\n lam = lambda self: self\n not_method = lambda xargs: xargs\n Test() #\n ')
assert isinstance(ast_nodes, list)
cls = next(ast_nodes[0].infer())
self.assertIsInstance(next(cls.igetattr('lam')), nodes.Lambda)
self.assertIsInstance(next(cls.igetattr('not_method')), nodes.Lambda)
instance = next(ast_nodes[1].infer())
lam = next(instance.igetattr('lam'))
self.assertIsInstance(lam, BoundMethod)
not_method = next(instance.igetattr('not_method'))
self.assertIsInstance(not_method, nodes.Lambda)
def test_instance_bound_method_lambdas_2(self) -> None:
ast_nodes = builder.extract_node('\n def lambda_factory():\n return lambda self: print("Hello world")\n\n class MyClass(object): #\n f2 = lambda_factory()\n\n MyClass() #\n ')
assert isinstance(ast_nodes, list)
cls = next(ast_nodes[0].infer())
self.assertIsInstance(next(cls.igetattr('f2')), nodes.Lambda)
instance = next(ast_nodes[1].infer())
f2 = next(instance.igetattr('f2'))
self.assertIsInstance(f2, BoundMethod)
def test_class_extra_decorators_frame_is_not_class(self) -> None:
ast_node = builder.extract_node('\n def ala():\n def bala(): #\n func = 42\n ')
assert isinstance(ast_node, nodes.FunctionDef)
self.assertEqual(ast_node.extra_decorators, [])
def test_class_extra_decorators_only_callfunc_are_considered(self) -> None:
ast_node = builder.extract_node('\n class Ala(object):\n def func(self): #\n pass\n func = 42\n ')
self.assertEqual(ast_node.extra_decorators, [])
def test_class_extra_decorators_only_assignment_names_are_considered(self) -> None:
ast_node = builder.extract_node('\n class Ala(object):\n def func(self): #\n pass\n def __init__(self):\n self.func = staticmethod(func)\n\n ')
self.assertEqual(ast_node.extra_decorators, [])
def test_class_extra_decorators_only_same_name_considered(self) -> None:
ast_node = builder.extract_node('\n class Ala(object):\n def func(self): #\n pass\n bala = staticmethod(func)\n ')
self.assertEqual(ast_node.extra_decorators, [])
self.assertEqual(ast_node.type, 'method')
def test_class_extra_decorators(self) -> None:
(static_method, clsmethod) = builder.extract_node('\n class Ala(object):\n def static(self): #\n pass\n def class_method(self): #\n pass\n class_method = classmethod(class_method)\n static = staticmethod(static)\n ')
self.assertEqual(len(clsmethod.extra_decorators), 1)
self.assertEqual(clsmethod.type, 'classmethod')
self.assertEqual(len(static_method.extra_decorators), 1)
self.assertEqual(static_method.type, 'staticmethod')
def test_extra_decorators_only_class_level_assignments(self) -> None:
node = builder.extract_node('\n def _bind(arg):\n return arg.bind\n\n class A(object):\n \n def bind(self):\n return 42\n def irelevant(self):\n # This is important, because it used to trigger\n # a maximum recursion error.\n bind = _bind(self)\n return bind\n A() #\n ')
inferred = next(node.infer())
bind = next(inferred.igetattr('bind'))
self.assertIsInstance(bind, nodes.Const)
self.assertEqual(bind.value, 42)
parent = bind.scope()
self.assertEqual(len(parent.extra_decorators), 0)
def test_class_keywords(self) -> None:
data = "\n class TestKlass(object, metaclass=TestMetaKlass,\n foo=42, bar='baz'):\n pass\n "
astroid = builder.parse(data, __name__)
cls = astroid['TestKlass']
self.assertEqual(len(cls.keywords), 2)
self.assertEqual([x.arg for x in cls.keywords], ['foo', 'bar'])
children = list(cls.get_children())
assert (len(children) == 4)
assert isinstance(children[1], nodes.Keyword)
assert isinstance(children[2], nodes.Keyword)
assert (children[1].arg == 'foo')
assert (children[2].arg == 'bar')
def test_kite_graph(self) -> None:
data = "\n A = type('A', (object,), {})\n\n class B1(A): pass\n\n class B2(A): pass\n\n class C(B1, B2): pass\n\n class D(C):\n def update(self):\n self.hello = 'hello'\n "
builder.parse(data)
def test_singleline_docstring() -> None:
code = textwrap.dedent(" class Foo:\n '''Hello World'''\n bar = 1\n ")
node: nodes.ClassDef = builder.extract_node(code)
assert isinstance(node.doc_node, nodes.Const)
assert (node.doc_node.lineno == 2)
assert (node.doc_node.col_offset == 4)
assert (node.doc_node.end_lineno == 2)
assert (node.doc_node.end_col_offset == 21)
def test_multiline_docstring() -> None:
code = textwrap.dedent(" class Foo:\n '''Hello World\n\n Also on this line.\n '''\n bar = 1\n ")
node: nodes.ClassDef = builder.extract_node(code)
assert isinstance(node.doc_node, nodes.Const)
assert (node.doc_node.lineno == 2)
assert (node.doc_node.col_offset == 4)
assert (node.doc_node.end_lineno == 5)
assert (node.doc_node.end_col_offset == 7)
def test_without_docstring() -> None:
code = textwrap.dedent(' class Foo:\n bar = 1\n ')
node: nodes.ClassDef = builder.extract_node(code)
assert (node.doc_node is None) |
def monthly_returns(returns, annot_size=10, figsize=(10, 5), cbar=True, square=False, compounded=True, eoy=False, grayscale=False, fontname='Arial', ylabel=True, savefig=None, show=True):
return monthly_heatmap(returns=returns, annot_size=annot_size, figsize=figsize, cbar=cbar, square=square, compounded=compounded, eoy=eoy, grayscale=grayscale, fontname=fontname, ylabel=ylabel, savefig=savefig, show=show) |
def fill_statedict(state_dict, vars, size):
log_size = int(math.log(size, 2))
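# Copy weights from the TF variable dict into the PyTorch state dict: the 8 mapping-network dense layers, the learned constant input, and the per-resolution ToRGB, modulated-conv, and noise tensors.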
for i in range(8):
update(state_dict, convert_dense(vars, f'G_mapping/Dense{i}', f'style.{(i + 1)}'))
update(state_dict, {'input.input': torch.from_numpy(vars['G_synthesis/4x4/Const/const'].value().eval())})
update(state_dict, convert_torgb(vars, 'G_synthesis/4x4/ToRGB', 'to_rgb1'))
for i in range((log_size - 2)):
reso = (4 * (2 ** (i + 1)))
update(state_dict, convert_torgb(vars, f'G_synthesis/{reso}x{reso}/ToRGB', f'to_rgbs.{i}'))
update(state_dict, convert_modconv(vars, 'G_synthesis/4x4/Conv', 'conv1'))
conv_i = 0
for i in range((log_size - 2)):
reso = (4 * (2 ** (i + 1)))
update(state_dict, convert_modconv(vars, f'G_synthesis/{reso}x{reso}/Conv0_up', f'convs.{conv_i}', flip=True))
update(state_dict, convert_modconv(vars, f'G_synthesis/{reso}x{reso}/Conv1', f'convs.{(conv_i + 1)}'))
conv_i += 2
for i in range(0, (((log_size - 2) * 2) + 1)):
update(state_dict, {f'noises.noise_{i}': torch.from_numpy(vars[f'G_synthesis/noise{i}'].value().eval())})
return state_dict |
class HostLevelSharder(EmbeddingBagCollectionSharder, ModuleSharder[nn.Module]):
def sharding_types(self, compute_device_type: str) -> List[str]:
return [ShardingType.TABLE_ROW_WISE.value, ShardingType.TABLE_COLUMN_WISE.value]
def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
return [EmbeddingComputeKernel.DENSE.value] |
class CheckpointReaderAdapter(object):
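# Adapts a checkpoint reader so variables can be looked up by either their bare checkpoint name or their ':0' tensor name.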
def __init__(self, reader):
self._reader = reader
m = self._reader.get_variable_to_shape_map()
self._map = {(k if k.endswith(':0') else (k + ':0')): v for (k, v) in six.iteritems(m)}
def get_variable_to_shape_map(self):
return self._map
def get_tensor(self, name):
if self._reader.has_tensor(name):
return self._reader.get_tensor(name)
if (name in self._map):
assert name.endswith(':0'), name
name = name[:(- 2)]
return self._reader.get_tensor(name)
def has_tensor(self, name):
return (name in self._map)
def get_real_name(self, name):
if self._reader.has_tensor(name):
return name
assert self.has_tensor(name)
return name[:(- 2)] |
def _direct_solve_discrete_lyapunov(A: 'TensorLike', Q: 'TensorLike') -> TensorVariable:
A_ = as_tensor_variable(A)
Q_ = as_tensor_variable(Q)
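# Vectorization trick: the discrete Lyapunov equation X = A X A^H + Q becomes (I - kron(A, conj(A))) vec(X) = vec(Q), which is solved directly below.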
if ('complex' in A_.type.dtype):
AA = kron(A_, A_.conj())
else:
AA = kron(A_, A_)
X = solve((pt.eye(AA.shape[0]) - AA), Q_.ravel())
return typing.cast(TensorVariable, reshape(X, Q_.shape)) |
def init_segmentor(config, checkpoint=None, device='cuda:0'):
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif (not isinstance(config, mmcv.Config)):
raise TypeError('config must be a filename or Config object, but got {}'.format(type(config)))
config.model.pretrained = None
config.model.train_cfg = None
model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
if (checkpoint is not None):
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
model.CLASSES = checkpoint['meta']['CLASSES']
model.PALETTE = checkpoint['meta']['PALETTE']
model.cfg = config
model.to(device)
model.eval()
return model |
class Tokenizer(ABC):
def get_input_length(self, input_text: str) -> int:
return len(self.encode(input_text))
def validate_input_length(self, prompt_token_ids: List[int], max_input_length: int):
num_input_tokens = len(prompt_token_ids)
if (num_input_tokens > max_input_length):
logger.info('Task is over the max input length.')
InputTooLong(num_input_tokens, max_input_length).raise_exception()
return prompt_token_ids
def encode_if_required(self, input_text_or_ids: Union[(str, List[int])], max_input_length: Optional[int]=None):
if isinstance(input_text_or_ids, str):
input_ids = self.encode(input_text_or_ids)
else:
input_ids = input_text_or_ids
return (self.validate_input_length(input_ids, max_input_length) if max_input_length else input_ids)
def encode(self, input_text: str) -> List[int]:
raise NotImplementedError('') |
def dice_loss(args):
(pred, gt, mask, weights) = args
pred = pred[(..., 0)]
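# Rescale the per-pixel weights into [1, 2], fold them into the mask, and compute a soft Dice loss: 1 - 2 * |pred * gt| / (|pred| + |gt|).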
weights = (((weights - tf.reduce_min(weights)) / (tf.reduce_max(weights) - tf.reduce_min(weights))) + 1.0)
mask = (mask * weights)
intersection = tf.reduce_sum(((pred * gt) * mask))
union = ((tf.reduce_sum((pred * mask)) + tf.reduce_sum((gt * mask))) + 1e-06)
loss = (1 - ((2.0 * intersection) / union))
return loss |
class FeatsClassStage(object):
def __init__(self):
pass
def eval(self):
return self
def encode(self, c):
info = (None, None, c)
return (c, None, info)
def decode(self, c):
return c
def get_input(self, batch: dict, keys: dict) -> dict:
out = {}
for k in keys:
if (k == 'target'):
out[k] = batch[k].unsqueeze(1)
elif (k == 'feature'):
out[k] = batch[k].float().permute(0, 2, 1)
out[k] = out[k].to(memory_format=torch.contiguous_format)
return out |
def test_no_init_nuts_compound(caplog):
with pm.Model() as model:
a = pm.Normal('a')
b = pm.Poisson('b', 1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '.*number of samples.*', UserWarning)
pm.sample(10, tune=10)
assert ('Initializing NUTS' not in caplog.text) |
class PythonFileRunner():
def __init__(self, pycore, file_, args=None, stdin=None, stdout=None, analyze_data=None):
self.pycore = pycore
self.file = file_
self.analyze_data = analyze_data
self.observers = []
self.args = args
self.stdin = stdin
self.stdout = stdout
def run(self):
env = dict(os.environ)
file_path = self.file.real_path
path_folders = (self.pycore.project.get_source_folders() + self.pycore.project.get_python_path_folders())
env['PYTHONPATH'] = os.pathsep.join((folder.real_path for folder in path_folders))
runmod_path = self.pycore.project.find_module('rope.base.oi.runmod').real_path
self.receiver = None
self._init_data_receiving()
send_info = '-'
if self.receiver:
send_info = self.receiver.get_send_info()
args = [sys.executable, runmod_path, send_info, self.pycore.project.address, self.file.real_path]
if (self.analyze_data is None):
del args[1:4]
if (self.args is not None):
args.extend(self.args)
self.process = subprocess.Popen(executable=sys.executable, args=args, env=env, cwd=os.path.split(file_path)[0], stdin=self.stdin, stdout=self.stdout, stderr=self.stdout, close_fds=(os.name != 'nt'))
def _init_data_receiving(self):
if (self.analyze_data is None):
return
if (True or (os.name == 'nt')):
self.receiver = _SocketReceiver()
else:
self.receiver = _FIFOReceiver()
self.receiving_thread = threading.Thread(target=self._receive_information)
self.receiving_thread.daemon = True
self.receiving_thread.start()
def _receive_information(self):
for data in self.receiver.receive_data():
self.analyze_data(data)
for observer in self.observers:
observer()
def wait_process(self):
self.process.wait()
if self.analyze_data:
self.receiving_thread.join()
def kill_process(self):
if (self.process.poll() is not None):
return
with contextlib.suppress(OSError):
if hasattr(self.process, 'terminate'):
self.process.terminate()
elif (os.name != 'nt'):
os.kill(self.process.pid, 9)
else:
import ctypes
handle = int(self.process._handle)
ctypes.windll.kernel32.TerminateProcess(handle, (- 1))
def add_finishing_observer(self, observer):
self.observers.append(observer) |
class Linear(torch.nn.Linear):
def __init__(self, *args, **kwargs):
super(Linear, self).__init__(*args, **kwargs)
def forward(self, input: Tensor) -> Tensor:
if (input.is_cuda and (linear_function is not None) and (self.bias is not None)):
return linear_function(input, self.weight, self.bias)
else:
return torch.nn.functional.linear(input, self.weight, self.bias) |
def _get_datetime(instant: _Instant) -> datetime.datetime:
if (instant is None):
return datetime.datetime.now(UTC).replace(tzinfo=None)
elif isinstance(instant, (int, float)):
return datetime.datetime.fromtimestamp(instant, UTC).replace(tzinfo=None)
elif isinstance(instant, datetime.time):
return datetime.datetime.combine(datetime.date.today(), instant)
elif (isinstance(instant, datetime.date) and (not isinstance(instant, datetime.datetime))):
return datetime.datetime.combine(instant, datetime.time())
return instant |
def test_chrono_duration_roundtrip():
date1 = datetime.datetime.today()
date2 = datetime.datetime.today()
diff = (date2 - date1)
assert isinstance(diff, datetime.timedelta)
cpp_diff = m.test_chrono3(diff)
assert (cpp_diff.days == diff.days)
assert (cpp_diff.seconds == diff.seconds)
assert (cpp_diff.microseconds == diff.microseconds) |
class TestStickerSetWithoutRequest(TestStickerSetBase):
def test_slot_behaviour(self):
inst = StickerSet('this', 'is', True, self.stickers, True, 'not')
for attr in inst.__slots__:
assert (getattr(inst, attr, 'err') != 'err'), f"got extra slot '{attr}'"
assert (len(mro_slots(inst)) == len(set(mro_slots(inst)))), 'duplicate slot'
def test_de_json(self, bot, sticker):
name = f'test_by_{bot.username}'
json_dict = {'name': name, 'title': self.title, 'is_animated': self.is_animated, 'is_video': self.is_video, 'stickers': [x.to_dict() for x in self.stickers], 'thumbnail': sticker.thumbnail.to_dict(), 'sticker_type': self.sticker_type, 'contains_masks': self.contains_masks}
sticker_set = StickerSet.de_json(json_dict, bot)
assert (sticker_set.name == name)
assert (sticker_set.title == self.title)
assert (sticker_set.is_animated == self.is_animated)
assert (sticker_set.is_video == self.is_video)
assert (sticker_set.stickers == tuple(self.stickers))
assert (sticker_set.thumbnail == sticker.thumbnail)
assert (sticker_set.sticker_type == self.sticker_type)
assert (sticker_set.api_kwargs == {'contains_masks': self.contains_masks})
def test_sticker_set_to_dict(self, sticker_set):
sticker_set_dict = sticker_set.to_dict()
assert isinstance(sticker_set_dict, dict)
assert (sticker_set_dict['name'] == sticker_set.name)
assert (sticker_set_dict['title'] == sticker_set.title)
assert (sticker_set_dict['is_animated'] == sticker_set.is_animated)
assert (sticker_set_dict['is_video'] == sticker_set.is_video)
assert (sticker_set_dict['stickers'][0] == sticker_set.stickers[0].to_dict())
assert (sticker_set_dict['thumbnail'] == sticker_set.thumbnail.to_dict())
assert (sticker_set_dict['sticker_type'] == sticker_set.sticker_type)
def test_equality(self):
a = StickerSet(self.name, self.title, self.is_animated, self.stickers, self.is_video, self.sticker_type)
b = StickerSet(self.name, self.title, self.is_animated, self.stickers, self.is_video, self.sticker_type)
c = StickerSet(self.name, 'title', False, [], True, Sticker.CUSTOM_EMOJI)
d = StickerSet('blah', self.title, self.is_animated, self.stickers, self.is_video, self.sticker_type)
e = Audio(self.name, '', 0, None, None)
assert (a == b)
assert (hash(a) == hash(b))
assert (a is not b)
assert (a == c)
assert (hash(a) == hash(c))
assert (a != d)
assert (hash(a) != hash(d))
assert (a != e)
assert (hash(a) != hash(e))
@pytest.mark.parametrize('local_mode', [True, False])
async def test_upload_sticker_file_local_files(self, monkeypatch, bot, chat_id, local_mode, recwarn):
try:
bot._local_mode = local_mode
test_flag = False
file = data_file('telegram.jpg')
expected = file.as_uri()
async def make_assertion(_, data, *args, **kwargs):
nonlocal test_flag
test_flag = ((data.get('sticker') == expected) if local_mode else isinstance(data.get('sticker'), InputFile))
monkeypatch.setattr(bot, '_post', make_assertion)
(await bot.upload_sticker_file(chat_id, sticker=file, sticker_format='static'))
assert test_flag
finally:
bot._local_mode = False
@pytest.mark.parametrize('local_mode', [True, False])
async def test_create_new_sticker_set_local_files(self, monkeypatch, bot, chat_id, local_mode):
monkeypatch.setattr(bot, '_local_mode', local_mode)
test_flag = False
file = data_file('telegram.jpg')
expected = file.as_uri()
async def make_assertion(_, data, *args, **kwargs):
nonlocal test_flag
test_flag = (data.get('stickers')[0].sticker == expected)
monkeypatch.setattr(bot, '_post', make_assertion)
(await bot.create_new_sticker_set(chat_id, 'name', 'title', stickers=[InputSticker(file, emoji_list=['emoji'])], sticker_format=StickerFormat.STATIC))
assert test_flag
async def test_create_new_sticker_all_params(self, monkeypatch, bot, chat_id, mask_position):
async def make_assertion(_, data, *args, **kwargs):
assert (data['user_id'] == chat_id)
assert (data['name'] == 'name')
assert (data['title'] == 'title')
assert (data['stickers'] == ['wow.png', 'wow.tgs', 'wow.webp'])
assert (data['sticker_format'] == 'static')
assert (data['needs_repainting'] is True)
monkeypatch.setattr(bot, '_post', make_assertion)
(await bot.create_new_sticker_set(chat_id, 'name', 'title', stickers=['wow.png', 'wow.tgs', 'wow.webp'], sticker_format=StickerFormat.STATIC, needs_repainting=True))
@pytest.mark.parametrize('local_mode', [True, False])
async def test_add_sticker_to_set_local_files(self, monkeypatch, bot, chat_id, local_mode):
monkeypatch.setattr(bot, '_local_mode', local_mode)
test_flag = False
file = data_file('telegram.jpg')
expected = file.as_uri()
async def make_assertion(_, data, *args, **kwargs):
nonlocal test_flag
test_flag = (data.get('sticker').sticker == expected)
monkeypatch.setattr(bot, '_post', make_assertion)
(await bot.add_sticker_to_set(chat_id, 'name', sticker=InputSticker(sticker=file, emoji_list=['this'])))
assert test_flag
@pytest.mark.parametrize('local_mode', [True, False])
async def test_set_sticker_set_thumbnail_local_files(self, monkeypatch, bot, chat_id, local_mode):
try:
bot._local_mode = local_mode
test_flag = False
file = data_file('telegram.jpg')
expected = file.as_uri()
async def make_assertion(_, data, *args, **kwargs):
nonlocal test_flag
if local_mode:
test_flag = (data.get('thumbnail') == expected)
else:
test_flag = isinstance(data.get('thumbnail'), InputFile)
monkeypatch.setattr(bot, '_post', make_assertion)
(await bot.set_sticker_set_thumbnail('name', chat_id, thumbnail=file))
assert test_flag
finally:
bot._local_mode = False
async def test_get_file_instance_method(self, monkeypatch, sticker):
async def make_assertion(*_, **kwargs):
return (kwargs['file_id'] == sticker.file_id)
assert check_shortcut_signature(Sticker.get_file, Bot.get_file, ['file_id'], [])
assert (await check_shortcut_call(sticker.get_file, sticker.get_bot(), 'get_file'))
assert (await check_defaults_handling(sticker.get_file, sticker.get_bot()))
monkeypatch.setattr(sticker.get_bot(), 'get_file', make_assertion)
assert (await sticker.get_file()) |
class ShakeDrop(torch.autograd.Function):
@staticmethod
def forward(ctx, x, b, alpha):
y = (((b + alpha) - (b * alpha)) * x)
ctx.save_for_backward(b)
return y
@staticmethod
def backward(ctx, dy):
beta = torch.rand(dy.size(0), dtype=dy.dtype, device=dy.device).view((- 1), 1, 1, 1)
(b,) = ctx.saved_tensors
return ((((b + beta) - (b * beta)) * dy), None, None) |
def show_compilers():
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append((('compiler=' + compiler), None, compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help('List of available compilers:') |
def test_docs_examples():
expr = re.compile('\n!!! tab examples "pyproject.toml"\n\\s*\n\\s*```toml\n(.*?)```', (re.MULTILINE | re.DOTALL))
txt = DIR.parent.joinpath('docs/options.md').read_text()
blocks: list[str] = []
for match in expr.finditer(txt):
lines = (line.strip() for line in match.group(1).strip().splitlines() if line.strip())
block: list[str] = []
header = ''
for line in lines:
if line.startswith(('[tool.cibuildwheel', '[[tool.cibuildwheel')):
header = line
elif line.startswith('#'):
if block:
blocks.append('\n'.join([header, *block]))
block = []
elif ((' = ' in line) and any((x.startswith(line.partition(' = ')[0]) for x in block))):
blocks.append('\n'.join([header, *block]))
block = [line]
else:
block.append(line)
blocks.append('\n'.join([header, *block]))
for example_txt in blocks:
print(example_txt)
print()
example = tomllib.loads(example_txt)
validator = validate_pyproject.api.Validator()
assert (validator(example) is not None) |
class BasicConvolutionBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, use_ln=False):
super().__init__()
self.net = nn.Sequential(spnn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, transposed=False), build_sparse_norm(out_channels, use_ln), spnn.ReLU(True))
def forward(self, x):
return self.net(x) |
class Effect4936(BaseEffect):
runTime = 'late'
type = 'active'
@staticmethod
def handler(fit, module, context, projectionRange, **kwargs):
amount = module.getModifiedItemAttr('shieldBonus')
speed = (module.getModifiedItemAttr('duration') / 1000.0)
fit.extraAttributes.increase('shieldRepair', (amount / speed), **kwargs) |
def timeout_timer(item, settings):
if ((not settings.disable_debugger_detection) and is_debugging()):
return
try:
capman = item.config.pluginmanager.getplugin('capturemanager')
if capman:
capman.suspend_global_capture(item)
(stdout, stderr) = capman.read_global_capture()
else:
(stdout, stderr) = (None, None)
write_title('Timeout', sep='+')
caplog = item.config.pluginmanager.getplugin('_capturelog')
if (caplog and hasattr(item, 'capturelog_handler')):
log = item.capturelog_handler.stream.getvalue()
if log:
write_title('Captured log')
write(log)
if stdout:
write_title('Captured stdout')
write(stdout)
if stderr:
write_title('Captured stderr')
write(stderr)
dump_stacks()
write_title('Timeout', sep='+')
except Exception:
traceback.print_exc()
finally:
sys.stdout.flush()
sys.stderr.flush()
os._exit(1) |
def _delete_file_or_dir(base_dir, name, struct):
fullname = os.path.join(base_dir, name)
set_path = SetPath(fullname, struct)
try:
if (set_path.get_type() == 'directory'):
_rmtree(fullname)
else:
os.remove(fullname)
except FileNotFoundError:
pass
except IsADirectoryError:
_rmtree(fullname)
except NotADirectoryError:
os.remove(fullname) |
class DescribeNumberingPart():
def it_provides_access_to_the_numbering_definitions(self, num_defs_fixture):
(numbering_part, _NumberingDefinitions_, numbering_elm_, numbering_definitions_) = num_defs_fixture
numbering_definitions = numbering_part.numbering_definitions
_NumberingDefinitions_.assert_called_once_with(numbering_elm_)
assert (numbering_definitions is numbering_definitions_)
@pytest.fixture
def num_defs_fixture(self, _NumberingDefinitions_, numbering_elm_, numbering_definitions_):
numbering_part = NumberingPart(None, None, numbering_elm_, None)
return (numbering_part, _NumberingDefinitions_, numbering_elm_, numbering_definitions_)
@pytest.fixture
def _NumberingDefinitions_(self, request, numbering_definitions_):
return class_mock(request, 'docx.parts.numbering._NumberingDefinitions', return_value=numbering_definitions_)
@pytest.fixture
def numbering_definitions_(self, request):
return instance_mock(request, _NumberingDefinitions)
@pytest.fixture
def numbering_elm_(self, request):
return instance_mock(request, CT_Numbering) |
@dataclass
class InitializationArguments():
config_name: Optional[str] = field(default='gpt2-large', metadata={'help': 'Configuration to use for model initialization.'})
tokenizer_name: Optional[str] = field(default='codeparrot/codeparrot', metadata={'help': 'Tokenizer attached to model.'})
model_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of the created model.'})
push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved tokenizer to the hub.'}) |
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
assert (beta > 0)
assert ((pred.size() == target.size()) and (target.numel() > 0))
diff = torch.abs((pred - target))
loss = torch.where((diff < beta), (((0.5 * diff) * diff) / beta), (diff - (0.5 * beta)))
return loss |
class SparsemaxLoss(nn.Module):
def __init__(self, weight=None, ignore_index=(- 100), reduction='elementwise_mean'):
assert (reduction in ['elementwise_mean', 'sum', 'none'])
self.reduction = reduction
self.weight = weight
self.ignore_index = ignore_index
super(SparsemaxLoss, self).__init__()
def forward(self, input, target):
loss = sparsemax_loss(input, target)
if (self.ignore_index >= 0):
ignored_positions = (target == self.ignore_index)
size = float((target.size(0) - ignored_positions.sum()).item())
loss.masked_fill_(ignored_positions, 0.0)
else:
size = float(target.size(0))
if (self.reduction == 'sum'):
loss = loss.sum()
elif (self.reduction == 'elementwise_mean'):
loss = (loss.sum() / size)
return loss |
def test_cp38_arm64_testing_universal2_installer(tmp_path, capfd, request):
if (not request.config.getoption('--run-cp38-universal2')):
pytest.skip('needs --run-cp38-universal2 option to run')
project_dir = (tmp_path / 'project')
basic_project.generate(project_dir)
actual_wheels = utils.cibuildwheel_run(project_dir, add_env={'CIBW_BUILD': 'cp38-*', 'CIBW_TEST_COMMAND': 'python -c "import platform; print(\'running tests on \' + platform.machine())"', 'CIBW_ARCHS': 'x86_64,universal2,arm64', 'MACOSX_DEPLOYMENT_TARGET': '11.0'})
captured = capfd.readouterr()
assert ('running tests on x86_64' in captured.out)
assert ('running tests on arm64' in captured.out)
warning_message = 'While cibuildwheel can build CPython 3.8 universal2/arm64 wheels, we cannot test the arm64 part of them'
assert (warning_message not in captured.err)
expected_wheels = [w.replace('10_9', '11_0') for w in ALL_MACOS_WHEELS if ('cp38' in w)]
assert (set(actual_wheels) == set(expected_wheels)) |
def test_class_scope_dependencies(item_names_for, order_dependencies):
tests_content = '\n import pytest\n\n class TestA:\n @pytest.mark.dependency(depends=["test_c"], scope=\'class\')\n def test_a(self):\n assert True\n\n def test_b(self):\n assert True\n\n @pytest.mark.dependency\n def test_c(self):\n assert True\n '
assert (item_names_for(tests_content) == ['TestA::test_b', 'TestA::test_c', 'TestA::test_a']) |
class FY4Base(HDF5FileHandler):
def __init__(self, filename, filename_info, filetype_info):
super(FY4Base, self).__init__(filename, filename_info, filetype_info)
self.sensor = filename_info['instrument']
self._COFF_list = [21983.5, 10991.5, 5495.5, 2747.5, 1373.5]
self._LOFF_list = [21983.5, 10991.5, 5495.5, 2747.5, 1373.5]
self._CFAC_list = [.0, .0, .0, .0, .0]
self._LFAC_list = [.0, .0, .0, .0, .0]
self.PLATFORM_NAMES = {'FY4A': 'FY-4A', 'FY4B': 'FY-4B', 'FY4C': 'FY-4C'}
try:
self.PLATFORM_ID = self.PLATFORM_NAMES[filename_info['platform_id']]
except KeyError:
raise KeyError(f"Unsupported platform ID: {filename_info['platform_id']}")
self.CHANS_ID = 'NOMChannel'
self.SAT_ID = 'NOMSatellite'
self.SUN_ID = 'NOMSun'
@staticmethod
def scale(dn, slope, offset):
ref = ((dn * slope) + offset)
ref = ref.clip(min=0)
ref.attrs = dn.attrs
return ref
def apply_lut(self, data, lut):
lut = np.append(lut, np.nan)
data.data = da.where((data.data > lut.shape[0]), (lut.shape[0] - 1), data.data)
res = data.data.map_blocks(self._getitem, lut, dtype=lut.dtype)
res = xr.DataArray(res, dims=data.dims, attrs=data.attrs, coords=data.coords)
return res
@staticmethod
def _getitem(block, lut):
return lut[block]
@property
def reflectance_coeffs(self):
if (self.PLATFORM_ID == 'FY-4A'):
cal_coef = 'CALIBRATION_COEF(SCALE+OFFSET)'
elif (self.PLATFORM_ID == 'FY-4B'):
cal_coef = 'Calibration/CALIBRATION_COEF(SCALE+OFFSET)'
else:
raise KeyError(f'Unsupported platform ID for calibration: {self.PLATFORM_ID}')
return self.get(cal_coef).values
def calibrate(self, data, ds_info, ds_name, file_key):
calibration = ds_info.get('calibration')
if (calibration in ('counts', None)):
data.attrs['units'] = ds_info['units']
ds_info['valid_range'] = data.attrs['valid_range']
ds_info['fill_value'] = data.attrs['FillValue'].item()
elif (calibration == 'reflectance'):
channel_index = (int(file_key[(- 2):]) - 1)
data = self.calibrate_to_reflectance(data, channel_index, ds_info)
elif (calibration == 'brightness_temperature'):
data = self.calibrate_to_bt(data, ds_info, ds_name)
elif (calibration == 'radiance'):
raise NotImplementedError('Calibration to radiance is not supported.')
if (calibration != 'counts'):
data = data.where(((data >= min(data.attrs['valid_range'])) & (data <= max(data.attrs['valid_range']))))
else:
data.attrs['_FillValue'] = data.attrs['FillValue'].item()
return data
def calibrate_to_reflectance(self, data, channel_index, ds_info):
logger.debug('Calibrating to reflectances')
if ((self.sensor != 'AGRI') and (self.sensor != 'GHI')):
raise ValueError(f'Unsupported sensor type: {self.sensor}')
coeffs = self.reflectance_coeffs
num_channel = coeffs.shape[0]
if ((self.sensor == 'AGRI') and (num_channel == 1)):
channel_index = 0
data.data = da.where((data.data == data.attrs['FillValue'].item()), np.nan, data.data)
data.attrs['scale_factor'] = coeffs[(channel_index, 0)].item()
data.attrs['add_offset'] = coeffs[(channel_index, 1)].item()
data = self.scale(data, data.attrs['scale_factor'], data.attrs['add_offset'])
data *= 100
ds_info['valid_range'] = ((data.attrs['valid_range'] * data.attrs['scale_factor']) + data.attrs['add_offset'])
ds_info['valid_range'] = (ds_info['valid_range'] * 100)
return data
def calibrate_to_bt(self, data, ds_info, ds_name):
logger.debug('Calibrating to brightness_temperature')
if (self.sensor not in ['GHI', 'AGRI']):
raise ValueError('Error, sensor must be GHI or AGRI.')
lut_key = ds_info.get('lut_key', ds_name)
try:
lut = self[lut_key]
except KeyError:
lut_key = f"Calibration/{ds_info.get('lut_key', ds_name)}"
lut = self[lut_key]
data = self.apply_lut(data, lut)
ds_info['valid_range'] = lut.attrs['valid_range']
return data
def start_time(self):
start_time = (((self['/attr/Observing Beginning Date'] + 'T') + self['/attr/Observing Beginning Time']) + 'Z')
try:
return datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
return datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%SZ')
def end_time(self):
end_time = (((self['/attr/Observing Ending Date'] + 'T') + self['/attr/Observing Ending Time']) + 'Z')
try:
return datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
return datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%SZ')
def get_area_def(self, key):
res = key['resolution']
pdict = {}
begin_cols = float(self.file_content['/attr/Begin Pixel Number'])
end_lines = float(self.file_content['/attr/End Line Number'])
pdict['coff'] = ((self._COFF_list[RESOLUTION_LIST.index(res)] - begin_cols) + 1)
pdict['loff'] = (((- self._LOFF_list[RESOLUTION_LIST.index(res)]) + end_lines) + 1)
pdict['cfac'] = self._CFAC_list[RESOLUTION_LIST.index(res)]
pdict['lfac'] = self._LFAC_list[RESOLUTION_LIST.index(res)]
try:
pdict['a'] = float(self.file_content['/attr/Semimajor axis of ellipsoid'])
except KeyError:
pdict['a'] = float(self.file_content['/attr/dEA'])
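# Semimajor axis values below 10000 are taken to be kilometres and converted to metres.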
if (pdict['a'] < 10000):
pdict['a'] = (pdict['a'] * 1000.0)
try:
pdict['b'] = float(self.file_content['/attr/Semiminor axis of ellipsoid'])
except KeyError:
pdict['b'] = (pdict['a'] * (1 - (1 / self.file_content['/attr/dObRecFlat'])))
pdict['h'] = self.file_content['/attr/NOMSatHeight']
if (pdict['h'] > .0):
pdict['h'] = (pdict['h'] - pdict['a'])
pdict['ssp_lon'] = float(self.file_content['/attr/NOMCenterLon'])
pdict['nlines'] = float(self.file_content['/attr/RegLength'])
pdict['ncols'] = float(self.file_content['/attr/RegWidth'])
pdict['scandir'] = 'N2S'
pdict['a_desc'] = 'FY-4 {} area'.format(self.filename_info['observation_type'])
pdict['a_name'] = f"{self.filename_info['observation_type']}_{res}m"
pdict['p_id'] = f'FY-4, {res}m'
area_extent = get_area_extent(pdict)
area_extent = (area_extent[0], area_extent[1], area_extent[2], area_extent[3])
area = get_area_definition(pdict, area_extent)
return area |
def test_tcn_backbone():
with pytest.raises(AssertionError):
TCN(in_channels=34, num_blocks=3, kernel_sizes=(3, 3, 3))
with pytest.raises(AssertionError):
TCN(in_channels=34, kernel_sizes=(3, 4, 3))
model = TCN(in_channels=34, num_blocks=2, kernel_sizes=(3, 3, 3))
pose2d = torch.rand((2, 34, 243))
feat = model(pose2d)
assert (len(feat) == 2)
assert (feat[0].shape == (2, 1024, 235))
assert (feat[1].shape == (2, 1024, 217))
max_norm = 0.1
model = TCN(in_channels=34, num_blocks=4, kernel_sizes=(3, 3, 3, 3, 3), max_norm=max_norm)
pose2d = torch.rand((2, 34, 243))
feat = model(pose2d)
assert (len(feat) == 4)
assert (feat[0].shape == (2, 1024, 235))
assert (feat[1].shape == (2, 1024, 217))
assert (feat[2].shape == (2, 1024, 163))
assert (feat[3].shape == (2, 1024, 1))
for module in model.modules():
if isinstance(module, torch.nn.modules.conv._ConvNd):
norm = module.weight.norm().item()
np.testing.assert_allclose(np.maximum(norm, max_norm), max_norm, rtol=0.0001)
model = TCN(in_channels=34, num_blocks=4, kernel_sizes=(3, 3, 3, 3, 3), use_stride_conv=True)
pose2d = torch.rand((2, 34, 243))
feat = model(pose2d)
assert (len(feat) == 4)
assert (feat[0].shape == (2, 1024, 27))
assert (feat[1].shape == (2, 1024, 9))
assert (feat[2].shape == (2, 1024, 3))
assert (feat[3].shape == (2, 1024, 1))
model1 = TCN(in_channels=34, stem_channels=4, num_blocks=1, kernel_sizes=(3, 3), dropout=0, residual=False, norm_cfg=None)
model2 = TCN(in_channels=34, stem_channels=4, num_blocks=1, kernel_sizes=(3, 3), dropout=0, residual=False, norm_cfg=None, use_stride_conv=True)
for m in model1.modules():
if isinstance(m, nn.Conv1d):
nn.init.constant_(m.weight, 0.5)
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
for m in model2.modules():
if isinstance(m, nn.Conv1d):
nn.init.constant_(m.weight, 0.5)
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
input1 = torch.rand((1, 34, 9))
input2 = input1.clone()
outputs1 = model1(input1)
outputs2 = model2(input2)
for (output1, output2) in zip(outputs1, outputs2):
assert torch.isclose(output1, output2).all()
criterion = nn.MSELoss()
target = torch.rand(output1.shape)
loss1 = criterion(output1, target)
loss2 = criterion(output2, target)
loss1.backward()
loss2.backward()
for (m1, m2) in zip(model1.modules(), model2.modules()):
if isinstance(m1, nn.Conv1d):
assert torch.isclose(m1.weight.grad, m2.weight.grad).all() |
def main():
data = sys.argv[1].encode('utf-8')
print(f'Compressing data: {data}')
compressor = brotli.Compressor(mode=brotli.MODE_TEXT)
compressed = (compressor.process(data) + compressor.finish())
print(f'Compressed data: {compressed}')
decompressor = brotli.Decompressor()
decompressed = (decompressor.process(compressed) + decompressor.finish())
print(f'Decompressed data: {decompressed}') |
def runScript(N):
script = 'elemwise_time_test.py'
path = os.path.dirname(os.path.abspath(__file__))
proc = subprocess.Popen(['python', script, '--script', '-N', str(N)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path)
(out, err) = proc.communicate()
if err:
print(err)
sys.exit()
return list(map(float, out.decode(console_encoding).split(' '))) |
def run(config):
config['drop_last'] = False
loaders = utils.get_data_loaders(**config)
net = inception_utils.load_inception_net(parallel=config['parallel'])
(pool, logits, labels) = ([], [], [])
device = 'cuda'
for (i, (x, y)) in enumerate(tqdm(loaders[0])):
try:
x = x.to(device)
with torch.no_grad():
(pool_val, logits_val) = net(x)
pool += [np.asarray(pool_val.cpu())]
logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
labels += [np.asarray(y.cpu())]
except Exception as e:
x = x.to(device)
with torch.no_grad():
(pool_val, logits_val) = net(x)
pool += [np.asarray(pool_val.cpu())]
logits += [np.asarray(F.softmax(logits_val, 1).cpu())]
labels += [np.asarray(y.cpu())]
(pool, logits, labels) = [np.concatenate(item, 0) for item in [pool, logits, labels]]
print('Saving pool, logits, and labels to disk...')
np.savez((config['dataset'] + '_inception_activations.npz'), {'pool': pool, 'logits': logits, 'labels': labels})
print('Calculating inception metrics...')
(IS_mean, IS_std) = inception_utils.calculate_inception_score(logits)
print(('Training data from dataset %s has IS of %5.5f +/- %5.5f' % (config['dataset'], IS_mean, IS_std)))
print('Calculating means and covariances...')
(mu, sigma) = (np.mean(pool, axis=0), np.cov(pool, rowvar=False))
print('Saving calculated means and covariances to disk...')
np.savez((config['dataset'].strip('_hdf5') + '_inception_moments.npz'), **{'mu': mu, 'sigma': sigma}) |
def get_mock_cfg(finetune_from_model):
cfg_mock = OmegaConf.create({'checkpoint': {'optimizer_overrides': '{}', 'reset_dataloader': False, 'reset_meters': False, 'reset_optimizer': False, 'reset_lr_scheduler': False, 'finetune_from_model': finetune_from_model, 'model_parallel_size': 1}, 'common': {'model_parallel_size': 1}})
return cfg_mock |
def _resnet(arch: str, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], pretrained: bool, progress: bool, num_classes: int, **kwargs: Any):
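# Build the ResNet and, when pretrained, copy the ImageNet weights except the final fc layer so that a custom num_classes can be used.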
model = ResNet(block, layers, **kwargs, num_classes=num_classes)
print('num_classes = ', num_classes)
if pretrained:
print('model use imagenet pretained!')
loaded_state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
del loaded_state_dict['fc.weight']
del loaded_state_dict['fc.bias']
state_dict = model.state_dict()
for k in state_dict.keys():
if (k in loaded_state_dict):
state_dict[k] = loaded_state_dict[k]
model.load_state_dict(state_dict)
return model |
def randomunitarieswom(qnnarchwom):
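# For every layer, draw one random unitary per output qubit acting on the layer's inputs plus that output qubit, pad it with identities on the remaining output qubits and reorder the wires with swappedOp.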
units = []
for i in range(1, len(qnnarchwom)):
qubitnumberin = qnnarchwom[(i - 1)]
qubitnumberout = qnnarchwom[i]
unitlayer = []
for j in range(qubitnumberout):
unit = randomunitary((qubitnumberin + 1))
if (qubitnumberout != 1):
unit = qt.tensor(unit, tensoredId((qubitnumberout - 1)))
unit = swappedOp(unit, qubitnumberin, (qubitnumberin + j))
unitlayer.append(unit)
units.append(unitlayer)
return units |
class Vocab(object):
def __init__(self, vocab_file, max_size):
self._word_to_id = {}
self._id_to_word = {}
self._count = 0
for w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
with open(vocab_file, 'r', encoding='utf8') as vocab_f:
for line in vocab_f:
pieces = line.split()
if (len(pieces) != 2):
print(('Warning: incorrectly formatted line in vocabulary file: %s\n' % line))
continue
w = pieces[0]
if (w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]):
raise Exception(("<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn't be in the vocab file, but %s is" % w))
if (w in self._word_to_id):
raise Exception(('Duplicated word in vocabulary file: %s' % w))
self._word_to_id[w] = self._count
self._id_to_word[self._count] = w
self._count += 1
if ((max_size != 0) and (self._count >= max_size)):
break
def word2id(self, word):
if (word not in self._word_to_id):
return self._word_to_id[UNKNOWN_TOKEN]
return self._word_to_id[word]
def id2word(self, word_id):
if (word_id not in self._id_to_word):
raise ValueError(('Id not found in vocab: %d' % word_id))
return self._id_to_word[word_id]
def size(self):
return self._count
def write_metadata(self, fpath):
print(('Writing word embedding metadata file to %s...' % fpath))
with open(fpath, 'w') as f:
fieldnames = ['word']
writer = csv.DictWriter(f, delimiter='\t', fieldnames=fieldnames)
for i in range(self.size()):
writer.writerow({'word': self._id_to_word[i]}) |
@pytest.mark.parametrize('A_parts, indices', [((np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3))), (slice(2, 3), np.array([0, 1, 2]), 1)), ((np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3))), (slice(2, 3), 1, np.array([0, 1, 2]))), ((np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3))), (1, slice(2, 3), np.array([0, 1, 2]))), ((np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3)), np.random.normal(size=(4, 3))), (np.random.randint(2, size=(4, 3)), 1, 0))])
def test_expand_indices_single_indices(A_parts, indices):
A = pt.stack(A_parts)
at_indices = [as_index_constant(idx) for idx in indices]
full_indices = expand_indices(at_indices, shape_tuple(A))
assert (len(full_indices) == A.ndim)
exp_res = A[indices].eval()
res = A[full_indices].eval()
assert np.array_equal(res, exp_res) |
def convert_options(settings, defaults=None):
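# Collect Raven/Sentry client options from either a plain dict of settings or a Django-style settings object; keys already present in SENTRY_CONFIG/RAVEN_CONFIG are kept and individual SENTRY_<KEY> settings only fill in the gaps.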
if (defaults is None):
defaults = {}
if isinstance(settings, dict):
def getopt(key, default=None):
return settings.get(('SENTRY_%s' % key.upper()), defaults.get(key, default))
options = copy.copy((settings.get('SENTRY_CONFIG') or settings.get('RAVEN_CONFIG') or {}))
else:
def getopt(key, default=None):
return getattr(settings, ('SENTRY_%s' % key.upper()), defaults.get(key, default))
options = copy.copy((getattr(settings, 'SENTRY_CONFIG', None) or getattr(settings, 'RAVEN_CONFIG', None) or {}))
options.setdefault('include_paths', getopt('include_paths', []))
options.setdefault('exclude_paths', getopt('exclude_paths', []))
options.setdefault('timeout', getopt('timeout'))
options.setdefault('name', getopt('name'))
options.setdefault('auto_log_stacks', getopt('auto_log_stacks'))
options.setdefault('string_max_length', getopt('string_max_length'))
options.setdefault('list_max_length', getopt('list_max_length'))
options.setdefault('site', getopt('site'))
options.setdefault('processors', getopt('processors'))
options.setdefault('sanitize_keys', getopt('sanitize_keys'))
options.setdefault('dsn', getopt('dsn', os.environ.get('SENTRY_DSN')))
options.setdefault('context', getopt('context'))
options.setdefault('tags', getopt('tags'))
options.setdefault('release', getopt('release'))
options.setdefault('repos', getopt('repos'))
options.setdefault('environment', getopt('environment'))
options.setdefault('ignore_exceptions', getopt('ignore_exceptions'))
options.setdefault('sample_rate', getopt('sample_rate'))
transport = (getopt('transport') or options.get('transport'))
if isinstance(transport, string_types):
transport = import_string(transport)
options['transport'] = transport
return options |
class DF4C(DF):
def build(self):
log = logger.Logger(self.stdout, self.verbose)
mol = self.mol
auxmol = self.auxmol = addons.make_auxmol(self.mol, self.auxbasis)
n2c = mol.nao_2c()
naux = auxmol.nao_nr()
nao_pair = ((n2c * (n2c + 1)) // 2)
max_memory = ((self.max_memory - lib.current_memory()[0]) * 0.8)
if ((((((nao_pair * naux) * 3) * 16) / 1000000.0) * 2) < max_memory):
self._cderi = (r_incore.cholesky_eri(mol, auxmol=auxmol, aosym='s2', int3c='int3c2e_spinor', verbose=log), r_incore.cholesky_eri(mol, auxmol=auxmol, aosym='s2', int3c='int3c2e_spsp1_spinor', verbose=log))
else:
raise NotImplementedError
return self
def loop(self, blksize=None):
if (self._cderi is None):
self.build()
if (blksize is None):
blksize = self.blockdim
with addons.load(self._cderi[0], self._dataname) as ferill:
naoaux = ferill.shape[0]
with addons.load(self._cderi[1], self._dataname) as feriss:
for (b0, b1) in self.prange(0, naoaux, blksize):
erill = numpy.asarray(ferill[b0:b1], order='C')
eriss = numpy.asarray(feriss[b0:b1], order='C')
(yield (erill, eriss))
def get_jk(self, dm, hermi=1, with_j=True, with_k=True, direct_scf_tol=getattr(__config__, 'scf_hf_SCF_direct_scf_tol', 1e-13), omega=None):
if (omega is None):
return df_jk.r_get_jk(self, dm, hermi, with_j, with_k)
with self.range_coulomb(omega) as rsh_df:
return df_jk.r_get_jk(rsh_df, dm, hermi, with_j, with_k)
def ao2mo(self, mo_coeffs):
raise NotImplementedError |
@pytest.mark.linux
@pytest.mark.parametrize('url', ['/föö.html', 'file:///föö.html'])
@ascii_locale
def test_open_with_ascii_locale(request, server, tmp_path, quteproc_new, url):
args = (['--temp-basedir'] + _base_args(request.config))
quteproc_new.start(args, env={'LC_ALL': 'C'})
quteproc_new.set_setting('url.auto_search', 'never')
quteproc_new.send_cmd(':open {}'.format(url))
if (not request.config.webengine):
line = quteproc_new.wait_for(message='Error while loading *: Error opening /*: No such file or directory')
line.expected = True
quteproc_new.wait_for(message="load status for <* tab_id=* url='*/f%C3%B6%C3%B6.html'>: LoadStatus.error")
if request.config.webengine:
line = quteproc_new.wait_for(message='Load error: ERR_FILE_NOT_FOUND')
line.expected = True |
def main(args):
wav_scp = codecs.open((Path(args.path) / 'wav.scp'), 'r', 'utf-8')
textgrid_flist = codecs.open((Path(args.path) / 'textgrid_new.flist'), 'r', 'utf-8')
utt2textgrid = {}
for line in textgrid_flist:
line_array = line.strip().split(' ')
path = Path(line_array[1])
uttid = line_array[0]
utt2textgrid[uttid] = path
all_segments = []
for line in wav_scp:
uttid = line.strip().split(' ')[0]
uttid_part = uttid
if (args.mars == True):
uttid_list = uttid.split('_')
uttid_part = ((uttid_list[0] + '_') + uttid_list[1])
if (uttid_part not in utt2textgrid):
print(("%s doesn't have transcription" % uttid))
continue
segments = []
try:
tg = textgrid.TextGrid.fromFile(utt2textgrid[uttid_part])
except:
pdb.set_trace()
for i in range(tg.__len__()):
for j in range(tg[i].__len__()):
if tg[i][j].mark:
segments.append(Segment(uttid, tg[i].name, tg[i][j].minTime, tg[i][j].maxTime, tg[i][j].mark.strip()))
segments = sorted(segments, key=(lambda x: x.stime))
if args.no_overlap:
segments = filter_overlap(segments)
all_segments += segments
wav_scp.close()
textgrid_flist.close()
segments_file = codecs.open((Path(args.path) / 'segments_all'), 'w', 'utf-8')
utt2spk_file = codecs.open((Path(args.path) / 'utt2spk_all'), 'w', 'utf-8')
text_file = codecs.open((Path(args.path) / 'text_all'), 'w', 'utf-8')
for i in range(len(all_segments)):
utt_name = ('%s-%s-%07d-%07d' % (all_segments[i].uttid, all_segments[i].spkr, (all_segments[i].stime * 100), (all_segments[i].etime * 100)))
segments_file.write(('%s %s %.2f %.2f\n' % (utt_name, all_segments[i].uttid, all_segments[i].stime, all_segments[i].etime)))
utt2spk_file.write(('%s %s-%s\n' % (utt_name, all_segments[i].uttid, all_segments[i].spkr)))
text_file.write(('%s %s\n' % (utt_name, all_segments[i].text)))
segments_file.close()
utt2spk_file.close()
text_file.close() |
def test_rainfall():
with Simulation(MODEL_RAIN) as sim:
rg = RainGages(sim)['Gage1']
assert (rg.raingageid == 'Gage1')
sim.step_advance(3600)
for (ind, step) in enumerate(sim):
if (0 < ind < 5):
assert (rg.total_precip == 1)
assert (rg.rainfall == 1)
assert (rg.snowfall == 0)
if (ind == 5):
rg.total_precip = 10
if (ind >= 6):
assert (int(rg.total_precip) == 10)
assert (int(rg.rainfall) == 10)
assert (int(rg.snowfall) == 0)
stats = SystemStats(sim)
assert (int(stats.runoff_stats['rainfall']) == 65) |
def check_range(value, range_threshold=None):
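# Nagios-style range threshold check: thresholds look like 'start:end', '~' means negative infinity, an empty start defaults to 0 and an empty end to +infinity.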
try:
float(value)
except Exception:
return False
if (not range_threshold):
range_threshold = '~:'
range_threshold = str(range_threshold)
if (range_threshold[0] == '@'):
return (not check_range(value, range_threshold[1:]))
if (range_threshold.find(':') > (- 1)):
(start, end) = range_threshold.split(':', 1)
else:
start = ''
end = range_threshold
if (start == '~'):
start = None
if (start == ''):
start = 0
if (end == ''):
end = None
try:
if ((start is not None) and (float(value) < float(start))):
return False
if ((end is not None) and (float(value) > float(end))):
return False
except ValueError:
raise InvalidThreshold(('Invalid threshold format: %s' % range_threshold))
return True |
def seed_all(seed=1029):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True |
class MLLT(Frame):
_framespec = [SizedIntegerSpec('frames', size=2, default=0), SizedIntegerSpec('bytes', size=3, default=0), SizedIntegerSpec('milliseconds', size=3, default=0), ByteSpec('bits_for_bytes', default=0), ByteSpec('bits_for_milliseconds', default=0), BinaryDataSpec('data')]
def __eq__(self, other):
return (self.data == other)
__hash__ = Frame.__hash__ |
class AllTypesSharder(EmbeddingBagCollectionSharder):
def sharding_types(self, compute_device_type: str) -> List[str]:
return [ShardingType.DATA_PARALLEL.value, ShardingType.TABLE_WISE.value, ShardingType.ROW_WISE.value, ShardingType.TABLE_ROW_WISE.value, ShardingType.COLUMN_WISE.value, ShardingType.TABLE_COLUMN_WISE.value]
def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
return [EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.FUSED.value, EmbeddingComputeKernel.FUSED_UVM.value, EmbeddingComputeKernel.FUSED_UVM_CACHING.value, EmbeddingComputeKernel.QUANT.value] |
class KDFDatabase(object):
def __init__(self, filename):
import sqlite3
conn = sqlite3.connect(filename, 30)
self.fragments = conn.execute('SELECT * FROM fragments;').fetchall()
conn.close()
def decode(self):
fragments_data = []
for (id, payload_type, payload_value) in self.fragments:
if ((payload_type == 'blob') and (id != 'max_id')):
fragment = PackedIon(StringIO.StringIO(payload_value).read()).decode()
fragments_data.append(TypedData(fragment.id, id.encode('utf8'), fragment.value))
return fragments_data |
class Foo(object):
class_var = 42
another_class_var = 42
class Meta(object):
def foo():
return True
def __init__(self, attr):
self.attr = attr
self.attr2 = attr
def property_simple(self) -> int:
return 42
def method_okay(self, foo=None, bar=None):
return True
def method_multiline(self, foo=None, bar=None, baz=None):
return True
def method_tricky(self, foo=None, bar=dict(foo=1, bar=2)):
return True
def method_sphinx_docs(self, foo, bar=0):
return (foo + bar)
def method_google_docs(self, foo, bar=0):
return (foo + bar)
def method_sphinx_unicode(self):
return 'sphinx'
def method_google_unicode(self):
return 'google' |
class TestImports(TestCase):
EXCLUSION_LIST = ['pysmt.test', 'pysmt.solvers', 'pysmt.cmd']
def test_imports(self):
stack = [(pysmt.__name__, pysmt.__path__)]
while stack:
(module_name, module_path) = stack.pop()
for (_, name, ispkg) in pkgutil.iter_modules(module_path):
fullname = ('%s.%s' % (module_name, name))
command = [sys.executable, '-c', ('import %s' % fullname)]
returncode = subprocess.call(command)
self.assertEqual(returncode, 0, msg=('Failed to import %s module' % fullname))
if (ispkg and (fullname not in TestImports.EXCLUSION_LIST)):
stack.append((fullname, [os.path.join(p, name) for p in module_path])) |
def _less_than_indices(left: pd.Series, right: pd.Series, strict: bool, multiple_conditions: bool, keep: str) -> tuple:
if (left.min() > right.max()):
return None
outcome = _null_checks_cond_join(left=left, right=right)
if (not outcome):
return None
(left, right, left_index, right_index, right_is_sorted, any_nulls) = outcome
search_indices = right.searchsorted(left, side='left')
len_right = right.size
rows_equal = (search_indices == len_right)
if rows_equal.any():
left = left[(~ rows_equal)]
left_index = left_index[(~ rows_equal)]
search_indices = search_indices[(~ rows_equal)]
if strict:
rows_equal = right[search_indices]
rows_equal = (left == rows_equal)
if rows_equal.any():
replacements = right.searchsorted(left, side='right')
search_indices = np.where(rows_equal, replacements, search_indices)
rows_equal = (search_indices == len_right)
if rows_equal.any():
left = left[(~ rows_equal)]
left_index = left_index[(~ rows_equal)]
search_indices = search_indices[(~ rows_equal)]
if (not search_indices.size):
return None
if multiple_conditions:
return (left_index, right_index, search_indices)
if (right_is_sorted and (keep == 'first')):
if any_nulls:
return (left_index, right_index[search_indices])
return (left_index, search_indices)
right = [right_index[ind:len_right] for ind in search_indices]
if (keep == 'first'):
right = [arr.min() for arr in right]
return (left_index, right)
if (keep == 'last'):
right = [arr.max() for arr in right]
return (left_index, right)
right = np.concatenate(right)
left = np.repeat(left_index, (len_right - search_indices))
return (left, right) |
@tensorclass
class NonTensorData():
data: Any
def __post_init__(self):
if isinstance(self.data, NonTensorData):
self.data = self.data.data
old_eq = self.__class__.__eq__
if (old_eq is _eq):
global NONTENSOR_HANDLED_FUNCTIONS
NONTENSOR_HANDLED_FUNCTIONS.extend(TD_HANDLED_FUNCTIONS)
@functools.wraps(_eq)
def __eq__(self, other):
if isinstance(other, NonTensorData):
return torch.full(self.batch_size, (self.data == other.data), device=self.device)
return old_eq(self, other)
self.__class__.__eq__ = __eq__
_ne = self.__class__.__ne__
@functools.wraps(_ne)
def __ne__(self, other):
if isinstance(other, NonTensorData):
return torch.full(self.batch_size, (self.data != other.data), device=self.device)
return _ne(self, other)
self.__class__.__ne__ = __ne__
_xor = self.__class__.__xor__
@functools.wraps(_xor)
def __xor__(self, other):
if isinstance(other, NonTensorData):
return torch.full(self.batch_size, (self.data ^ other.data), device=self.device)
return _xor(self, other)
self.__class__.__xor__ = __xor__
_or = self.__class__.__or__
@functools.wraps(_or)
def __or__(self, other):
if isinstance(other, NonTensorData):
return torch.full(self.batch_size, (self.data | other.data), device=self.device)
return _or(self, other)
self.__class__.__or__ = __or__
def empty(self, recurse=False):
return NonTensorData(data=self.data, batch_size=self.batch_size, names=(self.names if self._has_names() else None), device=self.device)
def to_dict(self):
return self.data
@classmethod
def _stack_non_tensor(cls, list_of_non_tensor, dim=0):
first = list_of_non_tensor[0]
if all(((data.data == first.data) for data in list_of_non_tensor[1:])):
batch_size = list(first.batch_size)
batch_size.insert(dim, len(list_of_non_tensor))
return NonTensorData(data=first.data, batch_size=batch_size, names=(first.names if first._has_names() else None), device=first.device)
from tensordict._lazy import LazyStackedTensorDict
return LazyStackedTensorDict(*list_of_non_tensor, stack_dim=dim)
@classmethod
def __torch_function__(cls, func: Callable, types: tuple[(type, ...)], args: tuple[(Any, ...)]=(), kwargs: (dict[(str, Any)] | None)=None) -> Callable:
if ((func not in _TD_PASS_THROUGH) or (not all((issubclass(t, (Tensor, cls)) for t in types)))):
return NotImplemented
escape_conversion = (func in (torch.stack,))
if (kwargs is None):
kwargs = {}
if (len(args) > 0):
tensorclass_instance = args[0]
else:
tensorclass_instance = kwargs.get('input', kwargs['tensors'])
if isinstance(tensorclass_instance, (tuple, list)):
tensorclass_instance = tensorclass_instance[0]
if (not escape_conversion):
args = tuple((_arg_to_tensordict(arg) for arg in args))
kwargs = {key: _arg_to_tensordict(value) for (key, value) in kwargs.items()}
result = TD_HANDLED_FUNCTIONS[func](*args, **kwargs)
if isinstance(result, (list, tuple)):
return result.__class__((_from_tensordict_with_copy(tensorclass_instance, tensordict_result) for tensordict_result in result))
if (not escape_conversion):
return _from_tensordict_with_copy(tensorclass_instance, result)
return result |
class HomeTheaterTestDrive():
def main(*args):
amp: Amplifier = Amplifier('Amplifier')
tuner: Tuner = Tuner('AM/FM Tuner', amp)
player: StreamingPlayer = StreamingPlayer('Streaming Player', amp)
cd: CdPlayer = CdPlayer('CD Player', amp)
projector: Projector = Projector('Projector', player)
lights: TheaterLights = TheaterLights('Theater Ceiling Lights')
screen: Screen = Screen('Theater Screen')
popper: PopcornPopper = PopcornPopper('Popcorn Popper')
homeTheater: HomeTheaterFacade = HomeTheaterFacade(amp, tuner, player, projector, screen, lights, popper)
homeTheater.watchMovie('Raiders of the Lost Ark')
homeTheater.endMovie() |
def continuous_contracts(path_to_data_files: str):
start_date = str_to_date('2019-01-01')
end_date = str_to_date('2019-01-10')
fields = PriceField.ohlcv()
tickers = [PortaraTicker('VX', SecurityType.FUTURE, 1000), PortaraTicker('WEAT', SecurityType.FUTURE, 100)]
daily_freq = Frequency.DAILY
if (path_to_data_files is None):
raise ValueError('Please provide a correct path to the Portara data and assign it to the path_to_data_files variable.')
print('\nMultiple continuous tickers, daily frequency, open prices')
dp = PortaraDataProvider(path_to_data_files, tickers, fields, start_date, end_date, daily_freq)
prices = dp.get_price(tickers, PriceField.Open, start_date, end_date, daily_freq)
print(prices)
print('\nMultiple continuous tickers, daily frequency, close prices')
dp = PortaraDataProvider(path_to_data_files, tickers, fields, start_date, end_date, daily_freq)
prices = dp.get_price(tickers, PriceField.Close, start_date, end_date, daily_freq)
print(prices) |
class TestGrabKey(EndianTest):
def setUp(self):
self.req_args_0 = {'grab_window': , 'key': 223, 'keyboard_mode': 1, 'modifiers': 44275, 'owner_events': 1, 'pointer_mode': 1}
self.req_bin_0 = b'!\x01\x00\x04\x7fb\r\xdf\xac\xf3\xdf\x01\x01\x00\x00\x00'
def testPackRequest0(self):
bin = request.GrabKey._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.GrabKey._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.req_args_0) |
def test_circular_control_curve_interpolated_json():
model = load_model('reservoir_with_circular_cc.json')
reservoir1 = model.nodes['reservoir1']
model.setup()
path = os.path.join(os.path.dirname(__file__), 'models', 'control_curve.csv')
control_curve = pd.read_csv(path)['Control Curve'].values
values = [(- 8), (- 6), (- 4)]
@assert_rec(model, reservoir1.cost)
def expected_cost(timestep, si):
volume_factor = reservoir1._current_pc[si.global_id]
cc = control_curve[timestep.index]
return np.interp(volume_factor, [0.0, cc, 1.0], values[::(- 1)])
model.run() |
class SingleConvBlock(nn.Module):
def __init__(self, in_features, out_features, stride, use_bs=True):
super(SingleConvBlock, self).__init__()
self.use_bn = use_bs
self.conv = nn.Conv2d(in_features, out_features, 1, stride=stride, bias=True)
self.bn = nn.BatchNorm2d(out_features)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
return x |
def to_image(tensor, adaptive=False):
if (len(tensor.shape) == 4):
tensor = tensor[0]
if adaptive:
tensor = ((tensor - tensor.min()) / (tensor.max() - tensor.min()))
return ToPILImage()((255 * tensor.cpu().detach()).to(torch.uint8))
else:
tensor = ((tensor + 1) / 2)
tensor.clamp(0, 1)
return ToPILImage()((255 * tensor.cpu().detach()).to(torch.uint8)) |
class ViTHybridConfig(PretrainedConfig):
model_type = 'vit-hybrid'
def __init__(self, backbone_config=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=1, num_channels=3, backbone_featmap_shape=[1, 1024, 24, 24], qkv_bias=True, **kwargs):
super().__init__(**kwargs)
if (backbone_config is None):
logger.info('`backbone_config` is `None`. Initializing the config with a `BiT` backbone.')
backbone_config = {'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage3'], 'embedding_dynamic_padding': True}
if isinstance(backbone_config, dict):
if ('model_type' in backbone_config):
backbone_config_class = CONFIG_MAPPING[backbone_config['model_type']]
else:
logger.info('`model_type` is not found in `backbone_config`. Use `Bit` as the backbone configuration class.')
backbone_config_class = BitConfig
backbone_config = backbone_config_class(**backbone_config)
self.backbone_featmap_shape = backbone_featmap_shape
self.backbone_config = backbone_config
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
def to_dict(self) -> Dict[(str, any)]:
output = copy.deepcopy(self.__dict__)
output['backbone_config'] = self.backbone_config.to_dict()
output['model_type'] = self.__class__.model_type
return output |
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
checkpoint_name = 'uclanlp/plbart-python-en_XX'
src_text = ['def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])', 'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])']
tgt_text = ['Returns the maximum value of a b c.', 'Sums the values of a b c.']
expected_src_tokens = [134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE]
@classmethod
def setUpClass(cls):
cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(cls.checkpoint_name, language_codes='base', src_lang='python', tgt_lang='en_XX')
cls.pad_token_id = 1
return cls
def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['java'], 50001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['python'], 50002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_XX'], 50003)
def test_python_en_tokenizer_batch_encode_plus(self):
ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, ids)
def test_python_en_tokenizer_decode_ignores_language_codes(self):
self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
self.assertEqual(result, expected_english)
self.assertNotIn(self.tokenizer.eos_token, result)
def test_python_en_tokenizer_truncation(self):
src_text = [('def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20)]
self.assertIsInstance(src_text[0], str)
desired_max_length = 10
ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
self.assertEqual(ids[(- 2)], 2)
self.assertEqual(ids[(- 1)], PYTHON_CODE)
self.assertEqual(len(ids), desired_max_length)
def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'java']), [50004, 50001])
def test_special_tokens_unaffacted_by_save_load(self):
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname)
new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def test_batch_fairseq_parity(self):
batch = self.tokenizer(self.src_text, padding=True)
with self.tokenizer.as_target_tokenizer():
targets = self.tokenizer(self.tgt_text, padding=True, return_tensors='pt')
labels = targets['input_ids']
batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id).tolist()
self.assertEqual(batch.input_ids[1][(- 2):], [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
self.assertEqual(batch.decoder_input_ids[1][(- 1)], 2)
self.assertEqual(labels[1][(- 2):].tolist(), [2, EN_CODE])
@require_torch
def test_python_en_tokenizer_prepare_batch(self):
batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt')
with self.tokenizer.as_target_tokenizer():
targets = self.tokenizer(self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt')
labels = targets['input_ids']
batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 26), batch.input_ids.shape)
self.assertEqual((2, 26), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2, batch.decoder_input_ids[(0, (- 1))])
self.assertEqual(self.tokenizer.prefix_tokens, [])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])
def test_seq2seq_max_length(self):
batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
with self.tokenizer.as_target_tokenizer():
targets = self.tokenizer(self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
labels = targets['input_ids']
batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def test_tokenizer_translation(self):
inputs = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en_XX', tgt_lang='java')
self.assertEqual(nested_simplify(inputs), {'input_ids': [[150, 242, 2, 50003]], 'attention_mask': [[1, 1, 1, 1]], 'forced_bos_token_id': 50001}) |
class S3StoreTestCase(SqlAlchemyTestCase):
@classmethod
def setUpClass(cls):
super(S3StoreTestCase, cls).setUpClass()
cls.this_dir = abspath(dirname(__file__))
cls.stuff_path = join(cls.this_dir, 'stuff')
cls.dog_jpeg = join(cls.stuff_path, 'dog.jpg')
cls.base_url = '
cls.sample_text_file1 = join(cls.stuff_path, 'sample_text_file1.txt')
def test_put_from_stream(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
store = create_s3_store(base_url=uri)
target_filename = 'test_put_from_stream/file_from_stream1.txt'
content = b'Lorem ipsum dolor sit amet'
stream = io.BytesIO(content)
length = store.put(target_filename, stream)
self.assertEqual(length, len(content))
self.assertIsInstance(store.open(target_filename), io.BytesIO)
def test_put_error(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
store = create_s3_store(base_url=uri[:(- 2)])
target_filename = 'test_put_from_stream/file_from_stream1.txt'
content = b'Lorem ipsum dolor sit amet'
stream = io.BytesIO(content)
with self.assertRaises(S3Error):
store.put(target_filename, stream)
def test_rrs_put(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
StoreManager.register('s3', functools.partial(create_s3_store, base_url=uri), default=True)
class Thumbnail(BaseThumbnail):
__reproducible__ = True
class Image(BaseImage):
__thumbnail_type__ = Thumbnail
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
image = Column(Image.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.image)
with StoreManager(session):
person1 = Person()
person1.image = Image.create_from(self.dog_jpeg)
self.assertIsInstance(person1.image, Image)
thumbnail = person1.image.get_thumbnail(width=100, auto_generate=True)
self.assertIsInstance(thumbnail, Thumbnail)
self.assertTrue(thumbnail.reproducible)
def test_delete(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
store = create_s3_store(base_url=uri)
target_filename = 'test_delete/sample_text_file1.txt'
with open(self.sample_text_file1, 'rb') as f:
length = store.put(target_filename, f)
self.assertEqual(length, getsize(self.sample_text_file1))
self.assertIsInstance(store.open(target_filename), io.BytesIO)
store.delete(target_filename)
with self.assertRaises(S3Error):
store.open(target_filename)
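# Deleting through a store configured with a truncated (invalid) base_url should raise S3Error.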
def test_delete_error(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
store = create_s3_store(base_url=uri)
wrong_store = create_s3_store(base_url=uri[:(- 2)])
target_filename = 'test_delete/sample_text_file1.txt'
with open(self.sample_text_file1, 'rb') as f:
length = store.put(target_filename, f)
self.assertEqual(length, getsize(self.sample_text_file1))
self.assertIsInstance(store.open(target_filename), io.BytesIO)
with self.assertRaises(S3Error):
wrong_store.delete(target_filename)
def test_open(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
store = create_s3_store(base_url=uri)
target_filename = 'test_delete/sample_text_file1.txt'
with open(self.sample_text_file1, 'rb') as f:
length = store.put(target_filename, f)
self.assertEqual(length, getsize(self.sample_text_file1))
self.assertIsInstance(store.open(target_filename), io.BytesIO)
with store.open(target_filename, mode='rb') as stored_file, open(self.sample_text_file1, mode='rb') as original_file:
self.assertEqual(stored_file.read(), original_file.read())
def test_locate(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
StoreManager.register('s3', functools.partial(create_s3_store, base_url=uri), default=True)
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
file = Column(File.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.file)
sample_content = b'Simple text.'
with StoreManager(session):
person1 = Person()
person1.file = File.create_from(io.BytesIO(sample_content), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.file, File)
self.assertEqual(person1.file.locate(), ('%s/%s?_ts=%s' % (uri, person1.file.path, person1.file.timestamp)))
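# A configured prefix should appear between the base URL and the attachment path in locate().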
def test_prefix(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
prefix = 'test'
StoreManager.register('s3', functools.partial(create_s3_store, base_url=uri, prefix=prefix), default=True)
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
file = Column(File.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.file)
sample_content = b'Simple text.'
with StoreManager(session):
person1 = Person()
person1.file = File.create_from(io.BytesIO(sample_content), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.file, File)
self.assertEqual(person1.file.locate(), ('%s/%s/%s?_ts=%s' % (uri, prefix, person1.file.path, person1.file.timestamp)))
def test_default_base_url(self):
store = S3Store(TEST_BUCKET, TEST_ACCESS_KEY, TEST_SECRET_KEY, TEST_REGION)
assert (store.base_url == (' % TEST_BUCKET))
def test_public_base_url_strip(self):
with mockup_s3_server(TEST_BUCKET) as (server, uri):
base_url = ('%s/' % uri)
StoreManager.register('s3', functools.partial(create_s3_store, base_url=base_url), default=True)
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
file = Column(File.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.file)
sample_content = b'Simple text.'
with StoreManager(session):
person1 = Person()
person1.file = File.create_from(io.BytesIO(sample_content), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.file, File)
self.assertEqual(person1.file.locate(), ('%s%s?_ts=%s' % (base_url, person1.file.path, person1.file.timestamp)))
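# A configured cdn_url should replace the store's base URL in located addresses.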
def test_cdn_url(self):
cdn_url = '
with mockup_s3_server(TEST_BUCKET) as (server, uri):
StoreManager.register('s3', functools.partial(create_s3_store, base_url=uri, cdn_url=cdn_url), default=True)
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
file = Column(File.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.file)
sample_content = b'Simple text.'
with StoreManager(session):
person1 = Person()
person1.file = File.create_from(io.BytesIO(sample_content), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.file, File)
self.assertEqual(person1.file.locate(), ('%s/%s?_ts=%s' % (cdn_url, person1.file.path, person1.file.timestamp)))
def test_cdn_url_strip(self):
cdn_url = '
with mockup_s3_server(TEST_BUCKET) as (server, uri):
StoreManager.register('s3', functools.partial(create_s3_store, base_url=uri, cdn_url=cdn_url), default=True)
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
file = Column(File.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.file)
sample_content = b'Simple text.'
with StoreManager(session):
person1 = Person()
person1.file = File.create_from(io.BytesIO(sample_content), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.file, File)
self.assertEqual(person1.file.locate(), ('%s%s?_ts=%s' % (cdn_url, person1.file.path, person1.file.timestamp)))
def test_cdn_url_with_prefix(self):
prefix = 'media'
cdn_url = '
with mockup_s3_server(TEST_BUCKET) as (server, uri):
StoreManager.register('s3', functools.partial(create_s3_store, prefix=prefix, base_url=uri, cdn_url=cdn_url), default=True)
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
file = Column(File.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.file)
sample_content = b'Simple text.'
with StoreManager(session):
person1 = Person()
person1.file = File.create_from(io.BytesIO(sample_content), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.file, File)
self.assertEqual(person1.file.locate(), ('%s/%s/%s?_ts=%s' % (cdn_url, prefix, person1.file.path, person1.file.timestamp)))
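# With cdn_prefix_ignore=True, the store prefix should be dropped from CDN-based URLs.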
def test_cdn_url_with_ignore_prefix(self):
prefix = 'media'
cdn_url = '
with mockup_s3_server(TEST_BUCKET) as (server, uri):
StoreManager.register('s3', functools.partial(create_s3_store, prefix=prefix, base_url=uri, cdn_url=cdn_url, cdn_prefix_ignore=True), default=True)
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
file = Column(File.as_mutable(Json))
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.file)
sample_content = b'Simple text.'
with StoreManager(session):
person1 = Person()
person1.file = File.create_from(io.BytesIO(sample_content), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.file, File)
self.assertEqual(person1.file.locate(), ('%s/%s?_ts=%s' % (cdn_url, person1.file.path, person1.file.timestamp))) |
from optparse import OptionGroup

def add_filter_options(parser):
grp = OptionGroup(parser, 'Trace frequency filter options')
grp.add_option('--lowpass', dest='lowpass_frequency', type=float, help='The value of the lowpass filter applied to traces.', default=None)
grp.add_option('--lowpass_rel', dest='rel_lowpass_frequency', type=float, help="The percentage of the store's sampling rate to be used as the lowpass filter.", default=None)
grp.add_option('--highpass', dest='highpass_frequency', type=float, help='The value of the highpass filter applied to traces.', default=None)
grp.add_option('--highpass_rel', dest='rel_highpass_frequency', type=float, help="The percentage of the store's sampling rate to be used as the highpass filter.", default=None)
parser.add_option_group(grp) |