code stringlengths 101 5.91M |
|---|
class BaseAction(object):
    """Base class for actions: holds the environment context and yields a neutral reward."""

    def __init__(self, gt_graph, env, rewards, strict=True):
        # Keep references to the ground-truth graph, environment, reward table
        # and strictness flag for subclasses to use.
        self.gt_graph = gt_graph
        self.env = env
        self.rewards = rewards
        self.strict = strict

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        """Return ``(reward, done)``; the base action always finishes with the neutral reward."""
        return self.rewards['neutral'], True
class AbstractPartitionDiagrams(Parent, UniqueRepresentation):
    """Parent class for families of partition diagrams of a given (half-)integer order.

    For an integer order ``k`` the base set is ``{1, ..., k, -k, ..., -1}``;
    a half-integer order ``k + 1/2`` gives a base set of size ``2k + 1``.
    NOTE(review): subclasses are assumed to provide ``_name`` and
    ``_diagram_func`` — confirm against the concrete subclasses.
    """

    Element = AbstractPartitionDiagram

    def __init__(self, order, category=None):
        """Initialize the parent in its category and compute the diagram base set."""
        if (category is None):
            category = FiniteEnumeratedSets()
        Parent.__init__(self, category=category)
        if (order in ZZ):
            self.order = ZZ(order)
            base_set = frozenset((list(range(1, (order + 1))) + list(range((- order), 0))))
        else:
            # Half-integer order: store it exactly as a rational and shift the
            # ranges by 1/2 so the base set still consists of integers.
            self.order = QQ(order)
            base_set = frozenset((list(range(1, (ZZ(((ZZ(1) / ZZ(2)) + order)) + 1))) + list(range(ZZ((((- ZZ(1)) / ZZ(2)) - order)), 0))))
        self._set = base_set

    def _repr_(self):
        """String representation, e.g. '<name> diagrams of order <order>'."""
        return '{} diagrams of order {}'.format(self._name, self.order)

    def __iter__(self):
        """Iterate over all diagrams, wrapping each raw diagram as an element without re-checking it."""
        # ``__func__`` unwraps the staticmethod so it can be called with the order.
        for i in self._diagram_func.__func__(self.order):
            (yield self.element_class(self, i, check=False))

    def __contains__(self, obj):
        """Return whether ``obj`` is (convertible to) a diagram of this family."""
        if (not hasattr(obj, '_base_diagram')):
            try:
                obj = self._element_constructor_(obj)
            except (ValueError, TypeError):
                return False
        if obj.base_diagram():
            tst = sorted(flatten(obj.base_diagram()))
            # The flattened diagram must cover {-k, ..., -1, 1, ..., k} exactly
            # (even length, consecutive negative then positive integers).
            if ((len(tst) % 2) or (tst != (list(range(((- len(tst)) // 2), 0)) + list(range(1, ((len(tst) // 2) + 1)))))):
                return False
            return True
        # An empty diagram only belongs to the order-0 family.
        return (self.order == 0)

    def _element_constructor_(self, d):
        """Build an element of this parent from raw diagram data ``d`` (with checking)."""
        return self.element_class(self, d)
def module_init():
    """Create and return the root pybindgen ``Module`` for ns3 applications (C++ namespace ``::ns3``)."""
    return Module('ns.applications', cpp_namespace='::ns3')
def _lu_impl(A, pivot=True, get_infos=False, out=None):
return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos)) |
class SHD():
    """Structural Hamming Distance between a ground-truth graph and an estimated graph.

    The distance counts (1) node pairs connected in exactly one of the two
    graphs and (2) edges present in both graphs whose arrow orientation
    disagrees.
    """

    __SHD = 0  # class-level default, kept for backward compatibility

    def __init__(self, truth: Graph, est: Graph):
        """Compute the SHD between ``truth`` and ``est`` (graphs over the same node names)."""
        self.__SHD: int = 0
        names = [node.get_name() for node in truth.get_nodes()]
        # Hoist the node lookups out of the O(n^2) loops below.
        # Assumes get_node is a pure lookup by name — behavior unchanged.
        t_nodes = [truth.get_node(name) for name in names]
        e_nodes = [est.get_node(name) for name in names]
        n = len(names)
        # (1) Missing / extra adjacencies: each unordered pair checked once.
        for i in range(n):
            for j in range(i + 1, n):
                t_edge = truth.get_edge(t_nodes[i], t_nodes[j])
                e_edge = est.get_edge(e_nodes[i], e_nodes[j])
                if t_edge and (not e_edge):
                    self.__SHD += 1
                if (not t_edge) and e_edge:
                    self.__SHD += 1
        # (2) Orientation mismatches on edges present in both graphs.
        for i in range(n):
            for j in range(n):
                if not truth.get_edge(t_nodes[i], t_nodes[j]):
                    continue
                if not est.get_edge(e_nodes[i], e_nodes[j]):
                    continue
                if ((truth.get_endpoint(t_nodes[i], t_nodes[j]) == Endpoint.ARROW)
                        and (est.get_endpoint(e_nodes[j], e_nodes[i]) == Endpoint.ARROW)):
                    self.__SHD += 1

    def get_shd(self) -> int:
        """Return the accumulated structural Hamming distance."""
        return self.__SHD
def rprop(params: List[Tensor], grads: List[Tensor], prevs: List[Tensor], step_sizes: List[Tensor], *, step_size_min: float, step_size_max: float, etaminus: float, etaplus: float):
    """Functional Rprop update, applied in place to each parameter.

    The sign of ``grad * prev`` decides, per element, whether the adaptive
    step size grows (``etaplus``), shrinks (``etaminus``) or is left alone;
    elements whose gradient sign flipped take no step this iteration.
    """
    for param, grad, prev, step_size in zip(params, grads, prevs, step_sizes):
        agreement = grad.mul(prev).sign()
        agreement[agreement.gt(0)] = etaplus
        agreement[agreement.lt(0)] = etaminus
        agreement[agreement.eq(0)] = 1
        # Scale the step sizes and keep them inside the allowed band.
        step_size.mul_(agreement).clamp_(step_size_min, step_size_max)
        grad = grad.clone(memory_format=torch.preserve_format)
        # Sign flip: suppress the update (and the history) for these elements.
        grad[agreement.eq(etaminus)] = 0
        param.addcmul_(grad.sign(), step_size, value=(-1))
        prev.copy_(grad)
def point_accuracy(expected, observed, data=None, start=None, end=None):
    """Compute point-wise accuracy by delegating to the generic ``_accuracy`` helper
    with the point confusion matrix."""
    return _accuracy(
        expected,
        observed,
        data,
        start,
        end,
        cm=point_confusion_matrix,
    )
def compute_grad2(d_out, x_in):
    """Squared-gradient penalty: per-sample sum of ``(d d_out / d x_in) ** 2``.

    Gradients are taken with ``create_graph=True`` so the penalty itself is
    differentiable (e.g. for R1 regularization).
    """
    (grad_wrt_input,) = autograd.grad(
        outputs=d_out.sum(),
        inputs=x_in,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )
    squared = grad_wrt_input.pow(2)
    assert squared.size() == x_in.size()
    # Flatten all non-batch dimensions and reduce per sample.
    batch_size = x_in.size(0)
    return squared.view(batch_size, -1).sum(1)
class RegLog(nn.Module):
    """Linear evaluation head: pooling, optional batch-norm, then a linear classifier."""

    def __init__(self, num_labels, arch='resnet50', global_avg=False, use_bn=True):
        super(RegLog, self).__init__()
        self.bn = None
        if global_avg:
            # Global average pooling: feature width depends on the backbone.
            if arch == 'resnet18':
                s = 2048
            elif arch == 'resnet50':
                s = 2048
            elif arch == 'resnet50w2':
                s = 4096
            elif arch == 'resnet50w4':
                s = 8192
            self.av_pool = nn.AdaptiveAvgPool2d((1, 1))
        else:
            # Fixed 6x6 average pooling is only supported for resnet50.
            assert arch == 'resnet50'
            s = 8192
            self.av_pool = nn.AvgPool2d(6, stride=1)
        if use_bn:
            # NOTE(review): BN width is hard-coded to 2048 regardless of ``s`` — confirm.
            self.bn = nn.BatchNorm2d(2048)
        self.linear = nn.Linear(s, num_labels)
        self.linear.weight.data.normal_(mean=0.0, std=0.01)
        self.linear.bias.data.zero_()

    def forward(self, x):
        """Pool, optionally normalize, flatten and classify the feature map ``x``."""
        pooled = self.av_pool(x)
        if self.bn is not None:
            pooled = self.bn(pooled)
        flat = pooled.view(pooled.size(0), -1)
        return self.linear(flat)
def count_node_freq(fname, filter_size=100):
    """Count, for every node, how many edges of a CSV edge list touch it.

    The file is expected to start with a header row, followed by rows of the
    form ``(token_type, src, dst)``; the token type is ignored.  Degree
    statistics for a few fixed thresholds are printed, then a dict restricted
    to nodes with at least ``filter_size`` incident edges is returned.

    :param fname: path of the CSV edge-list file
    :param filter_size: minimum edge count for a node to be kept in the result
    :return: dict mapping node -> edge count, restricted to frequent nodes
    """
    node_dict = {}
    with open(fname, 'r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader, None)  # skip the header row
        for row in csv_reader:
            # Both endpoints of the edge contribute to the degree counts.
            for node in (row[1], row[2]):
                node_dict[node] = node_dict.get(node, 0) + 1
    # Report how many nodes reach each degree threshold
    # (one loop replaces the previous five copy-pasted counter blocks;
    # the printed output is unchanged).
    for threshold in (10, 100, 1000, 2000, 5000):
        count = sum(1 for freq in node_dict.values() if freq >= threshold)
        print('number of nodes with # edges >= {} is '.format(threshold), count)
    print('high level statistics')
    return {node: freq for (node, freq) in node_dict.items() if freq >= filter_size}
def test_fista_multiclass_classes(mult_dense_train_data):
    """After fitting, FistaClassifier exposes the sorted class labels in ``classes_``."""
    features, labels = mult_dense_train_data
    model = FistaClassifier()
    model.fit(features, labels)
    assert list(model.classes_) == [0, 1, 2]
def convert_sr(inpath, sr, output_path=None):
    """Resample an audio file to ``sr`` Hz using the ``sox`` command-line tool.

    :param inpath: path of the input audio file
    :param sr: target sample rate in Hz
    :param output_path: optional output path; a temporary ``.wav`` name is
        generated when omitted
    :return: path of the converted file
    """
    import shlex
    if (not output_path):
        output_path = generate_tmp_filename('wav')
    # Quote all interpolated values so paths with spaces or shell
    # metacharacters cannot break — or inject into — the shell command.
    cmd = 'sox {} -r {} {}'.format(shlex.quote(inpath), shlex.quote(str(sr)), shlex.quote(output_path))
    os.system(cmd)
    return output_path
def uce_loss_and_reg(alpha: torch.Tensor, y: torch.Tensor, beta_reg: float, reduction: str='sum') -> torch.Tensor:
    """Sum of the UCE loss and the entropy regularizer, reduced as requested.

    Both terms are computed unreduced, added element-wise, and only the
    combined loss is reduced via ``loss_reduce``.
    """
    per_sample = uce_loss(alpha, y, reduction='none') + entropy_reg(alpha, beta_reg, reduction='none')
    return loss_reduce(per_sample, reduction=reduction)
def add_extras(cfg, i, batch_norm=False):
    """Build the extra SSD feature layers from a config list.

    An ``'S'`` entry marks a stride-2 convolution whose output width is the
    *next* config entry; the kernel size alternates between 1 and 3 for each
    convolution emitted.  ``batch_norm`` is accepted for signature
    compatibility but unused here.
    """
    layers = []
    in_channels = i
    use_k3 = False  # alternates: False -> kernel 1, True -> kernel 3
    for idx, v in enumerate(cfg):
        if in_channels != 'S':
            kernel = 3 if use_k3 else 1
            if v == 'S':
                layers.append(nn.Conv2d(in_channels, cfg[idx + 1], kernel_size=kernel, stride=2, padding=1))
            else:
                layers.append(nn.Conv2d(in_channels, v, kernel_size=kernel))
            use_k3 = not use_k3
        in_channels = v
    return layers
class BaseInstrumenter(_BaseInstrumenter):
    """No-op instrumenter implementing the full ``_BaseInstrumenter`` interface.

    Every hook is intentionally a no-op, so this class can serve as the
    default/fallback backend when no real profiling or tracing tool is
    attached.
    """

    def register(self):
        """No-op registration hook."""
        pass
    def unregister(self):
        """No-op unregistration hook."""
        pass
    def get_registered(self):
        """Return the registered instrumenter; always ``None`` here."""
        return None
    def run(self, cmd, globals=None, locals=None):
        """No-op: does not execute ``cmd``."""
        pass
    def region_begin(self, module_name, function_name, file_name, line_number, code_object):
        """No-op region-entry hook."""
        pass
    def region_end(self, module_name, function_name, code_object):
        """No-op region-exit hook."""
        pass
    def rewind_begin(self, name, file_name=None, line_number=None):
        """No-op rewind-start hook."""
        pass
    def rewind_end(self, name, value):
        """No-op rewind-end hook."""
        pass
    def user_enable_recording(self):
        """No-op: recording cannot be enabled on the null backend."""
        pass
    def user_disable_recording(self):
        """No-op: recording cannot be disabled on the null backend."""
        pass
    def user_parameter_int(self, name, val):
        """No-op: discards the user-supplied int parameter."""
        pass
    def user_parameter_uint(self, name, val):
        """No-op: discards the user-supplied unsigned-int parameter."""
        pass
    def user_parameter_string(self, name, string):
        """No-op: discards the user-supplied string parameter."""
        pass
    def force_finalize(self):
        """No-op finalization hook."""
        pass
    def reregister_exit_handler(self):
        """No-op: no exit handler to re-register."""
        pass
def get_file_path(*paths):
    """Resolve a '/'-joined path inside this package's installed data files."""
    relative = '/'.join(paths)
    return pkg_resources.resource_filename(_package_name, relative)
class SolarPlant(BaseDataset):
    """Multivariate solar-plant power dataset loader.

    Reads the single merged CSV under ``rootdir`` (extracting ``merged.zip``
    first if no CSV is present), keeps the first ``num_columns`` ``Power_*``
    columns indexed by timestamp, and records train/val split metadata.
    """

    def __init__(self, rootdir=None, num_columns=100):
        """Load the dataset from ``rootdir`` (defaults to the repo's data directory)."""
        super().__init__()
        if (rootdir is None):
            # Default to <merlion root>/data/multivariate/solar_plant, resolved
            # relative to this source file.
            fdir = os.path.dirname(os.path.abspath(__file__))
            merlion_root = os.path.abspath(os.path.join(fdir, '..', '..', '..'))
            rootdir = os.path.join(merlion_root, 'data', 'multivariate', 'solar_plant')
        assert ('solar_plant' in rootdir.split('/')[(- 1)]), 'solar_plant should be found as the last level of the directory for this dataset'
        fnames = glob.glob(f'{rootdir}/*.csv')
        # If no CSV is present yet, extract the bundled zip archive once.
        if ((len(fnames) == 0) and os.path.isfile(f'{rootdir}/merged.zip')):
            with zipfile.ZipFile(f'{rootdir}/merged.zip', 'r') as zip_ref:
                zip_ref.extractall(rootdir)
            fnames = glob.glob(f'{rootdir}/*.csv')
        assert (len(fnames) == 1), f'rootdir {rootdir} does not contain dataset file.'
        for (i, fn) in enumerate(sorted(fnames)):
            df = pd.read_csv(fn)
            df['timestamp'] = pd.to_datetime(df['Datetime'])
            df.set_index('timestamp', inplace=True)
            df.drop(['LocalTime', 'Datetime'], axis=1, inplace=True)
            # Keep at most the first num_columns power columns.
            num_columns = min(num_columns, len(df.columns))
            cols = [f'Power_{i}' for i in range(num_columns)]
            df = df[cols]
            assert isinstance(df.index, pd.DatetimeIndex)
            df.sort_index(inplace=True)
            self.time_series.append(df)
            # Timestamps up to 2006-10-01 form the train/validation portion.
            self.metadata.append({'trainval': pd.Series((df.index <= '2006-10-01 00:00:00'), index=df.index), 'granularity': '30min', 'aggregation': 'Sum'})
class VectorFieldFreeModule(FiniteRankFreeModule):
    """Free module of vector fields along a parallelizable differentiable manifold.

    Elements are vector fields on ``domain`` with values in the codomain of
    ``dest_map``; the module is free of rank equal to the manifold dimension
    over the algebra of scalar fields on ``domain``.
    """

    Element = VectorFieldParal

    def __init__(self, domain, dest_map=None):
        """Construct the module; ``dest_map`` defaults to the identity map of ``domain``."""
        from sage.manifolds.differentiable.scalarfield import DiffScalarField
        self._domain = domain
        if (dest_map is None):
            dest_map = domain.identity_map()
        self._dest_map = dest_map
        self._ambient_domain = self._dest_map._codomain
        # Build the display and LaTeX names, recording a non-trivial destination map.
        name = ('X(' + domain._name)
        latex_name = ('\\mathfrak{X}\\left(' + domain._latex_name)
        if (dest_map is not domain.identity_map()):
            dm_name = dest_map._name
            dm_latex_name = dest_map._latex_name
            if (dm_name is None):
                dm_name = 'unnamed map'
            if (dm_latex_name is None):
                dm_latex_name = '\\mathrm{unnamed\\; map}'
            name += (',' + dm_name)
            latex_name += (',' + dm_latex_name)
        name += ')'
        latex_name += '\\right)'
        manif = self._ambient_domain.manifold()
        cat = Modules(domain.scalar_field_algebra()).FiniteDimensional()
        FiniteRankFreeModule.__init__(self, domain.scalar_field_algebra(), manif._dim, name=name, latex_name=latex_name, start_index=manif._sindex, output_formatter=DiffScalarField.coord_function, category=cat)
        self._induced_bases = {}
        if (self._dest_map != self._domain.identity_map()):
            # Non-identity destination map: induce bases from the ambient top
            # frames and propagate restriction/subframe relations through the
            # frame hierarchy so that restrictions stay consistent.
            for frame in self._ambient_domain._top_frames:
                if (frame.destination_map() == self._ambient_domain.identity_map()):
                    basis = self.basis(from_frame=frame)
                    self._induced_bases[frame] = basis
                    # Register the induced basis as a restriction of any frame
                    # defined on an open superset of the domain.
                    for dom in domain.open_supersets():
                        if (dom is not domain):
                            for supbase in dom._frames:
                                if ((supbase.domain() is dom) and (supbase.destination_map().restrict(domain) is self._dest_map) and (domain not in supbase._restrictions)):
                                    supbase._restrictions[domain] = basis
                                    supbase._subframes.add(basis)
                                    basis._superframes.add(supbase)
                    # Merge super/sub-frame bookkeeping with compatible subframes.
                    for superframe in basis._superframes:
                        for subframe in superframe._subframes:
                            if ((subframe.domain() is not domain) and subframe.domain().is_subset(self._domain) and (self._dest_map.restrict(subframe.domain()) is subframe.destination_map())):
                                subframe._superframes.update(basis._superframes)
                                basis._subframes.update(subframe._subframes)
                                basis._restrictions.update(subframe._restrictions)

    def _element_constructor_(self, comp=[], basis=None, name=None, latex_name=None):
        """Construct a vector field from components, another vector field, or zero.

        NOTE(review): ``comp=[]`` is a mutable default argument; it is only
        read here, never mutated, so behavior is safe — but a ``None``
        sentinel would be the conventional form.
        """
        try:
            # Scalar-field-like zero input yields the zero vector field.
            if comp.is_trivial_zero():
                return self.zero()
        except AttributeError:
            if (comp == 0):
                return self.zero()
        if isinstance(comp, VectorField):
            # Conversion from another vector field: restrict if domains allow it.
            if (self._domain.is_subset(comp._domain) and self._ambient_domain.is_subset(comp._ambient_domain)):
                return comp.restrict(self._domain)
            else:
                raise ValueError(('cannot convert the {}'.format(comp) + 'to a vector field in {}'.format(self)))
        if (not isinstance(comp, (list, tuple))):
            raise TypeError(('cannot convert the {} '.format(comp) + 'to an element of {}'.format(self)))
        resu = self.element_class(self, name=name, latex_name=latex_name)
        if comp:
            resu.set_comp(basis=basis)[:] = comp
        return resu

    def _coerce_map_from_(self, other):
        """Allow coercion from (free) vector-field modules on larger domains."""
        if isinstance(other, (VectorFieldModule, VectorFieldFreeModule)):
            return (self._domain.is_subset(other._domain) and self._ambient_domain.is_subset(other._ambient_domain))
        else:
            return False

    def _repr_(self):
        """String representation, mentioning a non-identity destination map if any."""
        description = 'Free module '
        if (self._name is not None):
            description += (self._name + ' ')
        description += 'of vector fields '
        if (self._dest_map is self._domain.identity_map()):
            description += 'on the {}'.format(self._domain)
        else:
            description += ('along the {}'.format(self._domain) + ' mapped into the {}'.format(self._ambient_domain))
        return description

    def domain(self) -> DifferentiableManifold:
        """Return the domain of the vector fields in this module."""
        return self._domain

    def ambient_domain(self) -> DifferentiableManifold:
        """Return the codomain manifold of the destination map."""
        return self._ambient_domain

    def destination_map(self) -> DiffMap:
        """Return the destination map of the vector fields."""
        return self._dest_map

    def tensor_module(self, k, l, *, sym=None, antisym=None):
        """Return (and cache) the free module of type-(k, l) tensor fields."""
        if (sym or antisym):
            raise NotImplementedError
        try:
            return self._tensor_modules[(k, l)]
        except KeyError:
            if ((k, l) == (1, 0)):
                T = self
            elif ((k, l) == (0, 1)):
                T = self.dual()
            else:
                from sage.manifolds.differentiable.tensorfield_module import TensorFieldFreeModule
                T = TensorFieldFreeModule(self, (k, l))
            self._tensor_modules[(k, l)] = T
            return T

    def exterior_power(self, p):
        """Return (and cache) the p-th exterior power (multivector fields of degree p)."""
        try:
            return self._exterior_powers[p]
        except KeyError:
            if (p == 0):
                L = self._ring
            elif (p == 1):
                L = self
            else:
                from sage.manifolds.differentiable.multivector_module import MultivectorFreeModule
                L = MultivectorFreeModule(self, p)
            self._exterior_powers[p] = L
            return L

    def dual_exterior_power(self, p):
        """Return (and cache) the p-th exterior power of the dual (differential p-forms)."""
        try:
            return self._dual_exterior_powers[p]
        except KeyError:
            if (p == 0):
                L = self._ring
            elif (p == 1):
                from sage.manifolds.differentiable.diff_form_module import VectorFieldDualFreeModule
                L = VectorFieldDualFreeModule(self)
            else:
                from sage.manifolds.differentiable.diff_form_module import DiffFormFreeModule
                L = DiffFormFreeModule(self, p)
            self._dual_exterior_powers[p] = L
            return L

    def general_linear_group(self):
        """Return the group of automorphism (field) elements of this module."""
        from sage.manifolds.differentiable.automorphismfield_group import AutomorphismFieldParalGroup
        return AutomorphismFieldParalGroup(self)

    def basis(self, symbol=None, latex_symbol=None, from_frame=None, indices=None, latex_indices=None, symbol_dual=None, latex_symbol_dual=None):
        """Return a vector frame as a basis of this module, reusing a known one if possible."""
        from sage.manifolds.differentiable.vectorframe import VectorFrame
        if (symbol is None):
            if (from_frame is None):
                return self.default_basis()
            else:
                # Inherit all display data from the originating frame.
                symbol = from_frame._symbol
                latex_symbol = from_frame._latex_symbol
                indices = from_frame._indices
                latex_indices = from_frame._latex_indices
                symbol_dual = from_frame._symbol_dual
                latex_symbol_dual = from_frame._latex_symbol_dual
        # Reuse an already-constructed basis with the same symbol.
        for other in self._known_bases:
            if (symbol == other._symbol):
                return other
        return VectorFrame(self, symbol, latex_symbol=latex_symbol, from_frame=from_frame, indices=indices, latex_indices=latex_indices, symbol_dual=symbol_dual, latex_symbol_dual=latex_symbol_dual)

    def _tensor(self, tensor_type, name=None, latex_name=None, sym=None, antisym=None, specific_type=None):
        """Construct a tensor field of the given type, dispatching to the most specific class."""
        from sage.manifolds.differentiable.automorphismfield import AutomorphismField, AutomorphismFieldParal
        from sage.manifolds.differentiable.metric import PseudoRiemannianMetric, DegenerateMetric
        from sage.tensor.modules.comp import CompWithSym
        (sym, antisym) = CompWithSym._canonicalize_sym_antisym((tensor_type[0] + tensor_type[1]), sym, antisym)
        if (tensor_type == (1, 0)):
            return self.element_class(self, name=name, latex_name=latex_name)
        elif (tensor_type == (0, 1)):
            return self.linear_form(name=name, latex_name=latex_name)
        elif ((tensor_type == (1, 1)) and (specific_type is not None)):
            if issubclass(specific_type, (AutomorphismField, AutomorphismFieldParal)):
                return self.automorphism(name=name, latex_name=latex_name)
        elif ((tensor_type[0] == 0) and (tensor_type[1] > 1) and antisym):
            # Fully antisymmetric covariant tensor: a differential form.
            if (len(antisym[0]) == tensor_type[1]):
                return self.alternating_form(tensor_type[1], name=name, latex_name=latex_name)
        elif ((tensor_type[0] > 1) and (tensor_type[1] == 0) and antisym):
            # Fully antisymmetric contravariant tensor: a multivector field.
            if (len(antisym[0]) == tensor_type[0]):
                return self.alternating_contravariant_tensor(tensor_type[0], name=name, latex_name=latex_name)
        elif ((tensor_type == (0, 2)) and (specific_type is not None)):
            if issubclass(specific_type, PseudoRiemannianMetric):
                return self.metric(name, latex_name=latex_name)
            if issubclass(specific_type, DegenerateMetric):
                sign = self._domain._dim
                return self.metric(name, latex_name=latex_name, signature=(0, (sign - 1), 1))
        # Generic case: element of the appropriate tensor module.
        return self.tensor_module(*tensor_type).element_class(self, tensor_type, name=name, latex_name=latex_name, sym=sym, antisym=antisym)

    def tensor_from_comp(self, tensor_type, comp, name=None, latex_name=None):
        """Construct a tensor field from a set of components in some basis of the module."""
        from sage.tensor.modules.comp import CompWithSym, CompFullyAntiSym
        if (comp._ring is not self._ring):
            raise ValueError('the components are not defined on the same ring as the module')
        if (comp._frame not in self._known_bases):
            raise ValueError('the components are not defined on a basis of the module')
        if (comp._nid != (tensor_type[0] + tensor_type[1])):
            raise ValueError('number of component indices not compatible with the tensor type')
        if (tensor_type == (1, 0)):
            resu = self.element_class(self, name=name, latex_name=latex_name)
        elif (tensor_type == (0, 1)):
            resu = self.linear_form(name=name, latex_name=latex_name)
        elif ((tensor_type[0] == 0) and (tensor_type[1] > 1) and isinstance(comp, CompFullyAntiSym)):
            resu = self.alternating_form(tensor_type[1], name=name, latex_name=latex_name)
        elif ((tensor_type[0] > 1) and (tensor_type[1] == 0) and isinstance(comp, CompFullyAntiSym)):
            resu = self.alternating_contravariant_tensor(tensor_type[0], name=name, latex_name=latex_name)
        else:
            resu = self.tensor_module(*tensor_type).element_class(self, tensor_type, name=name, latex_name=latex_name)
        # Carry over the (anti)symmetry metadata and install the components.
        if isinstance(comp, CompWithSym):
            resu._sym = comp._sym
            resu._antisym = comp._antisym
        resu._components[comp._frame] = comp
        return resu

    def sym_bilinear_form(self, name=None, latex_name=None):
        """Return a symmetric bilinear form (type-(0,2) symmetric tensor field)."""
        return self.tensor((0, 2), name=name, latex_name=latex_name, sym=(0, 1))

    def metric(self, name, signature=None, latex_name=None):
        """Construct a (pseudo-Riemannian or degenerate) metric on the module's domain.

        ``signature`` may be an integer (n_+ - n_-) for a pseudo-Riemannian
        metric, or a triple (n_+, n_-, n_0) for a degenerate one.
        """
        ndim = self._ambient_domain.dimension()
        try:
            # Triple form: (n_+, n_-, n_0); a TypeError below means the
            # signature is not iterable and the integer form is used instead.
            for elt in signature:
                if ((elt < 0) or (not isinstance(elt, (int, Integer)))):
                    raise ValueError('{} must be a positive integer'.format(elt))
            sign = ((signature[0] + signature[1]) + signature[2])
            if (sign != ndim):
                raise ValueError(('{} is different from the dimension'.format(sign) + ' of the manifold, who is {}'.format(ndim)))
            if (signature[2] != 0):
                from sage.manifolds.differentiable.metric import DegenerateMetricParal
                return DegenerateMetricParal(self, name, signature=signature, latex_name=latex_name)
        except TypeError:
            pass
        if (signature is None):
            # Default: Riemannian signature.
            signature = (ndim, 0)
        if isinstance(signature, (Integer, int)):
            # Integer signature must have the same parity as the dimension.
            if (((signature + ndim) % 2) == 1):
                if ((ndim % 2) == 0):
                    raise ValueError('the metric signature must be even')
                else:
                    raise ValueError('the metric signature must be odd')
            signature = (int(((ndim + signature) / 2)), int(((ndim - signature) / 2)))
        from sage.manifolds.differentiable.metric import PseudoRiemannianMetricParal
        return PseudoRiemannianMetricParal(self, name, signature=(signature[0] - signature[1]), latex_name=latex_name)

    def symplectic_form(self, name: Optional[str]=None, latex_name: Optional[str]=None):
        """Return a symplectic form on the module's domain."""
        from sage.manifolds.differentiable.symplectic_form import SymplecticFormParal
        return SymplecticFormParal(self, name, latex_name)

    def poisson_tensor(self, name: Optional[str]=None, latex_name: Optional[str]=None):
        """Return a Poisson tensor field on the module's domain."""
        from sage.manifolds.differentiable.poisson_tensor import PoissonTensorFieldParal
        return PoissonTensorFieldParal(self, name, latex_name)
class PetDataset(Dataset):
    """Image-classification dataset for pet images laid out as one directory per class.

    In ``'infer'`` stage only images are collected; otherwise each
    subdirectory name of ``data_cfg.IMG_DIR`` is treated as a class label.
    """

    def __init__(self, data_cfg, dictionary=None, transform=None, target_transform=None, stage='train'):
        """Scan ``data_cfg.IMG_DIR`` and collect image paths (and labels unless inferring)."""
        super(PetDataset, self).__init__()
        self.data_cfg = data_cfg
        self.dictionary = dictionary
        self.transform = transform
        self.target_transform = target_transform
        self.stage = stage
        self._imgs = []
        self._labels = []
        if (self.stage == 'infer'):
            # NOTE(review): os.walk yields (root, dirs, files); the second item
            # iterated here is the subdirectory list, so ``fname`` is a
            # directory name used with the IMG_SUFFIX glob — confirm intent.
            for (root, fnames, _) in sorted(os.walk(data_cfg.IMG_DIR)):
                for fname in sorted(fnames):
                    self._imgs.extend(glob.glob(os.path.join(root, fname, data_cfg.IMG_SUFFIX)))
        else:
            # Each immediate subdirectory name is a class label.
            self.cls_label = [d.name for d in os.scandir(data_cfg.IMG_DIR) if d.is_dir()]
            for (root, fnames, _) in sorted(os.walk(data_cfg.IMG_DIR)):
                for fname in sorted(fnames):
                    imgs = glob.glob(os.path.join(root, fname, data_cfg.IMG_SUFFIX))
                    self._imgs.extend(imgs)
                    # One label per image, taken from the directory name.
                    self._labels.extend([self.cls_label.index(fname) for _ in imgs])

    def __getitem__(self, idx):
        """Return the transformed image (and its label unless in 'infer' stage)."""
        (img, label) = (Image.open(self._imgs[idx]).convert('RGB'), self._labels[idx])
        if (self.transform is not None):
            img = self.transform(img)
        if (self.stage == 'infer'):
            return img
        else:
            if (self.target_transform is not None):
                label = self.target_transform(label)
            return (img, label)

    def __len__(self):
        """Return the number of collected images."""
        return len(self._imgs)
class FreeCommutativeAdditiveSemigroup(UniqueRepresentation, Parent):
    """Example of a commutative additive semigroup: the free commutative
    additive semigroup on a finite alphabet.

    Elements are formal sums of alphabet letters, stored as a dict mapping
    each letter to its multiplicity.
    """

    def __init__(self, alphabet=('a', 'b', 'c', 'd')):
        """Initialize with the generating alphabet (a tuple, so the default is immutable)."""
        self.alphabet = alphabet
        Parent.__init__(self, category=CommutativeAdditiveSemigroups())

    def _repr_(self):
        return ('An example of a commutative semigroup: the free commutative semigroup generated by %s' % (self.alphabet,))

    def summation(self, x, y):
        """Return the sum of ``x`` and ``y`` by adding multiplicities letterwise."""
        assert (x in self)
        assert (y in self)
        return self(((a, (x.value[a] + y.value[a])) for a in self.alphabet))

    # The source contained a garbled bare ``_method`` token here (a NameError
    # at class-creation time); restored to the ``@cached_method`` decorator.
    @cached_method
    def additive_semigroup_generators(self):
        """Return the family of generators, one per alphabet letter."""
        return Family([self(((a, 1),)) for a in self.alphabet])

    def an_element(self):
        """Return a sample element whose multiplicities are the letters' alphabet positions."""
        return self(((a, (ord(a) - 96)) for a in self.alphabet))

    class Element(ElementWrapper):
        def __init__(self, parent, iterable):
            """Build an element from (letter, multiplicity) pairs; missing letters get 0."""
            d = {a: 0 for a in parent.alphabet}
            for (a, c) in iterable:
                d[a] = c
            ElementWrapper.__init__(self, parent, d)

        def _repr_(self):
            """Render as e.g. ``2*a + b``; zero multiplicities are omitted."""
            d = self.value
            result = ' + '.join(((('%s*%s' % (d[a], a)) if (d[a] != 1) else a) for a in sorted(d.keys()) if (d[a] != 0)))
            return ('0' if (result == '') else result)

        def __hash__(self):
            """Hash on the (letter, multiplicity) items so equal elements hash equally."""
            return hash(tuple(self.value.items()))
def test_RecordArray_NumpyArray_four():
    """An empty RecordArray of fixed length maps to an awkward Record column type."""
    record_array = ak.contents.recordarray.RecordArray([], None, 10)
    frame = ak.to_rdataframe({'four': record_array})
    column_type = str(frame.GetColumnType('four'))
    assert column_type.startswith('awkward::Record_')
def test_prod_two_funs():
    """Multiplying two parameters yields a Product over exactly those functions, in order."""
    first = optplan.Parameter()
    second = optplan.Parameter()
    product = first * second
    assert isinstance(product, optplan.Product)
    assert product.functions == [first, second]
# The source contained a garbled bare ``_vision`` token here (a NameError at
# import time); restored to the ``@require_vision`` test decorator.
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    """Tests that ChineseCLIPProcessor composes its tokenizer and image processor faithfully."""

    def setUp(self):
        """Write a minimal vocab file and image-processor config into a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()
        # NOTE(review): several vocab tokens are empty strings — likely Chinese
        # characters lost in extraction; kept verbatim.
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '', '', '', '', '15', '', 'alex', '##andra', ',', '', '-', 't', 'shirt']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
        image_processor_map = {'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0., 0.4578275, 0.], 'image_std': [0., 0., 0.], 'do_convert_rgb': True}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp dir."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) tokenizer loaded from the temp dir."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Image processor loaded from the temp dir."""
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random CHW uint8 image converted to PIL HWC."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """Round-tripping through save/from_pretrained preserves both sub-components."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed at load time must reach both sub-components."""
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(cls_token='(CLS)', sep_token='(SEP)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, cls_token='(CLS)', sep_token='(SEP)', do_normalize=False)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        """Processor output for images matches the bare image processor's output."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)

    def test_tokenizer(self):
        """Processor output for text matches the bare tokenizer's output."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'Alexandra,T-shirt15'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Joint text+image call yields the combined key set; empty call raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'Alexandra,T-shirt15'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """batch_decode is forwarded verbatim to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """The processor's output keys agree with its declared model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'Alexandra,T-shirt15'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
def get_nnet(name, **kwargs):
    """Instantiate the backbone network identified by ``name``.

    Imports are deferred so only the selected backbone's module is loaded.
    Raises NotImplementedError for unknown names.
    """
    if name == 'uvit':
        from libs.uvit import UViT
    elif name == 'uvit_t2i':
        from libs.uvit_t2i import UViT
    else:
        raise NotImplementedError(name)
    return UViT(**kwargs)
def gen_template_struct(struct_name, args, codeBody, speicalized=None, set_default=True, export_args=True):
    """Emit a templated struct definition: template head followed by the struct body.

    When ``export_args`` is False the template arguments are not re-exported
    inside the body.  (Helper calls happen in the same order as before, so any
    side effects are preserved.)
    """
    head = gen_template_head(args, set_default)
    body = export_template_args(args) + codeBody
    if export_args is False:
        body = codeBody
    return head + gen_struct(struct_name, body, speicalized)
def _get_worker_env(worker_id, config, partitions, search):
    """Assemble the environment-variable dict used to spawn a Parallax PS-mode worker.

    :param worker_id: index of the worker within the resource info
    :param config: parallax config object exposing ``resource_info``
    :param partitions: optional graph-partitioning spec to forward
    :param search: whether partition-search mode is enabled
    :return: dict of environment variables for the worker process
    """
    workers = config.resource_info['worker']
    worker_info = workers[worker_id]
    num_workers = len(workers)
    # Previously a bare ``except`` swallowed every error here; only a missing
    # variable should fall back to the default log level.
    parallax_log_level = os.environ.get('PARALLAX_LOG_LEVEL', logging.INFO)
    env = {'CUDA_VISIBLE_DEVICES': ','.join((str(gpuid) for gpuid in worker_info['gpus'])), 'PARALLAX_LOG_LEVEL': parallax_log_level, PARALLAX_RUN_OPTION: PARALLAX_RUN_PS, PARALLAX_RESOURCE_INFO: serialize_resource_info(config.resource_info), PARALLAX_WORKER_ID: worker_id, PARALLAX_NUM_WORKERS: num_workers, PARALLAX_SEARCH: search}
    if partitions:
        env[PARALLAX_PARTITIONS] = partitions
    return env
def test_event():
    """Events order by time first; equal times fall back to priority (default +inf)."""
    default_a = Event(0, None)
    assert default_a.time == 0
    assert default_a.priority == math.inf
    default_b = Event(5, None)
    assert default_b.time == 5
    assert default_b.priority == math.inf
    prioritized = Event(5, None, 1)
    assert prioritized.time == 5
    assert prioritized.priority == 1
    # Earlier time wins outright; at equal time the lower priority value wins.
    assert default_a < default_b
    assert default_a < prioritized
    assert prioritized < default_b
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Save a training checkpoint for ``epoch`` and prune the checkpoint that aged out.

    The checkpoint bundles the unwrapped model weights, optimizer and loss-scaler
    state, the args namespace, and optionally the EMA weights.  ``model`` is
    accepted for signature compatibility but only ``model_without_ddp`` is
    serialized.  Retention: ``args.save_ckpt_num`` checkpoints spaced
    ``args.save_ckpt_freq`` epochs apart.
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    checkpoint_paths = [(output_dir / ('checkpoint-%s.pth' % epoch_name))]
    for checkpoint_path in checkpoint_paths:
        to_save = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args}
        if (model_ema is not None):
            to_save['model_ema'] = get_state_dict(model_ema.ema)
        # Only rank 0 actually writes the file in distributed runs.
        save_on_master(to_save, checkpoint_path)
    if (is_main_process() and isinstance(epoch, int)):
        # Delete the checkpoint that has fallen outside the retention window.
        to_del = (epoch - (args.save_ckpt_num * args.save_ckpt_freq))
        old_ckpt = (output_dir / ('checkpoint-%s.pth' % to_del))
        if os.path.exists(old_ckpt):
            os.remove(old_ckpt)
class AutoTuner():
    """Base auto-tuner for a DaCe SDFG; concrete tuners override ``optimize``."""

    def __init__(self, sdfg: dace.SDFG) -> None:
        # Keep a handle on the SDFG to be tuned.
        self._sdfg = sdfg

    def optimize(self, apply: bool=True, measurements: int=30) -> Dict[(Any, Any)]:
        """Tune the SDFG; the base implementation is a stub returning no results.

        :param apply: whether tuned configurations should be applied to the SDFG
        :param measurements: number of measurement runs per candidate
        :return: mapping of tuned subjects to chosen configurations (empty here)
        """
        return {}
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=(- 100), avg_non_ignore=False, **kwargs):
    """Binary cross-entropy (with logits) supporting ignored labels and weighting.

    Accepts either per-class logits ``[N, C]`` / ``[N, C, H, W]`` with index
    labels, or single-channel logits ``[N, 1, H, W]`` with binary labels.
    Labels equal to ``ignore_index`` are masked out; with ``avg_non_ignore``
    the mean is taken over non-ignored elements only.
    """
    if (pred.size(1) == 1):
        # Single-logit case: labels must be binary (ignoring ignore_index).
        assert (label[(label != ignore_index)].max() <= 1), 'For pred with shape [N, 1, H, W], its label must have at most 2 classes'
        pred = pred.squeeze(1)
    if (pred.dim() != label.dim()):
        assert (((pred.dim() == 2) and (label.dim() == 1)) or ((pred.dim() == 4) and (label.dim() == 3))), 'Only pred shape [N, C], label shape [N] or pred shape [N, C, H, W], label shape [N, H, W] are supported'
        # Index labels: expand to one-hot and derive weight / validity masks.
        (label, weight, valid_mask) = _expand_onehot_labels(label, weight, pred.shape, ignore_index)
    else:
        # Already-expanded labels: mask out negatives and ignore_index entries.
        valid_mask = ((label >= 0) & (label != ignore_index)).float()
        if (weight is not None):
            weight = (weight * valid_mask)
        else:
            weight = valid_mask
    if ((reduction == 'mean') and (avg_factor is None) and avg_non_ignore):
        # Average over non-ignored elements only.
        avg_factor = valid_mask.sum().item()
    loss = F.binary_cross_entropy_with_logits(pred, label.float(), pos_weight=class_weight, reduction='none')
    loss = weight_reduce_loss(loss, weight, reduction=reduction, avg_factor=avg_factor)
    return loss
def val_test_split(dataset, val_size=5000, batch_size=512, num_workers=5, pin_memory=False):
    """Split ``dataset`` into validation and test loaders with a fixed seed (42).

    The first ``val_size`` samples (after the seeded shuffle) form the
    validation set; everything else forms the test set.  Neither loader
    shuffles at iteration time.
    """
    test_size = len(dataset) - val_size
    splits = data_utils.random_split(
        dataset, (val_size, test_size), generator=torch.Generator().manual_seed(42))
    dataset_val, dataset_test = splits

    def _make_loader(subset):
        # Shared loader configuration for both splits.
        return data_utils.DataLoader(
            subset, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=pin_memory)

    return (_make_loader(dataset_val), _make_loader(dataset_test))
class ExperimentConfig(BaseConfig):
    """Top-level experiment configuration.

    Aggregates the sub-configs (steps, network, dynamics, schedules, ...) and
    derives runtime attributes (seed, batch sizes, precision aliases, backend)
    in ``__post_init__``.
    """
    wandb: Any
    steps: Steps
    framework: str
    loss: LossConfig
    network: NetworkConfig
    conv: ConvolutionConfig
    net_weights: NetWeights
    dynamics: DynamicsConfig
    learning_rate: LearningRateConfig
    annealing_schedule: AnnealingSchedule
    gradient_accumulation_steps: int = 1
    restore: bool = True
    save: bool = True
    c1: float = 0.0
    port: str = '2345'
    compile: bool = True
    profile: bool = False
    init_aim: bool = True
    init_wandb: bool = True
    use_wandb: bool = True
    use_tb: bool = False
    debug_mode: bool = False
    default_mode: bool = True
    print_config: bool = True
    precision: str = 'float32'
    ignore_warnings: bool = True
    backend: str = 'hvd'
    seed: Optional[int] = None
    ds_config_path: Optional[Any] = None
    # BUG FIX: `name` was declared twice; the duplicate declaration is removed.
    name: Optional[str] = None
    width: Optional[int] = None
    nchains: Optional[int] = None
    compression: Optional[str] = None

    def __post_init__(self):
        """Derive runtime attributes and normalize user-provided settings."""
        self.env_config = EnvConfig()
        if (self.seed is None):
            import numpy as np
            # BUG FIX: `np.random.randint(0)` raises ValueError (empty range
            # [0, 0)); draw from the full non-negative int32 range instead.
            self.seed = np.random.randint(2 ** 31 - 1)
            logger.warning(f'No seed specified, using random seed: {self.seed}')
        # NOTE(review): `self.env` duplicates `self.env_config`; both are kept
        # for backward compatibility with existing readers — confirm and merge.
        self.env = EnvConfig()
        self.ds_config = {}
        self.xdim = self.dynamics.xdim
        self.xshape = self.dynamics.xshape
        # Effective global batch = world_size * per-rank chains * grad-accum.
        self.micro_batch_size = self.dynamics.nchains
        self.global_batch_size = ((self.env.world_size * self.micro_batch_size) * self.gradient_accumulation_steps)
        if (self.ds_config_path is None):
            fpath = Path(CONF_DIR).joinpath('ds_config.yaml')
            self.ds_config_path = fpath.resolve().as_posix()
        # Canonicalize precision aliases (e.g. 'half' -> 'fp16').
        if (self.precision in FP16_SYNONYMS):
            self.precision = 'fp16'
        elif (self.precision in BF16_SYNONYMS):
            self.precision = 'bf16'
        elif (self.precision in FP32_SYNONYMS):
            self.precision = 'float32'
        elif (self.precision in FP64_SYNONYMS):
            self.precision = 'float64'
        w = int(os.environ.get('COLUMNS', 200))
        self.width = (w if (self.width is None) else self.width)
        # TensorFlow only supports Horovod; PyTorch may use hvd/DDP/DeepSpeed.
        if (self.framework in SYNONYMS['tensorflow']):
            self.backend = 'hvd'
        elif (self.framework in SYNONYMS['pytorch']):
            if (self.backend is None):
                logger.warning('Backend not specified, using DDP')
                self.backend = 'DDP'
            assert (self.backend.lower() in ['hvd', 'horovod', 'ddp', 'ds', 'deepspeed'])
        else:
            raise ValueError(f'Unexpected value for framework: {self.framework}')
        if self.debug_mode:
            # Compilation makes stepping through code harder; disable it.
            self.compile = False
        self.annealing_schedule.setup(nera=self.steps.nera, nepoch=self.steps.nepoch)

    def load_ds_config(self, fpath: Optional[os.PathLike]=None) -> dict:
        """Load the DeepSpeed config from `fpath` (or `self.ds_config_path`).

        Supports `.json` and `.yaml`; raises TypeError on other suffixes.
        """
        fname = (self.ds_config_path if (fpath is None) else fpath)
        assert (fname is not None)
        ds_config_path = Path(fname)
        logger.info(f'Loading DeepSpeed Config from: {ds_config_path.as_posix()}')
        if (ds_config_path.suffix == '.json'):
            with ds_config_path.open('r') as f:
                ds_config = json.load(f)
            return ds_config
        if (ds_config_path.suffix == '.yaml'):
            import yaml
            with ds_config_path.open('r') as stream:
                ds_config = dict(yaml.safe_load(stream))
            return ds_config
        raise TypeError('Unexpected FileType')

    def set_ds_config(self, ds_config: dict) -> None:
        """Replace the stored DeepSpeed config dict."""
        self.ds_config = ds_config

    def to_str(self) -> str:
        """Return a path-like identifier: dynamics/conv/network/framework."""
        dynstr = self.dynamics.to_str()
        constr = self.conv.to_str()
        netstr = self.network.to_str()
        return '/'.join([dynstr, constr, netstr, self.framework])

    def get_checkpoint_dir(self) -> Path:
        """Checkpoint directory derived from the config identifier."""
        return Path(CHECKPOINTS_DIR).joinpath(self.to_str())

    def rank(self):
        """Return this process's distributed rank for the active backend.

        NOTE: returns None implicitly for unmatched framework/backend combos.
        """
        if (self.framework in SYNONYMS['pytorch']):
            if (self.backend.lower() in SYNONYMS['horovod']):
                import horovod.torch as hvd
                if (not hvd.is_initialized()):
                    hvd.init()
                return hvd.rank()
            elif (self.backend.lower() in SYNONYMS['DDP']):
                # torchrun/launch exports RANK; default to 0 for single-process.
                return int(os.environ.get('RANK', 0))
            elif (self.backend.lower() in SYNONYMS['deepspeed']):
                import torch.distributed as dist
                return dist.get_rank()
        elif (self.framework in SYNONYMS['tensorflow']):
            import horovod.tensorflow as hvd
            if (not hvd.is_initialized()):
                hvd.init()
            return hvd.rank()
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
    """Compute the gradient of a 3-D convolution w.r.t. its weight tensor.

    Mirrors ``torch.nn.grad.conv3d_weight``: the weight gradient is itself a
    convolution of the (reshaped) input with the (reshaped) output gradient.

    Args:
        input: Forward-pass input of shape [N, C_in, D, H, W].
        weight_size: Shape of the weight tensor whose gradient is returned.
        grad_output: Gradient w.r.t. the conv output, [N, C_out, D', H', W'].
        stride, padding, dilation, groups: Forward-pass conv settings
            (ints or triples; normalized via ``_triple``).

    Returns:
        Gradient tensor of shape ``weight_size``.
    """
    stride = _triple(stride)
    padding = _triple(padding)
    dilation = _triple(dilation)
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]
    min_batch = input.shape[0]
    # Replicate grad_output across input channels and fold (batch, channel)
    # into one "channel" axis so a single grouped conv computes all gradients.
    grad_output = grad_output.repeat(1, (in_channels // groups), 1, 1, 1)
    grad_output = grad_output.contiguous().view((grad_output.shape[0] * grad_output.shape[1]), 1, grad_output.shape[2], grad_output.shape[3], grad_output.shape[4])
    input = input.contiguous().view(1, (input.shape[0] * input.shape[1]), input.shape[2], input.shape[3], input.shape[4])
    # NOTE: `dilation` and `stride` deliberately swap positions here — the
    # forward stride acts as dilation in the weight-gradient convolution,
    # matching torch.nn.grad's formulation.
    grad_weight = torch.conv3d(input, grad_output, None, dilation, padding, stride, (in_channels * min_batch))
    grad_weight = grad_weight.contiguous().view(min_batch, (grad_weight.shape[1] // min_batch), grad_weight.shape[2], grad_weight.shape[3], grad_weight.shape[4])
    # Sum over the batch and crop spatial dims to the requested kernel extents.
    return grad_weight.sum(dim=0).view((in_channels // groups), out_channels, grad_weight.shape[2], grad_weight.shape[3], grad_weight.shape[4]).transpose(0, 1).narrow(2, 0, weight_size[2]).narrow(3, 0, weight_size[3]).narrow(4, 0, weight_size[4])
def log_custom(new_meter_fn: Callable[([], Meter)], key: str, *args, priority: int=50, **kwargs):
    """Record a value under `key` in every active aggregator.

    A meter is lazily created (via `new_meter_fn`) the first time `key` is
    seen in an aggregator; subsequent calls just update the existing meter.
    """
    for aggregator in get_active_aggregators():
        if (key not in aggregator):
            # First sighting of this key in this aggregator: register a meter.
            aggregator.add_meter(key, new_meter_fn(), priority)
        aggregator[key].update(*args, **kwargs)
def vset(seq, idfun=None, as_list=True):
    """Return the unique elements of `seq`, preserving first-seen order.

    Args:
        seq: Input iterable.
        idfun: Optional key function; two elements are considered duplicates
            when their keys compare equal (keys must be hashable).
        as_list: If True return a list, otherwise a lazy generator.

    Returns:
        list or generator of unique elements in first-occurrence order.
    """
    # Idiom fix: use a real set for membership tracking instead of a dict
    # with None values; the two duplicated generators are merged into one.
    def _unique(items):
        seen = set()
        for item in items:
            key = item if (idfun is None) else idfun(item)
            if key not in seen:
                seen.add(key)
                yield item

    res = _unique(seq)
    return (list(res) if as_list else res)
def person_embed(speaker_ids, person_vec):
    """Look up speaker embeddings for each turn of each dialogue.

    Args:
        speaker_ids: Nested iterable [dialogue][turn] of speaker indices;
            -1 marks "no speaker" and maps to a zero vector.
        person_vec: Indexable collection of per-speaker embedding vectors
            (each supporting ``.tolist()``).

    Returns:
        FloatTensor of shape [n_dialogues, n_turns, embed_dim].
    """
    # Generalization: derive the zero-vector width from person_vec instead of
    # the previous hard-coded 100 (kept as fallback for empty person_vec).
    dim = len(person_vec[0]) if len(person_vec) > 0 else 100
    speaker_vec = []
    for turns in speaker_ids:
        speaker_vec.append([
            (person_vec[int(i)].tolist() if (i != (- 1)) else ([0] * dim))
            for i in turns
        ])
    return torch.FloatTensor(speaker_vec)
def best_saving(working_dir, epoch, model, fusion_model, optimizer):
    """Persist the current best checkpoint to `<working_dir>/model_best.pt`.

    Saves the epoch number plus state dicts for the model, the fusion model,
    and the optimizer.
    """
    checkpoint = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'fusion_model_state_dict': fusion_model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    best_name = '{}/model_best.pt'.format(working_dir)
    torch.save(checkpoint, best_name)
def train_epoch(logger, model, optimizer, scheduler, dataset, train=True):
    """Run one epoch over the temporal graph snapshots in `dataset`.

    For each snapshot i, edge labels are built from the following
    `cfg.transaction.horizon` snapshots; when `train` is True the model is
    updated, otherwise only metrics are logged.
    """
    model.train()
    time_start = time.time()
    # Stop early so every snapshot has a full prediction horizon ahead of it.
    for i in range((len(dataset) - cfg.transaction.horizon)):
        optimizer.zero_grad()
        batch = dataset[i].clone()
        # BUG FIX: removed leftover debugging breakpoint `pdb.set_trace()`
        # that halted every iteration of the training loop.
        batch.node_degree_new = node_degree(batch.edge_index, n=batch.node_degree_existing.shape[0])
        (edge_label, edge_label_index) = get_edge_label(dataset, i, cfg.transaction.horizon, cfg.transaction.pred_mode)
        batch.edge_label = edge_label
        batch.edge_label_index = edge_label_index
        batch.to(torch.device(cfg.device))
        (pred, true) = model(batch)
        (loss, pred_score) = compute_loss(pred, true)
        if train:
            loss.backward()
            optimizer.step()
        logger.update_stats(true=true.detach().cpu(), pred=pred_score.detach().cpu(), loss=loss.item(), lr=scheduler.get_last_lr()[0], time_used=(time.time() - time_start), params=cfg.params)
        time_start = time.time()
    if train:
        # One LR-scheduler step per epoch, not per snapshot.
        scheduler.step()
def cnnmodel(frame1_xyz, frame1_rgb, frame2_xyz, frame2_rgb):
    """Build the two-frame feature/correlation graph (TF1-style).

    Resizes both RGB frames to 480x640, extracts ResNet-50 features (second
    frame reuses the first frame's variables), encodes the XYZ inputs, and
    returns the cost volume correlating the two RGB feature maps.

    NOTE(review): `rad` is not defined here — it must be a module-level
    global; confirm. The XYZ encoder outputs (`frame1_feat`, `frame2_feat`)
    do not contribute to the returned value, but the calls still create graph
    variables/ops, so they are presumably consumed elsewhere — verify before
    removing.
    """
    frame1_rgb = tf.image.resize_images(frame1_rgb, [480, 640])
    frame2_rgb = tf.image.resize_images(frame2_rgb, [480, 640])
    (frame1_feat_rgb, _) = get_network('resnet50', frame1_rgb, weight_decay=1e-05, is_training=True)
    # reuse=True shares the ResNet weights between the two frames.
    (frame2_feat_rgb, _) = get_network('resnet50', frame2_rgb, weight_decay=1e-05, is_training=True, reuse=True)
    frame1_feat = encoder(frame1_xyz)
    frame2_feat = encoder(frame2_xyz, reuse=True)
    cc_o = correlation(frame2_feat_rgb, frame1_feat_rgb, 1, rad, 1, 1, rad)
    return cc_o
def main(unused_argv):
    """Evaluation loop for a mip-NeRF-style model.

    Repeatedly restores the latest checkpoint, renders every test image,
    computes image metrics (PSNR/SSIM/LPIPS/census and optional disparity,
    normal, and DTU-masked variants), and either logs them to TensorBoard or
    writes them to disk, until the final checkpoint has been evaluated.
    """
    # Hide accelerators from TensorFlow so JAX owns the devices.
    tf.config.experimental.set_visible_devices([], 'GPU')
    tf.config.experimental.set_visible_devices([], 'TPU')
    config = configs.load_config(save_config=False)
    dataset = datasets.load_dataset('test', config.data_dir, config)
    # BUG FIX: random.PRNGKey() requires a seed argument — the original call
    # raised a TypeError. The seed only affects throwaway init variables here.
    (model, init_variables) = models.construct_mipnerf(random.PRNGKey(0), dataset.peek()['rays'], config)
    optimizer = flax.optim.Adam(config.lr_init).create(init_variables)
    state = utils.TrainState(optimizer=optimizer)
    del optimizer, init_variables

    def render_eval_fn(variables, _, rays):
        # pmapped below; gather per-device results along the 'batch' axis.
        return jax.lax.all_gather(model.apply(variables, None, rays, resample_padding=config.resample_padding_final, compute_extras=True), axis_name='batch')
    render_eval_pfn = jax.pmap(render_eval_fn, in_axes=(None, None, 0), donate_argnums=2, axis_name='batch')

    def ssim_fn(x, y):
        return structural_similarity(x, y, multichannel=True, data_range=1.0, win_size=11, gaussian_weights=True, sigma=1.5, use_sample_covariance=False, K1=0.01, K2=0.03)
    census_fn = jax.jit(functools.partial(math.compute_census_err, epsilon=CENSUS_EPSILON))

    def load_lpips():
        """Build a TF1 session around a frozen AlexNet LPIPS graph."""
        graph = tf.compat.v1.Graph()
        session = tf.compat.v1.Session(graph=graph)
        with graph.as_default():
            input1 = tf.compat.v1.placeholder(tf.float32, [None, None, 3])
            input2 = tf.compat.v1.placeholder(tf.float32, [None, None, 3])
            with tf.compat.v1.gfile.Open('alex_net.pb', 'rb') as f:
                graph_def = tf.compat.v1.GraphDef()
                graph_def.ParseFromString(f.read())
            # LPIPS expects NCHW inputs scaled to [-1, 1].
            target = tf.compat.v1.transpose(((input1[tf.compat.v1.newaxis] * 2.0) - 1.0), [0, 3, 1, 2])
            pred = tf.compat.v1.transpose(((input2[tf.compat.v1.newaxis] * 2.0) - 1.0), [0, 3, 1, 2])
            tf.compat.v1.import_graph_def(graph_def, input_map={'0:0': target, '1:0': pred})
            distance = graph.get_operations()[(- 1)].outputs[0]

        def lpips_distance(img1, img2):
            with graph.as_default():
                return session.run(distance, {input1: img1, input2: img2})[(0, 0, 0, 0)]
        return lpips_distance

    if config.eval_disable_lpips:
        lpips_fn = (lambda x, y: np.nan)
        print('WARNING: LPIPS calculation not supported. NaN values used instead.')
    else:
        lpips_fn = load_lpips()
        print('Activate LPIPS calculation with AlexNet.')
    last_step = 0
    out_dir = path.join(config.checkpoint_dir, ('path_renders' if config.render_path else 'test_preds'))
    path_fn = (lambda x: path.join(out_dir, x))
    if (not config.eval_only_once):
        summary_writer = tensorboard.SummaryWriter(path.join(config.checkpoint_dir, 'eval'))
    while True:
        try:
            state = checkpoints.restore_checkpoint(config.checkpoint_dir, state)
        except Exception:
            # BUG FIX: narrowed the original bare `except:` so that
            # KeyboardInterrupt/SystemExit still propagate.
            # Fallback: adapt a pre-trained checkpoint with a different
            # MLP layout by remapping/removing Dense layers.
            print('Using pre-trained model.')
            state_dict = checkpoints.restore_checkpoint(config.checkpoint_dir, None)
            for i in [9, 17]:
                del state_dict['optimizer']['target']['params']['MLP_0'][f'Dense_{i}']
            state_dict['optimizer']['target']['params']['MLP_0']['Dense_9'] = state_dict['optimizer']['target']['params']['MLP_0']['Dense_18']
            state_dict['optimizer']['target']['params']['MLP_0']['Dense_10'] = state_dict['optimizer']['target']['params']['MLP_0']['Dense_19']
            state_dict['optimizer']['target']['params']['MLP_0']['Dense_11'] = state_dict['optimizer']['target']['params']['MLP_0']['Dense_20']
            # NOTE(review): the key 'optimizerd' looks like a typo but is kept
            # as-is — confirm against the pre-trained checkpoint format.
            del state_dict['optimizerd']
            state = flax.serialization.from_state_dict(state, state_dict)
        step = int(state.optimizer.state.step)
        if (step <= last_step):
            print(f'Checkpoint step {step} <= last step {last_step}, sleeping.')
            time.sleep(10)
            continue
        print(f'Evaluating checkpoint at step {step}.')
        if (config.eval_save_output and (not utils.isdir(out_dir))):
            utils.makedirs(out_dir)
        # Pick which rendered images to showcase in TensorBoard.
        key = random.PRNGKey((0 if config.deterministic_showcase else step))
        perm = random.permutation(key, dataset.size)
        showcase_indices = np.sort(perm[:config.num_showcase_images])
        metrics = []
        showcases = []
        for idx in range(dataset.size):
            print(f'Evaluating image {(idx + 1)}/{dataset.size}')
            eval_start_time = time.time()
            batch = next(dataset)
            rendering = models.render_image(functools.partial(render_eval_pfn, state.optimizer.target), batch['rays'], None, config)
            print(f'Rendered in {(time.time() - eval_start_time):0.3f}s')
            # Only the lead host computes metrics and writes output.
            if (jax.host_id() != 0):
                continue
            if ((not config.eval_only_once) and (idx in showcase_indices)):
                showcase_idx = (idx if config.deterministic_showcase else len(showcases))
                showcases.append((showcase_idx, rendering, batch))
            if (not config.render_path):
                metric = {}
                metric['psnr'] = float(math.mse_to_psnr(((rendering['rgb'] - batch['rgb']) ** 2).mean()))
                metric['ssim'] = float(ssim_fn(rendering['rgb'], batch['rgb']))
                metric['lpips'] = float(lpips_fn(rendering['rgb'], batch['rgb']))
                metric['avg_err'] = float(math.compute_avg_error(psnr=metric['psnr'], ssim=metric['ssim'], lpips=metric['lpips']))
                metric['census_err'] = float(census_fn(rendering['rgb'], batch['rgb']))
                if config.compute_disp_metrics:
                    disp = (1 / (1 + rendering['distance_mean']))
                    metric['disp_mse'] = float(((disp - batch['disps']) ** 2).mean())
                if config.compute_normal_metrics:
                    one_eps = (1 - np.finfo(np.float32).eps)
                    metric['normal_mae'] = float(np.arccos(np.clip(np.sum((batch['normals'] * rendering['normals']), axis=(- 1)), (- one_eps), one_eps)).mean())
                    weights = (rendering['acc'] * batch['alphas'])
                    normalized_normals_gt = (batch['normals'] / np.sqrt(np.maximum(np.sum((batch['normals'] ** 2), axis=(- 1), keepdims=True), (- (one_eps - 1)))))
                    normalized_normals = (rendering['normals'] / np.sqrt(np.maximum(np.sum((rendering['normals'] ** 2), axis=(- 1), keepdims=True), (- (one_eps - 1)))))
                    metric['weighted_normal_mae'] = ((((weights * np.arccos(np.clip((normalized_normals * normalized_normals_gt).sum((- 1)), (- one_eps), one_eps))).sum() / weights.sum()) * 180.0) / np.pi)
                if (config.dataset_loader == 'dtu'):
                    # DTU additionally reports foreground-masked metrics.
                    rgb = batch['rgb']
                    rgb_hat = rendering['rgb']
                    mask = batch['mask']
                    mask_bin = (mask == 1.0)
                    rgb_fg = ((rgb * mask) + (1 - mask))
                    rgb_hat_fg = ((rgb_hat * mask) + (1 - mask))
                    metric['psnr_masked'] = float(math.mse_to_psnr(((rgb - rgb_hat)[mask_bin] ** 2).mean()))
                    metric['ssim_masked'] = float(ssim_fn(rgb_hat_fg, rgb_fg))
                    metric['lpips_masked'] = float(lpips_fn(rgb_hat_fg, rgb_fg))
                    metric['avg_err_masked'] = float(math.compute_avg_error(psnr=metric['psnr_masked'], ssim=metric['ssim_masked'], lpips=metric['lpips_masked']))
                for (m, v) in metric.items():
                    print(f'{m:10s} = {v:.4f}')
                metrics.append(metric)
            if (config.eval_save_output and (config.eval_render_interval > 0)):
                if ((idx % config.eval_render_interval) == 0):
                    utils.save_img_u8(rendering['rgb'], path_fn(f'color_{idx:03d}.png'))
                    utils.save_img_u8(((rendering['normals'] / 2.0) + 0.5), path_fn(f'normals_{idx:03d}.png'))
                    utils.save_img_f32(rendering['distance_mean'], path_fn(f'distance_mean_{idx:03d}.tiff'))
                    utils.save_img_f32(rendering['distance_median'], path_fn(f'distance_median_{idx:03d}.tiff'))
                    utils.save_img_f32(rendering['acc'], path_fn(f'acc_{idx:03d}.tiff'))
                    for (k, v) in vis.visualize_suite(rendering, batch['rays'], config).items():
                        if ((k == 'depth_mean') or (k == 'depth_median') or (k == 'depth_std') or (k == 'rgb_std')):
                            utils.save_img_u8(v, path_fn(f'{k}_{idx:03d}.png'))
        if ((not config.eval_only_once) and (jax.host_id() == 0)):
            for name in list(metrics[0].keys()):
                summary_writer.scalar(name, np.mean([m[name] for m in metrics]), step)
            for (i, r, b) in showcases:
                for (k, v) in vis.visualize_suite(r, b['rays'], config).items():
                    summary_writer.image(f'pred_{k}_{i}', v, step)
                if (not config.render_path):
                    summary_writer.image(f'target_{i}', b['rgb'], step)
        if (config.eval_save_output and (not config.render_path) and (jax.host_id() == 0)):
            for name in list(metrics[0].keys()):
                with utils.open_file(path_fn(f'metric_{name}_{step}.txt'), 'w') as f:
                    f.write(' '.join([str(m[name]) for m in metrics]))
        if config.eval_only_once:
            break
        if (int(step) >= config.max_steps):
            break
        last_step = step
def test_shapefactor(backend):
    """Check shapefactor_combined over two channels (1 + 2 bins).

    shapefac1 scales the single bin of chan_one; shapefac2 scales the two
    bins of chan_two. Verifies the modifier tensor shape and values for both
    the unbatched and batch_size=4 cases.

    NOTE(review): the 'background' entry of shapefac2 declares
    'type': 'normfactor' while its name is a shapefactor — confirm whether
    this mismatch is an intentional fixture or a typo.
    """
    # Parameter map: shapefac1 -> slice(0, 1), shapefac2 -> slice(1, 3).
    mc = MockConfig(par_map={'shapefac1': {'paramset': unconstrained(name='shapefac1', is_scalar=False, n_parameters=1, inits=[0], bounds=[[0, 10]], fixed=False), 'slice': slice(0, 1)}, 'shapefac2': {'paramset': unconstrained(name='shapefac2', is_scalar=False, n_parameters=2, inits=[0, 0], bounds=[[0, 10], [0, 10]], fixed=False), 'slice': slice(1, 3)}}, par_order=['shapefac1', 'shapefac2'], samples=['signal', 'background'], channels=['chan_one', 'chan_two'], channel_nbins={'chan_one': 1, 'chan_two': 2})
    # Masks select which of the 3 flattened bins each shapefactor touches.
    mega_mods = {'shapefactor/shapefac1': {'signal': {'type': 'shapefactor', 'name': 'shapefac1', 'data': {'mask': [True, False, False]}}, 'background': {'type': 'shapefactor', 'name': 'shapefac1', 'data': {'mask': [True, False, False]}}}, 'shapefactor/shapefac2': {'signal': {'type': 'shapefactor', 'name': 'shapefac2', 'data': {'mask': [False, True, True]}}, 'background': {'type': 'normfactor', 'name': 'shapefac2', 'data': {'mask': [False, True, True]}}}}
    hsc = shapefactor_combined([('shapefac1', 'shapefactor'), ('shapefac2', 'shapefactor')], mc, mega_mods)
    # Unbatched: result shape is (modifiers, samples, batch=1, bins).
    mod = hsc.apply(pyhf.tensorlib.astensor([2.0, 3.0, 4.0]))
    shape = pyhf.tensorlib.shape(mod)
    assert (shape == (2, 2, 1, 3))
    mod = np.asarray(pyhf.tensorlib.tolist(mod))
    # Masked-out bins stay at the identity multiplier 1.0.
    assert np.allclose(mod[(0, 0, 0)], [2.0, 1.0, 1.0])
    assert np.allclose(mod[(1, 0, 0)], [1.0, 3.0, 4.0])
    # Batched: one parameter vector per batch entry.
    hsc = shapefactor_combined([('shapefac1', 'shapefactor'), ('shapefac2', 'shapefactor')], mc, mega_mods, batch_size=4)
    mod = hsc.apply(pyhf.tensorlib.astensor([[2.0, 3.0, 4.0], [5.0, 6.0, 7.0], [8.0, 9.0, 10.0], [11.0, 12.0, 13.0]]))
    shape = pyhf.tensorlib.shape(mod)
    assert (shape == (2, 2, 4, 3))
    mod = np.asarray(pyhf.tensorlib.tolist(mod))
    assert np.allclose(mod[(0, 0, 0)], [2.0, 1.0, 1.0])
    assert np.allclose(mod[(0, 0, 1)], [5.0, 1.0, 1.0])
    assert np.allclose(mod[(0, 0, 2)], [8.0, 1.0, 1.0])
    assert np.allclose(mod[(0, 0, 3)], [11.0, 1.0, 1.0])
    assert np.allclose(mod[(1, 0, 0)], [1.0, 3.0, 4.0])
    assert np.allclose(mod[(1, 0, 1)], [1.0, 6.0, 7.0])
    assert np.allclose(mod[(1, 0, 2)], [1.0, 9.0, 10.0])
    assert np.allclose(mod[(1, 0, 3)], [1.0, 12.0, 13.0])
def _make_tuple_bunch(typename, field_names, extra_field_names=None, module=None):
    """Create a namedtuple-like class with additional, non-tuple fields.

    Instances behave as a tuple of `field_names` (positional, immutable),
    while `extra_field_names` are required keyword-only attributes stored in
    the instance dict. All fields are readable as attributes.

    The __new__/__init__/__setattr__ methods are generated as source text and
    exec'd (same technique as collections.namedtuple), so the f-string below
    is compiled at runtime — its embedded indentation is significant.
    """
    if (len(field_names) == 0):
        raise ValueError('field_names must contain at least one name')
    if (extra_field_names is None):
        extra_field_names = []
    _validate_names(typename, field_names, extra_field_names)
    # Interning makes the many identical name strings share storage.
    typename = _sys.intern(str(typename))
    field_names = tuple(map(_sys.intern, field_names))
    extra_field_names = tuple(map(_sys.intern, extra_field_names))
    all_names = (field_names + extra_field_names)
    arg_list = ', '.join(field_names)
    full_list = ', '.join(all_names)
    repr_fmt = ''.join(('(', ', '.join((f'{name}=%({name})r' for name in all_names)), ')'))
    tuple_new = tuple.__new__
    (_dict, _tuple, _zip) = (dict, tuple, zip)
    # Generated source: tuple part comes from positional args; extra fields
    # are validated and stored in __dict__; tuple fields are read-only.
    s = f'''def __new__(_cls, {arg_list}, **extra_fields):
    return _tuple_new(_cls, ({arg_list},))
def __init__(self, {arg_list}, **extra_fields):
    for key in self._extra_fields:
        if key not in extra_fields:
            raise TypeError("missing keyword argument '%s'" % (key,))
    for key, val in extra_fields.items():
        if key not in self._extra_fields:
            raise TypeError("unexpected keyword argument '%s'" % (key,))
        self.__dict__[key] = val
def __setattr__(self, key, val):
    if key in {repr(field_names)}:
        raise AttributeError("can't set attribute %r of class %r"
                             % (key, self.__class__.__name__))
    else:
        self.__dict__[key] = val
'''
    del arg_list
    # Restricted namespace for the exec'd code.
    namespace = {'_tuple_new': tuple_new, '__builtins__': dict(TypeError=TypeError, AttributeError=AttributeError), '__name__': f'namedtuple_{typename}'}
    exec(s, namespace)
    __new__ = namespace['__new__']
    __new__.__doc__ = f'Create new instance of {typename}({full_list})'
    __init__ = namespace['__init__']
    __init__.__doc__ = f'Instantiate instance of {typename}({full_list})'
    __setattr__ = namespace['__setattr__']

    def __repr__(self):
        return (self.__class__.__name__ + (repr_fmt % self._asdict()))

    def _asdict(self):
        # Tuple fields first, then the extra (instance-dict) fields.
        out = _dict(_zip(self._fields, self))
        out.update(self.__dict__)
        return out

    def __getnewargs_ex__(self):
        # Pickle support: positional tuple args + extra-field kwargs.
        return (_tuple(self), self.__dict__)
    for method in (__new__, __repr__, _asdict, __getnewargs_ex__):
        method.__qualname__ = f'{typename}.{method.__name__}'
    class_namespace = {'__doc__': f'{typename}({full_list})', '_fields': field_names, '__new__': __new__, '__init__': __init__, '__repr__': __repr__, '__setattr__': __setattr__, '_asdict': _asdict, '_extra_fields': extra_field_names, '__getnewargs_ex__': __getnewargs_ex__}
    # Tuple fields are exposed as index-backed properties...
    for (index, name) in enumerate(field_names):
        def _get(self, index=index):
            return self[index]
        class_namespace[name] = property(_get)
    # ...and extra fields as __dict__-backed properties.
    for name in extra_field_names:
        def _get(self, name=name):
            return self.__dict__[name]
        class_namespace[name] = property(_get)
    result = type(typename, (tuple,), class_namespace)
    # Attribute the class to the caller's module so pickling works.
    if (module is None):
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if (module is not None):
        result.__module__ = module
        __new__.__module__ = module
    return result
def pre_release_work(patch=False):
    """Interactively bump the package version ahead of a release.

    Proposes a default (dev base version, patch bump, or minor bump), lets
    the user override it, writes the new version everywhere, and — for
    non-patch releases — cleans the main README's model list.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        # Patch releases must start from a released tag, not the dev branch.
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    else:
        major = default_version.major
        minor = default_version.minor
        micro = default_version.micro
        default_version = (f'{major}.{minor}.{(micro + 1)}' if patch else f'{major}.{(minor + 1)}.0')
    # Empty input keeps the proposed default.
    version = input(f'Which version are you releasing? [{default_version}]')
    if not version:
        version = default_version
    print(f'Updating version to {version}.')
    global_version_update(version, patch=patch)
    if not patch:
        print('Cleaning main README')
        clean_master_ref_in_model_list()
class InfinitePolynomialRing_dense(InfinitePolynomialRing_sparse):
    """Dense implementation of a polynomial ring in infinitely many variables.

    Unlike the sparse parent, all elements share a single underlying finite
    polynomial ring (``self._P``) that grows as larger variable indices are
    used.
    """

    def __init__(self, R, names, order):
        """Initialize over base ring `R` with generator `names` and term `order`."""
        if (not names):
            # Default to a single generator family called 'x'.
            names = ['x']
        self._max = 0
        InfinitePolynomialRing_sparse.__init__(self, R, names, order)
        self._P = self._minP

    def construction(self):
        """Return the functorial construction (functor, base ring) of self.

        NOTE(review): returned as a list rather than the usual tuple —
        presumably relied upon by callers; confirm before changing.
        """
        return [InfinitePolynomialFunctor(self._names, self._order, 'dense'), self._base]

    def tensor_with_ring(self, R):
        """Return the tensor product of self with the ring `R`.

        Tries, in order: the base ring's own ``tensor_with_ring``, its
        ``change_ring``, and finally the parent of a product of the two
        rings' identity elements.

        Raises TypeError when no coercion from the underlying ring into `R`
        exists or the product cannot be formed.
        """
        if (not R.has_coerce_map_from(self._underlying_ring)):
            raise TypeError(('we cannot tensor with ' + repr(R)))
        B = self.base_ring()
        if hasattr(B, 'tensor_with_ring'):
            return InfinitePolynomialRing(B.tensor_with_ring(R), self._names, self._order, implementation='dense')
        if hasattr(B, 'change_ring'):
            return InfinitePolynomialRing(B.change_ring(R), self._names, self._order, implementation='dense')
        try:
            # Fall back to multiplying the identities to discover a common parent.
            o = (B.one() * R.one())
        except Exception:
            raise TypeError(('we cannot tensor with ' + repr(R)))
        return InfinitePolynomialRing(o.parent(), self._names, self._order, implementation='dense')

    def polynomial_ring(self):
        """Return the current finite polynomial ring backing all elements."""
        return self._P
def param_analysis_options(output_dir):
    """Build tf profiler options for trainable-parameter statistics.

    Returns a ('scope', options) pair; when `output_dir` is set, the report
    is additionally dumped to `<output_dir>/params.txt`.
    """
    options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
    options.update(
        select=['params', 'bytes'],
        order_by='params',
        account_type_regexes=['Variable'],
    )
    if output_dir:
        options['dump_to_file'] = os.path.join(output_dir, 'params.txt')
    return ('scope', options)
def dump_paths(Graph, rating_pair, maxLen, sample_size, fw_file):
    """Mine and dump graph paths for every (user, item) rating pair.

    Node ids are formed by prefixing 'u' (user) and 'i' (item/location);
    pairs whose nodes are missing from the graph are skipped silently.
    """
    for pair in rating_pair:
        user_node = 'u' + pair[0]
        location_node = 'i' + pair[1]
        if Graph.has_node(user_node) and Graph.has_node(location_node):
            mine_paths_between_nodes(Graph, user_node, location_node, maxLen, sample_size, fw_file)
def read_in_all_data(data_path=DATA_PATH):
    """Load the Spider training examples and table schemas.

    Args:
        data_path: Directory containing 'train_spider.json' and 'tables.json'.

    Returns:
        (training_data, tables) where `tables` maps each db_id to its schema.
    """
    # BUG FIX: the original used json.load(open(...)), leaking file handles;
    # context managers guarantee the files are closed.
    with open(os.path.join(data_path, 'train_spider.json')) as f:
        training_data = json.load(f)
    with open(os.path.join(data_path, 'tables.json')) as f:
        tables_org = json.load(f)
    tables = {tab['db_id']: tab for tab in tables_org}
    return (training_data, tables)
def generate_length(args, tr_set, audio_extension):
    """Index each dataset split by audio length, longest first, into a CSV.

    For every split name in `tr_set`, locates its directory (lower- or
    upper-cased) under `args.input_data`, extracts the duration of every
    audio file in parallel, and writes `<output_path>/<name>/<split>.csv`
    with file paths sorted by descending length.
    """
    for (i, s) in enumerate(tr_set):
        if os.path.isdir(os.path.join(args.input_data, s.lower())):
            s = s.lower()
        elif os.path.isdir(os.path.join(args.input_data, s.upper())):
            s = s.upper()
        else:
            # BUG FIX: the original `assert NotImplementedError` asserted a
            # truthy class object (a no-op); actually raise on a missing split.
            raise NotImplementedError(f'Split directory not found for {s} under {args.input_data}')
        print('')
        todo = list(Path(os.path.join(args.input_data, s)).rglob(('*' + audio_extension)))
        print(f'Preprocessing data in: {s}, {len(todo)} audio files found.')
        output_dir = os.path.join(args.output_path, args.name)
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        print('Extracting audio length...', flush=True)
        tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_length)(str(file)) for file in tqdm(todo)))
        # Re-order file paths by descending length (reversed ascending argsort),
        # keeping them relative to the split directory.
        sorted_todo = [os.path.join(s, str(todo[idx]).split((s + '/'))[(- 1)]) for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo], 'length': list(reversed(sorted(tr_x))), 'label': None})
        df.to_csv(os.path.join(output_dir, (tr_set[i] + '.csv')))
    print('All done, saved at', output_dir, 'exit.')
def sine_init(m):
    """SIREN-style weight initialization for hidden layers.

    Draws weights uniformly from +-sqrt(6 / fan_in) / 30 (the factor 30 being
    the SIREN frequency scale). Modules without a `weight` attribute are left
    untouched.
    """
    if not hasattr(m, 'weight'):
        return
    with torch.no_grad():
        fan_in = m.weight.size(-1)
        bound = np.sqrt(6 / fan_in) / 30
        m.weight.uniform_(-bound, bound)
class AttentionModule(nn.Module):
    """Refine a node-attention map with a query.

    Computes fresh node attention from `feat`/`query` and AND-combines it
    with the incoming attention `attn`.
    """

    def __init__(self, **kwargs):
        super().__init__()
        # Value-vector dimensionality, taken from the module config.
        self.dim_v = kwargs['dim_v']
        self.attendNode = AttendNodeModule()
        self.attnAnd = AndModule()

    def forward(self, attn, feat, query):
        """Return attn AND attend(feat, query)."""
        fresh_attn = self.attendNode(feat, query)
        return self.attnAnd(attn, fresh_attn)
def pred(datasource, estimator_string, select, result_table, feature_columns, feature_column_names, feature_column_names_map, train_label_name, result_col_name, feature_metas={}, model_params={}, pred_params={}, save='', batch_size=1, pai_table=''):
    """Run batch prediction with a TF model and write results to a table.

    Connects either to a PAI table (when `pai_table` is given) or to the
    configured datasource, streams rows through the saved model (Keras-style
    or tf.estimator, detected from `estimator_string`), and writes predictions
    to `result_table`.

    NOTE(review): mutable default arguments (feature_metas/model_params/
    pred_params) are shared across calls — kept as-is for interface
    compatibility; confirm callers never mutate them.
    """
    estimator = import_model(estimator_string)
    model_params.update(feature_columns)
    is_estimator = is_tf_estimator(estimator)
    # Choose the data connection: PAI I/O table vs. regular datasource+SQL.
    if (pai_table != ''):
        conn = PaiIOConnection.from_table(pai_table)
        selected_cols = db.selected_cols(conn, None)
        predict_generator = db.db_generator(conn, None)
    else:
        conn = db.connect_with_data_source(datasource)
        selected_cols = db.selected_cols(conn, select)
        predict_generator = db.db_generator(conn, select)
    # Optimizer/loss params are training-only; drop them before inference.
    pop_optimizer_and_loss(model_params)
    if (pred_params is None):
        extra_result_cols = []
    else:
        extra_result_cols = pred_params.get('extra_outputs', '')
        extra_result_cols = [c.strip() for c in extra_result_cols.split(',') if c.strip()]
    if (not is_estimator):
        if (not issubclass(estimator, tf.keras.Model)):
            # Non-Keras callables still need the field metadata.
            model_params['field_metas'] = feature_metas
        print('Start predicting using keras model...')
        keras_predict(estimator, model_params, save, result_table, feature_column_names, feature_metas, train_label_name, result_col_name, conn, predict_generator, selected_cols, extra_result_cols)
    else:
        model_params['model_dir'] = save
        print('Start predicting using estimator model...')
        # NOTE(review): unlike keras_predict, estimator_predict is not passed
        # extra_result_cols — confirm whether that path supports them.
        estimator_predict(result_table, feature_column_names, feature_metas, train_label_name, result_col_name, conn, predict_generator, selected_cols)
    print(('Done predicting. Predict table : %s' % result_table))
# NOTE(review): the bare expression below looks like a decorator whose '@'
# and callee were lost in a source round-trip (presumably
# `@TokenIndexer.register('dependency_label')`) — confirm against the
# original source before relying on registration.
('dependency_label')
class DepLabelIndexer(TokenIndexer[int]):
    """Index tokens by their dependency label (e.g. from spaCy's `token.dep_`).

    Tokens without a dependency label fall back to the label 'NONE' and are
    warned about once per distinct token text.
    """

    def __init__(self, namespace: str='dep_labels') -> None:
        # Vocabulary namespace under which dep labels are counted/indexed.
        self.namespace = namespace
        # Token texts already warned about, to avoid log spam.
        self._logged_errors: Set[str] = set()

    def count_vocab_items(self, token: Token, counter: Dict[(str, Dict[(str, int)])]):
        """Tally this token's dependency label into the vocab counter."""
        dep_label = token.dep_
        if (not dep_label):
            if (token.text not in self._logged_errors):
                logger.warning('Token had no dependency label: %s', token.text)
                self._logged_errors.add(token.text)
            dep_label = 'NONE'
        counter[self.namespace][dep_label] += 1

    def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary, index_name: str) -> Dict[(str, List[int])]:
        """Map each token's dep label ('NONE' if missing) to its vocab index."""
        dep_labels = [(token.dep_ or 'NONE') for token in tokens]
        return {index_name: [vocabulary.get_token_index(dep_label, self.namespace) for dep_label in dep_labels]}

    def get_padding_token(self) -> int:
        """Padding index is 0."""
        return 0

    def get_padding_lengths(self, token: int) -> Dict[(str, int)]:
        """Single ints need no per-token padding dimensions."""
        return {}

    def pad_token_sequence(self, tokens: Dict[(str, List[int])], desired_num_tokens: Dict[(str, int)], padding_lengths: Dict[(str, int)]) -> Dict[(str, List[int])]:
        """Pad/truncate each index list to its desired length."""
        return {key: pad_sequence_to_length(val, desired_num_tokens[key]) for (key, val) in tokens.items()}
def test(epoch):
    """Evaluate the (module-global) model on the test set.

    Prints loss/accuracy for the epoch and, when accuracy improves on the
    global best, saves a checkpoint and updates `best_acc`.
    """
    global best_acc
    model.eval()
    running_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = model(inputs, None, None, mode='test')
            running_loss += criterion(outputs, targets).item()
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += predicted.eq(targets.data).cpu().sum()
    print('------> epoch: {} --> Test loss = {:.4f} Test Accuracy = {:.4f} '.format(epoch, (running_loss / len(testloader.dataset)), ((100.0 * correct) / len(testloader.dataset))))
    acc = (100.0 * correct) / total
    if acc > best_acc:
        # New best: checkpoint before recording it.
        checkpoint(acc, epoch)
        best_acc = acc
    return best_acc
# NOTE(review): the bare expression below looks like a Flask route decorator
# whose '@' and callee were lost in a source round-trip (presumably
# `@app.route('/api/v1.0/bird', methods=['POST'])`) — confirm against the
# original source.
('/api/v1.0/bird', methods=['POST'])
def create_bird():
    """Generate bird images from a caption (POST JSON {'caption': ...}).

    Returns 201 with image URLs at three sizes plus attention maps and the
    elapsed generation time; 400 when the JSON body or 'caption' is missing.
    """
    if ((not request.json) or (not ('caption' in request.json))):
        abort(400)
    caption = request.json['caption']
    t0 = time.time()
    # Runs the text encoder + generator and uploads results to blob storage.
    urls = generate(caption, wordtoix, ixtoword, text_encoder, netG, blob_service)
    t1 = time.time()
    response = {'small': urls[0], 'medium': urls[1], 'large': urls[2], 'map1': urls[3], 'map2': urls[4], 'caption': caption, 'elapsed': (t1 - t0)}
    return (jsonify({'bird': response}), 201)
def time_add(t1, t2, all_seconds=False):
    """Add two time values via their seconds representation.

    Both operands are converted with `time_to_seconds`, summed, and converted
    back with `seconds_to_time` (forwarding the `all_seconds` flag).
    """
    total_seconds = time_to_seconds(t1) + time_to_seconds(t2)
    return seconds_to_time(total_seconds, all_seconds)
def eval(opt):
    """Run CycleGAN inference over the dataset and save rendered images.

    Loads weights from the configured log directory, runs every batch through
    the model in test mode, and writes one image per batch to the image log
    directory. (Name shadows the builtin `eval`; kept for API compatibility.)
    """
    model = CycleGANModel(opt)
    loader = Mydata.get_loader(opt)
    img_logs, weight_logs = init_logs(opt)
    model.load(weight_logs)
    for batch_id, data in enumerate(loader):
        print('===> Epoch({}/{})'.format(batch_id, len(loader)))
        model.set_input(data)
        model.test()
        out_path = os.path.join(img_logs, 'imgA_{}.jpg'.format(batch_id))
        model.visual(out_path)
class Squares(object):
    """Thin file-based front-end to the SQUARES query synthesizer.

    Serializes input/output example tables plus a spec file into the
    `users/` directory layout, then invokes the synthesizer's `main()`.
    """

    def __init__(self):
        super(Squares, self).__init__()
        # Template of the synthesizer's input-specification file format.
        self.template = 'inputs: {inputs}\noutput: {output}\nconst: {const}\naggrs: {aggrs}\nattrs: {attrs}\nbools:\nloc: {loc}\n'

    def synthesize(self, inputs, output_ex, const='', aggrs='', attrs='', loc=0):
        """Write examples + spec to disk and run the synthesizer.

        Args:
            inputs: List of input tables (each a CSV-like string).
            output_ex: Expected output table (string).
            const, aggrs, attrs: Comma-separated hint strings.
            loc: Sketch size / program-length hint.

        Returns:
            Whatever the synthesizer's `main()` returns.

        NOTE(review): mutates module globals `argv` and `dir`, and shadows
        the builtins `input` and `dir` locally — confirm this is required by
        `main()` before refactoring.
        """
        global argv, dir
        dir = '../'
        ins = list([])
        temp = self.template
        try:
            # Probe for the users/files directory relative to the parent dir;
            # fall back to a CWD-relative layout when running from the root.
            (path, dirs, files) = next(os.walk('../users/files'))
        except:
            # NOTE(review): bare except — presumably only StopIteration is
            # expected here; confirm and narrow.
            (path, dirs, files) = next(os.walk('users/files'))
            dir = './'
        # New job id: one greater than the number of existing spec files.
        file_count = str((len(files) + 1))
        i_c = 0
        for i in inputs:
            # Input tables are written as i<job><index>.
            input = open(((((dir + 'users/tables/') + 'i') + str(file_count)) + str(i_c)), 'w+')
            input.write(i)
            input.close()
            ins.append(((((dir + 'users/tables/') + 'i') + str(file_count)) + str(i_c)))
            i_c += 1
        output = open((((dir + 'users/tables/') + 'o') + str(file_count)), 'w+')
        output.write(output_ex)
        output.close()
        output = ((dir + 'users/tables/o') + str(file_count))
        input_file_name = (((dir + 'users/files/') + 'f') + str(file_count))
        input_file = open(input_file_name, 'w+')
        # Render the python-list of paths as a bare comma-separated string.
        inputs = str(ins).replace("'", '').replace(']', '').replace('[', '')
        # Hints are quoted per-item; empty hints collapse to nothing ('""' removed).
        input_file.write(temp.format(inputs=inputs, output=output, const=(('"' + const.replace(',', '","').replace(' ', '')) + '"'), aggrs=(('"' + aggrs.replace(',', '","').replace(' ', '')) + '"'), attrs=(('"' + attrs.replace(',', '","').replace(' ', '')) + '"'), loc=str(loc)).replace('""', ''))
        input_file.close()
        # The synthesizer reads its arguments from the module-global argv.
        argv = []
        argv.append('lines')
        argv.append(input_file_name)
        return main()
def mrmr_regression(df, target_column, K, features=None, denominator='mean', only_same_domain=False, return_scores=False, show_progress=True):
    """Select the top-K features for a regression target via mRMR.

    Relevance is the F-statistic of each feature vs. `target_column`;
    redundancy is the correlation between features, aggregated with
    `denominator` ('mean', 'max', or a custom callable).

    Args:
        df: Input DataFrame.
        target_column: Name of the regression target column.
        K: Number of features to select.
        features: Candidate feature names; defaults to all numeric columns.
        denominator: 'mean' | 'max' | callable aggregating redundancy scores.
        only_same_domain: Restrict redundancy to same-domain feature pairs.
        return_scores: Also return relevance/redundancy scores.
        show_progress: Display a progress bar.

    Returns:
        The result of `mrmr_base` (selected features, optionally with scores).

    Raises:
        ValueError: If `denominator` is a string other than 'mean'/'max'.
    """
    if (features is None):
        features = get_numeric_features(df=df, target_column=target_column)
    # Idiom fix: isinstance instead of `type(x) == str`; single dispatch block.
    if isinstance(denominator, str):
        if (denominator == 'mean'):
            denominator_func = np.mean
        elif (denominator == 'max'):
            denominator_func = np.max
        else:
            raise ValueError("Invalid denominator function. It should be one of ['mean', 'max'].")
    else:
        denominator_func = denominator
    relevance_args = {'target_column': target_column, 'features': features, 'df': df}
    redundancy_args = {'df': df}
    return mrmr_base(K=K, relevance_func=f_regression, redundancy_func=correlation, relevance_args=relevance_args, redundancy_args=redundancy_args, denominator_func=denominator_func, only_same_domain=only_same_domain, return_scores=return_scores, show_progress=show_progress)
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the project root logger (named 'mmhuman3d').

    Optionally attaches a file handler at `log_file` with level `log_level`;
    delegates all setup to `get_logger`.
    """
    logger_name = 'mmhuman3d'
    return get_logger(logger_name, log_file, log_level)
def read_dataset_t2t_format(data_dir, num_parallel_calls, mode, max_frames, max_symbols, t2t_problem_name, features_hparams_override=''):
    """Load a tensor2tensor speech-recognition dataset as a tf.data.Dataset.

    Args:
        data_dir: Directory holding the generated t2t data files.
        num_parallel_calls: Parallelism for dataset reading.
        mode: tf.estimator mode (train/eval) passed to `problem.dataset`.
        max_frames: Cap on input (audio frame) sequence length.
        max_symbols: Cap on target (symbol) sequence length.
        t2t_problem_name: Registered t2t problem name to load.
        features_hparams_override: Optional hparams-string overrides applied
            on top of the problem's defaults.

    Returns:
        The configured tf.data.Dataset.
    """
    class CustomProblem(SpeechRecognitionProblem):
        # Subclass hook: apply the caller's hparams overrides after defaults.
        def hparams(self, defaults, model_hparams):
            super().hparams(defaults, model_hparams)
            model_hparams.parse(features_hparams_override)
    problem = CustomProblem()
    problem.name = t2t_problem_name
    speech_params = transformer_librispeech_tpu()
    speech_params.max_input_seq_length = max_frames
    speech_params.max_target_seq_length = max_symbols
    dataset = problem.dataset(mode, data_dir=data_dir, num_threads=num_parallel_calls, hparams=speech_params)
    return dataset
def test_optimization_result_status_for_failed_optimization() -> None:
    """A result wrapping an Err must report is_err and not is_ok."""
    failed: OptimizationResult[object] = OptimizationResult(Err(_Whoops()), [])
    assert not failed.is_ok
    assert failed.is_err
class CodeGenForCausalLM(metaclass=DummyObject):
    # Placeholder class emitted when the 'torch' backend is unavailable:
    # instantiating it raises a helpful error via requires_backends instead
    # of failing later with an opaque ImportError.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails fast, naming 'torch' as the missing backend.
        requires_backends(self, ['torch'])
def register_Ns3HtOperationValue_methods(root_module, cls):
    """Register constructors and methods of ns3::HtOperationValue on the
    pybindgen class wrapper `cls`.

    Generated binding code: the registration order mirrors the C++ header
    and should not be changed.
    """
    # Default, value and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::HtOperation const &', 'value')])
    cls.add_constructor([param('ns3::HtOperationValue const &', 'arg0')])
    # AttributeValue virtual interface plus typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::HtOperation', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::HtOperation const &', 'value')])
    return
class WeakHopper(ModifiableRoboschoolHopper):
    # Hopper variant with reduced actuator power (0.4 here).
    def __init__(self):
        # Bypasses ModifiableRoboschoolHopper.__init__ and initializes the
        # Mujoco-XML walker directly so the lowered 'power' takes effect.
        RoboschoolForwardWalkerMujocoXML.__init__(self, 'hopper.xml', 'torso', action_dim=3, obs_dim=15, power=0.4)
    def parameters(self):
        """Return the parent's parameter dict extended with this env's power."""
        # NOTE(review): `super().parameters` is accessed without calling it;
        # this only works if the parent exposes `parameters` as a property —
        # confirm against ModifiableRoboschoolHopper.
        parameters = super(WeakHopper, self).parameters
        parameters.update({'power': self.power})
        return parameters
# NOTE(review): the next line looks like a decorator whose '@name' prefix was
# lost (plausibly '@compare_numpy_output(check_dtype=True)'); as written it is
# a bare call executed at import time — confirm against the original file.
_numpy_output(check_dtype=True)
def test_ufunc_logical_or_ff(A: dace.float32[10], B: dace.float32[10]):
    # Element-wise logical OR of two float32 vectors (nonzero counts as True).
    return np.logical_or(A, B)
# NOTE(review): the next line looks like a decorator whose '@name' prefix was
# lost (e.g. an FPGA-test marker taking assert_ii_1); as written it is a bare
# call executed at import time — confirm against the original file.
_test(assert_ii_1=False)
def test_4_interface_to_2_banks_hbm_decoupled_interface():
    # Exercise the 4-interface / 2-bank mapping on HBM with decoupled
    # read/write interfaces.
    return four_interface_to_2_banks(mem_type='HBM', decouple_interfaces=True)
def get_class_name_lineno(method) -> Tuple[str, int]:
    """Report the code-object name and first line number two frames up the
    call stack (i.e. the caller of this function's caller).

    The `method` argument is unused; the result is derived purely from the
    current call stack.
    """
    frame = inspect.currentframe()
    # Step past this function's frame and its immediate caller.
    for _ in range(2):
        assert frame is not None
        frame = frame.f_back
    assert frame is not None
    code = frame.f_code
    return (code.co_name, code.co_firstlineno)
def test_fortran_frontend_arr2loop_2d():
    """Check that 2-D array slices in Fortran SUM() lower to correct loops.

    The 5x3 array is filled with 0..14 (row-major fill of a Fortran-ordered
    array), so: SUM(d) = SUM(d(:,:)) = 105, SUM(d(2:4,2)) = 4+7+10 = 21,
    and SUM(d(2:4,2:3)) = 45.
    """
    test_string = '\n PROGRAM index_offset_test\n implicit none\n double precision, dimension(5,3) :: d\n double precision, dimension(4) :: res\n CALL index_test_function(d,res)\n end\n\n SUBROUTINE index_test_function(d, res)\n double precision, dimension(5,3) :: d\n double precision, dimension(4) :: res\n\n res(1) = SUM(d)\n res(2) = SUM(d(:,:))\n res(3) = SUM(d(2:4, 2))\n res(4) = SUM(d(2:4, 2:3))\n\n END SUBROUTINE index_test_function\n '
    sdfg = fortran_parser.create_sdfg_from_string(test_string, 'index_offset_test', True)
    sdfg.simplify(verbose=True)
    sdfg.compile()
    sizes = [5, 3]
    # Fortran-ordered buffer; the fill below overwrites the 42 sentinel.
    d = np.full(sizes, 42, order='F', dtype=np.float64)
    cnt = 0
    for i in range(sizes[0]):
        for j in range(sizes[1]):
            d[(i, j)] = cnt
            cnt += 1
    res = np.full([4], 42, order='F', dtype=np.float64)
    sdfg(d=d, res=res)
    assert (res[0] == 105)
    assert (res[1] == 105)
    assert (res[2] == 21)
    assert (res[3] == 45)
class RNet(nn.Module):
    """MTCNN refinement network (R-Net).

    Loads pretrained weights from 'mtcnn_pytorch/src/weights/rnet.npy' at
    construction time, so instantiation performs file I/O.
    """
    def __init__(self):
        super(RNet, self).__init__()
        # Conv/PReLU/pool trunk followed by a 576->128 fully connected layer
        # (named 'conv4' to match the pretrained weight dictionary).
        self.features = nn.Sequential(OrderedDict([('conv1', nn.Conv2d(3, 28, 3, 1)), ('prelu1', nn.PReLU(28)), ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)), ('conv2', nn.Conv2d(28, 48, 3, 1)), ('prelu2', nn.PReLU(48)), ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)), ('conv3', nn.Conv2d(48, 64, 2, 1)), ('prelu3', nn.PReLU(64)), ('flatten', Flatten()), ('conv4', nn.Linear(576, 128)), ('prelu4', nn.PReLU(128))]))
        # Two heads: face/not-face logits and bounding-box regression offsets.
        self.conv5_1 = nn.Linear(128, 2)
        self.conv5_2 = nn.Linear(128, 4)
        # Overwrite every parameter with the pretrained values, keyed by the
        # parameter names defined above.
        weights = np.load('mtcnn_pytorch/src/weights/rnet.npy', allow_pickle=True)[()]
        for (n, p) in self.named_parameters():
            p.data = torch.FloatTensor(weights[n])
    def forward(self, x):
        """Return (box offsets, face probabilities) for input batch `x`."""
        x = self.features(x)
        a = self.conv5_1(x)
        b = self.conv5_2(x)
        # Softmax over the two face/not-face logits.
        a = F.softmax(a, dim=(- 1))
        return (b, a)
# NOTE(review): the three '.parametrize(...)' lines below look like
# '@pytest.mark.parametrize(...)' decorators whose '@pytest.mark' prefix was
# lost from this copy; as written they are syntactically invalid — confirm
# against the original file.
.parametrize('observation_shape', [(100,)])
.parametrize('action_size', [2])
.parametrize('episode_length', [10])
def test_compare_discrete_action_diff_with_algos(observation_shape: Sequence[int], action_size: int, episode_length: int) -> None:
    # Two identically-built DQNs over the same buffer should be comparable
    # without error by the action-match evaluator.
    discrete_episode = create_episode(observation_shape, action_size, length=episode_length, discrete_action=True)
    discrete_replay_buffer = _create_replay_buffer([discrete_episode])
    dqn1 = DQNConfig().create()
    dqn1.build_with_dataset(discrete_replay_buffer)
    dqn2 = DQNConfig().create()
    dqn2.build_with_dataset(discrete_replay_buffer)
    CompareDiscreteActionMatchEvaluator(dqn1)(dqn2, discrete_replay_buffer)
class SqueezeExcitation(nn.Module):
    """Channel-wise squeeze-and-excitation gate.

    Squeezes the last axis to a per-channel magnitude statistic, runs it
    through a two-layer bottleneck-style MLP (expansion by
    `amplifying_ratio`), and rescales the input channels by the resulting
    sigmoid gate in (0, 1).
    """

    def __init__(self, n_channels, amplifying_ratio) -> None:
        super(SqueezeExcitation, self).__init__()
        self.n_channels = n_channels
        self.amplifying_ratio = amplifying_ratio
        hidden_dim = self.amplifying_ratio * self.n_channels
        # channels -> expanded -> channels, ending in a sigmoid gate.
        self.net = nn.Sequential(
            nn.Linear(self.n_channels, hidden_dim, bias=True),
            nn.ReLU(),
            nn.Linear(hidden_dim, self.n_channels, bias=True),
            nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor):
        # Squeeze: mean absolute value over the last axis, one value per channel.
        squeezed = torch.abs(x).mean(dim=-1)
        # Excite: per-channel scale in (0, 1), broadcast back over the last axis.
        scale = self.net(squeezed).unsqueeze(-1)
        return x * scale
def read_in_samples_task1(dict_paragraphs, qrels, bm25_dir, no_hard_neg_docs):
    """Build DPR-style training samples (question, positives, hard negatives).

    For each judged query paragraph: positives are the paragraphs of every
    relevant document from `qrels`; hard negatives are paragraphs of
    BM25-retrieved documents that are NOT in the qrels, read from per-query
    files under `bm25_dir` and capped at `no_hard_neg_docs` entries.

    Args:
        dict_paragraphs: Mapping doc_id -> {paragraph_key -> paragraph_text}.
        qrels: Mapping query_id -> {relevant_doc_id -> judgement}.
        bm25_dir: Directory holding 'bm25_top1000_<qid>_<pid>_...' run files.
        no_hard_neg_docs: Max number of BM25 lines to consider per paragraph.

    Returns:
        List of sample dicts with keys 'question', 'answers',
        'positive_ctxs', 'negative_ctxs' and 'hard_negative_ctxs'.
    """
    samples = []
    for query_id in qrels.keys():
        print('now we start with this query {}'.format(query_id))
        paragraph_id = 0
        for paragraph in dict_paragraphs.get(query_id):
            if dict_paragraphs.get(query_id).get(paragraph):
                try:
                    query_text = dict_paragraphs.get(query_id).get(paragraph)
                    print('written in the query text')
                    # Positives: every non-empty paragraph of every relevant doc.
                    positive_ctxs = []
                    for rel_id in qrels.get(query_id).keys():
                        doc_rel_dict = dict_paragraphs.get(rel_id)
                        i = 0
                        for (x, doc_rel_text) in doc_rel_dict.items():
                            if doc_rel_text:
                                ctx = {'title': '', 'text': doc_rel_text, 'psg_id': '{}_{}'.format(rel_id, i)}
                                positive_ctxs.append(ctx)
                                i += 1
                    print('done with positives')
                    # BM25 run file: one '<doc_id> ... <score>' line per hit.
                    with open(os.path.join(bm25_dir, 'bm25_top1000_{}_{}_separately_para_w_summ_intro.txt'.format(query_id, paragraph_id)), 'r', encoding='utf8') as out_file:
                        top1000 = out_file.read().splitlines()[:no_hard_neg_docs]
                    top1000_dict = {}
                    for top in top1000:
                        top1000_dict.update({top.split(' ')[0]: float(top.split(' ')[(- 1)].strip())})
                    print(top1000_dict)
                    print('read in the negatives')
                    print(qrels.get(query_id))
                    # Hard negatives: BM25 hits whose base doc is not judged relevant.
                    hard_negative_ctxs = []
                    irrel_ids = []
                    for (key, score) in top1000_dict.items():
                        if (key.split('_')[0] not in qrels.get(query_id).keys()):
                            irrel_ids.append(key)
                    print('these are my irrelevant ones {}'.format(irrel_ids))
                    for irrel_id in irrel_ids:
                        j = 0
                        for (x, doc_irrel_text) in dict_paragraphs.get(irrel_id.split('_')[0]).items():
                            if doc_irrel_text:
                                ctx = {'title': '', 'text': doc_irrel_text, 'score': top1000_dict.get(irrel_id), 'psg_id': '{}'.format(irrel_id)}
                                hard_negative_ctxs.append(ctx)
                    print('now im finished with the irrelids ')
                    # Strongest (highest-BM25-score) negatives first.
                    hard_negative_ctxs.sort(key=(lambda hard_negative_ctxs: hard_negative_ctxs.get('score')), reverse=True)
                    print('now i sorted the hard negatives')
                    sample = {'question': query_text, 'answers': ['{}_{}'.format(query_id, paragraph_id)], 'positive_ctxs': positive_ctxs, 'negative_ctxs': [], 'hard_negative_ctxs': hard_negative_ctxs}
                    samples.append(sample)
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception (best-effort skip
                # of a failing query/paragraph is deliberate).
                except Exception:
                    print('it didnt work for this file {}_{}'.format(query_id, paragraph_id))
                    print(paragraph_id)
                paragraph_id += 1
            else:
                print('it didnt work for this paragraph {} {}'.format(query_id, paragraph_id))
        print('finished with query {}'.format(query_id))
    return samples
class TrunkConfig():
    """Configuration for the folding trunk (ESMFold-style).

    NOTE(review): `__post_init__` and `asdict(self)` in `to_dict` only take
    effect when this class is decorated with `@dataclass`; the decorator
    appears to have been lost from this copy — confirm against the original.
    """
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: 'StructureModuleConfig' = None

    def __post_init__(self):
        """Normalize `structure_module` and validate all dimension settings.

        Raises:
            ValueError: If recycles/dimensions/head widths are inconsistent.
        """
        # Accept a ready config object, a dict of its kwargs, or None (defaults).
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
        # Fixed: these two checks previously computed `dim % dim` (always 0),
        # making them dead code; they now validate divisibility by the
        # corresponding head width, matching the error messages' intent.
        if (self.sequence_state_dim % self.sequence_head_width) != 0:
            raise ValueError(f'`sequence_state_dim` should be a round multiple of `sequence_head_width`, got {self.sequence_state_dim} and {self.sequence_head_width}.')
        if (self.pairwise_state_dim % self.pairwise_head_width) != 0:
            raise ValueError(f'`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got {self.pairwise_state_dim} and {self.pairwise_head_width}.')
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != (sequence_num_heads * self.sequence_head_width):
            raise ValueError(f'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
        if self.pairwise_state_dim != (pairwise_num_heads * self.pairwise_head_width):
            raise ValueError(f'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
        if (self.pairwise_state_dim % 2) != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')

    def to_dict(self):
        """Serialize to a plain dict, recursing into the structure module."""
        output = asdict(self)
        output['structure_module'] = self.structure_module.to_dict()
        return output
class OidDataset(Dataset):
    """Open Images detection dataset.

    Reads images from `<main_dir>/images/<subset>` and annotations from the
    version-specific metadata directory, caching the parsed annotations as
    JSON under `annotation_cache_dir`.
    """
    def __init__(self, main_dir, subset, version='v4', annotation_cache_dir='.', transform=None):
        # Map dataset version to its metadata directory name.
        if (version == 'v4'):
            metadata = '2018_04'
        elif (version == 'challenge2018'):
            metadata = 'challenge2018'
        elif (version == 'v3'):
            metadata = '2017_11'
        else:
            raise NotImplementedError('There is currently no implementation for versions older than v3')
        self.transform = transform
        # challenge2018 always uses the train image folder.
        if (version == 'challenge2018'):
            self.base_dir = os.path.join(main_dir, 'images', 'train')
        else:
            self.base_dir = os.path.join(main_dir, 'images', subset)
        metadata_dir = os.path.join(main_dir, metadata)
        annotation_cache_json = os.path.join(annotation_cache_dir, (subset + '.json'))
        (self.id_to_labels, cls_index) = get_labels(metadata_dir, version=version)
        # Use the JSON cache when present; otherwise parse and cache.
        if os.path.exists(annotation_cache_json):
            with open(annotation_cache_json, 'r') as f:
                self.annotations = json.loads(f.read())
        else:
            self.annotations = generate_images_annotations_json(main_dir, metadata_dir, subset, cls_index, version=version)
            json.dump(self.annotations, open(annotation_cache_json, 'w'))
        # Dense index -> image id mapping (iteration order of the dict).
        self.id_to_image_id = dict([(i, k) for (i, k) in enumerate(self.annotations)])
        self.labels = self.id_to_labels
    def __len__(self):
        return len(self.annotations)
    def __getitem__(self, idx):
        # Sample dict with the raw image and its (N, 5) annotation array.
        img = self.load_image(idx)
        annot = self.load_annotations(idx)
        sample = {'img': img, 'annot': annot}
        if self.transform:
            sample = self.transform(sample)
        return sample
    def image_path(self, image_index):
        """Absolute path of the JPEG for dataset index `image_index`."""
        path = os.path.join(self.base_dir, (self.id_to_image_id[image_index] + '.jpg'))
        return path
    def load_image(self, image_index):
        """Load an image as float32 RGB in [0, 1]; exits on a bad file."""
        path = self.image_path(image_index)
        img = skimage.io.imread(path)
        # NOTE(review): a 1-D result presumably holds multiple frames and the
        # first is taken — confirm against the skimage reader's behavior.
        if (len(img.shape) == 1):
            img = img[0]
        if (len(img.shape) == 2):
            img = skimage.color.gray2rgb(img)
        try:
            return (img.astype(np.float32) / 255.0)
        except Exception:
            # Unreadable/corrupt image: report the path and abort the process.
            print(path)
            exit(0)
    def load_annotations(self, image_index):
        """Return boxes as an (N, 5) array: [x1, y1, x2, y2, cls_id] in pixels.

        Stored coordinates are relative; they are scaled by the image's
        height/width recorded in the annotation entry.
        """
        image_annotations = self.annotations[self.id_to_image_id[image_index]]
        labels = image_annotations['boxes']
        (height, width) = (image_annotations['h'], image_annotations['w'])
        boxes = np.zeros((len(labels), 5))
        for (idx, ann) in enumerate(labels):
            cls_id = ann['cls_id']
            x1 = (ann['x1'] * width)
            x2 = (ann['x2'] * width)
            y1 = (ann['y1'] * height)
            y2 = (ann['y2'] * height)
            boxes[(idx, 0)] = x1
            boxes[(idx, 1)] = y1
            boxes[(idx, 2)] = x2
            boxes[(idx, 3)] = y2
            boxes[(idx, 4)] = cls_id
        return boxes
    def image_aspect_ratio(self, image_index):
        """Width / height ratio from the stored annotation metadata."""
        img_annotations = self.annotations[self.id_to_image_id[image_index]]
        (height, width) = (img_annotations['h'], img_annotations['w'])
        return (float(width) / float(height))
    def num_classes(self):
        return len(self.id_to_labels)
def get_random_k_combinations(k: int, n_items: int, n_combinations: int, random_state: np.random.RandomState) -> np.ndarray:
    """Draw `n_combinations` random k-subsets of range(n_items).

    Each row is sampled without replacement (k distinct indices); rows are
    drawn independently, so duplicate rows are possible.

    Args:
        k: Size of each combination.
        n_items: Items are the integers 0..n_items-1.
        n_combinations: Number of combinations (rows) to draw.
        random_state: Seeded RandomState used for sampling.
            (Annotation fixed: was `np.random`, which is a module, not a type.)

    Returns:
        Array of shape (n_combinations, k) of integer indices.
    """
    return np.array([random_state.choice(range(n_items), k, replace=False) for _ in range(n_combinations)])
def get_filename_from_annotations(annotations, dataset):
    """Extract the image filename from the first annotation record.

    VOC nests the record under an 'annotation' key; COCO and CUB store the
    filename at the top level (their previously duplicated branches are
    merged here).

    Raises:
        ValueError: For an unrecognized dataset name (ValueError is a
            subclass of the Exception previously raised, so existing
            handlers still catch it).
    """
    if dataset == 'VOC':
        return annotations[0]['annotation']['filename']
    if dataset in ('COCO', 'CUB'):
        return annotations[0]['filename']
    raise ValueError('Unknown dataset: ' + dataset)
# NOTE(review): the next line looks like a decorator whose '@name' prefix was
# lost (plausibly a checkpoint-loader scheme registration such as
# '@CheckpointLoader.register_scheme(prefixes=...)') — confirm against the
# original file; as written it is a bare call executed at import time.
_scheme(prefixes='s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """Load a torch checkpoint from ceph/petrel object storage.

    Args:
        filename: Object-storage path of the checkpoint (e.g. 's3://...').
        map_location: Passed through to `torch.load`.
        backend: 'ceph' (deprecated) or 'petrel'.

    Returns:
        The deserialized checkpoint object.

    Raises:
        ValueError: If `backend` is not one of the allowed backends.
    """
    allowed_backends = ['ceph', 'petrel']
    if (backend not in allowed_backends):
        raise ValueError(f'Load from Backend {backend} is not supported.')
    if (backend == 'ceph'):
        warnings.warn('CephBackend will be deprecated, please use PetrelBackend instead')
    # If the requested backend's client library is missing, fall back to the
    # other allowed backend.
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        allowed_backends.remove(backend)
        file_client = FileClient(backend=allowed_backends[0])
    # Fetch the whole object into memory, then deserialize it.
    with io.BytesIO(file_client.get(filename)) as buffer:
        checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint
def sce_criterion(logits, labels):
    """Mean sigmoid cross-entropy between `logits` and target `labels`."""
    elementwise_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
    return tf.reduce_mean(elementwise_loss)
def test_scalar_reduction():
    """Compare a compiled HLL-style flux against its pure-Python reference.

    NOTE(review): the final line calls `flux_min1.f`, which implies
    `flux_min1` (and possibly `eigenvalues`) carried a '@dace.program'-style
    decorator that appears to have been stripped from this copy — confirm
    against the original file.
    """
    gamma = 1.4
    def eigenvalues(u: dace.float64[3]):
        # u = [rho, rho*v, E] conserved variables of 1-D Euler equations.
        rho = u[0]
        rhov = u[1]
        E = u[2]
        v = (rhov / rho)
        # Ideal-gas pressure from total energy minus kinetic energy.
        p = ((E - ((0.5 * rhov) * v)) * (gamma - 1))
        # Sound speed.
        c = np.sqrt(((gamma * p) / rho))
        # Characteristic speeds v-c, v, v+c.
        ret = np.empty_like(u)
        ret[0] = (v - c)
        ret[1] = v
        ret[2] = (v + c)
        return ret
    def flux_min1(ul: dace.float64[3], ur: dace.float64[3]):
        # Hard-coded left/right physical fluxes for this fixture.
        fl = np.array([0.0442802, 0., 0.])
        fr = np.array([0.0, 0.1, 0.0])
        eigvalsl = eigenvalues(ul)
        eigvalsr = eigenvalues(ur)
        # Fastest left- and right-going wave speed estimates.
        sl = np.min(eigvalsl)
        sr = np.max(eigvalsr)
        if (sl >= 0):
            return fl
        elif (sr <= 0):
            return fr
        else:
            # HLL average flux for the subsonic (mixed-sign) case.
            return (((((sl * sr) * (ur - ul)) + (fl * sr)) - (fr * sl)) / (sr - sl))
    ul = np.array([0., 0.0442802, 0.])
    ur = np.array([0.125, 0.0, 0.25])
    assert np.allclose(flux_min1(ul, ur), flux_min1.f(ul, ur))
def register_types(module):
    """Register all ns-3 C++ types used by this module with pybindgen.

    Generated binding code: classes must be registered before anything that
    references them (outer classes before nested ones, parents before
    children), so the statement order must not be changed.
    """
    root_module = module.get_root()
    # Plain value/helper classes (mostly re-exported from other modules).
    module.add_class('Address', import_from_module='ns.network')
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    module.add_class('AsciiTraceHelper', import_from_module='ns.network')
    module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    module.add_class('Buffer', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    module.add_class('ByteTagList', import_from_module='ns.network')
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    module.add_class('CallbackBase', import_from_module='ns.core')
    module.add_class('CriticalSection', import_from_module='ns.core')
    module.add_class('DataRate', import_from_module='ns.network')
    # DefaultDeleter template instantiations for each ref-counted type.
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::FdReader'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::SystemThread'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    module.add_class('EventId', import_from_module='ns.core')
    module.add_class('Hasher', import_from_module='ns.core')
    # Address types; each implicitly converts to the generic ns3::Address.
    module.add_class('Ipv4Address', import_from_module='ns.network')
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    module.add_class('Ipv6Address', import_from_module='ns.network')
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    module.add_class('Mac48Address', import_from_module='ns.network')
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    module.add_class('NetDeviceContainer', import_from_module='ns.network')
    module.add_class('NodeContainer', import_from_module='ns.network')
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    module.add_class('ObjectFactory', import_from_module='ns.core')
    # Packet metadata/tag bookkeeping types with their nested iterators.
    module.add_class('PacketMetadata', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    module.add_class('PacketTagList', import_from_module='ns.network')
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    module.add_class('PcapFile', import_from_module='ns.network')
    module.add_class('PcapHelper', import_from_module='ns.network')
    module.add_enum('DataLinkType', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_LINUX_SLL', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4', 'DLT_NETLINK'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
    module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
    # Reference-counted base (Object hierarchy), with Ref/Unref memory policy.
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
    module.add_class('SystemCondition', import_from_module='ns.core')
    module.add_class('SystemMutex', import_from_module='ns.core')
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    module.add_class('TagBuffer', import_from_module='ns.network')
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    module.add_class('TypeId', import_from_module='ns.core')
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('empty', import_from_module='ns.core')
    module.add_class('int64x64_t', import_from_module='ns.core')
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    # The module's own helper, multiply inheriting both trace helper bases.
    module.add_class('FdNetDeviceHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
    # SimpleRefCount instantiations for every ref-counted helper type.
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SystemThread', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
    module.add_class('Time', import_from_module='ns.core')
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    # Attribute accessor/checker/value hierarchy.
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('DataRateChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('DataRateValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('EmuFdNetDeviceHelper', parent=root_module['ns3::FdNetDeviceHelper'])
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    module.add_class('FdReader', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    module.add_class('TapFdNetDeviceHelper', parent=root_module['ns3::EmuFdNetDeviceHelper'])
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    # CallbackImpl template instantiations for every callback signature used.
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'unsigned char *', 'long', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('FdNetDevice', parent=root_module['ns3::NetDevice'])
    module.add_enum('EncapsulationMode', ['DIX', 'LLC', 'DIXPI'], outer_class=root_module['ns3::FdNetDevice'])
    module.add_class('FdNetDeviceFdReader', parent=root_module['ns3::FdReader'])
    # Register the nested C++ namespaces handled by sibling functions.
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
class TestREPS(TfGraphTestCase):
    """Smoke test: REPS should reach a minimal average return on CartPole-v0."""

    # NOTE(review): the decorator lost its '@pytest.mark.' prefix in this file
    # (it read '.large', a syntax error); restored to the conventional mark.
    @pytest.mark.large
    def test_reps_cartpole(self):
        """Train REPS for a few epochs and check the average return improves."""
        with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
            env = GarageEnv(gym.make('CartPole-v0'))
            policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=[32, 32])
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = REPS(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99)
            runner.setup(algo, env)
            last_avg_ret = runner.train(n_epochs=10, batch_size=4000)
            # Untrained CartPole hovers near a return of ~10; 5 is a loose floor.
            assert (last_avg_ret > 5)
            env.close()
def _get_all_k_combinations_rec(offset: int, k: int, combination: deque, original_size: int, combinations: deque):
if (k == 0):
combinations.append(deepcopy(combination))
return
for i in range(offset, ((original_size - k) + 1), 1):
combination.append(i)
_get_all_k_combinations_rec((i + 1), (k - 1), combination, original_size, combinations)
combination.pop() |
class YaLMWindowService(LocalWindowService):
    """Window service for YaLM models: tokenizer limits, special tokens, truncation.

    Fix: the accessor methods below must be properties — `truncate_from_right`
    reads `self.max_request_length` and `prefix_token` reads
    `self.end_of_text_token` WITHOUT calling them, so without `@property` those
    expressions would operate on bound-method objects and raise `TypeError`.
    """

    def __init__(self, service: TokenizerService):
        super().__init__(service)

    @property
    def tokenizer_name(self) -> str:
        """Name of the tokenizer backing this window service."""
        return 'Yandex/yalm'

    @property
    def max_sequence_length(self) -> int:
        """Maximum number of tokens the model can attend to."""
        return YaLMTokenizer.MAX_SEQUENCE_LENGTH

    @property
    def max_request_length(self) -> int:
        """Maximum request length: one token beyond the sequence length."""
        return (self.max_sequence_length + 1)

    @property
    def end_of_text_token(self) -> str:
        """The end-of-text token of the tokenizer."""
        return YaLMTokenizer.EOS_TOKEN

    @property
    def prefix_token(self) -> str:
        """Prefix token; for YaLM it coincides with the end-of-text token."""
        return self.end_of_text_token

    def truncate_from_right(self, text: str, expected_completion_token_length: int = 0) -> str:
        """Truncate *text* so it plus the expected completion fits in the window."""
        max_length: int = (self.max_request_length - expected_completion_token_length)
        result: str = self.decode(self.encode(text, truncation=True, max_length=max_length).tokens)
        # Decoding may round-trip to slightly too many tokens; trim one character
        # at a time until the text fits within the context window.
        while (not self.fits_within_context_window(result, expected_completion_token_length)):
            result = result[:(- 1)]
        return result
class AggregateSkeletonFragmentsOperator(OperatorBase):
    """Merge per-chunk skeleton fragments from cloud storage into whole skeletons."""
    def __init__(self, fragments_path: str, output_path: str, name: str='aggregate-skeleton-fragments'):
        """Open the fragment source and the merged-skeleton destination storages.

        :param fragments_path: cloud path holding skeleton fragment files.
        :param output_path: cloud path to write merged skeletons to.
        :param name: operator name passed to the base class.
        """
        super().__init__(name=name)
        self.fragments_storage = CloudFiles(fragments_path)
        self.output_storage = CloudFiles(output_path)
    def __call__(self, prefix: str):
        """Aggregate all fragments whose file names start with *prefix*."""
        print(f'aggregate skeletons with prefix of {prefix}')
        # Group fragment file names by skeleton id; ids are encoded as the
        # leading "<digits>:" portion of each file name.
        id2filenames = defaultdict(list)
        for filename in self.fragments_storage.list_files(prefix=prefix):
            filename = os.path.basename(filename)
            matches = re.search('(\\d+):', filename)
            if (not matches):
                # File name does not carry a skeleton id; ignore it.
                continue
            # group(0) includes the trailing ':'; strip it to get the id digits.
            skl_id = int(matches.group(0)[:(- 1)])
            id2filenames[skl_id].append(filename)
        for (skl_id, filenames) in id2filenames.items():
            print(f'skeleton id: {skl_id}')
            # Fetch all fragments of this skeleton, merge them, then clean up
            # small artifacts via kimimaro postprocessing.
            frags = self.fragments_storage.get(filenames)
            frags = [PrecomputedSkeleton.from_precomputed(x['content']) for x in frags]
            skel = PrecomputedSkeleton.simple_merge(frags).consolidate()
            skel = kimimaro.postprocess(skel, dust_threshold=1000, tick_threshold=3500)
            self.output_storage.put(file_path=str(skl_id), content=skel.to_precomputed())
            # Brief pause between uploads to avoid hammering the storage backend.
            sleep(0.01)
class RemoveSelfLoops(BaseTransform):
    """Transform that strips self-loop edges from a graph data object."""

    def __call__(self, data: Data) -> Data:
        """Remove self-loops from ``edge_index`` and the diagonal of ``adj_t``."""
        edge_index = getattr(data, 'edge_index', None)
        if edge_index is not None:
            data.edge_index, _ = remove_self_loops(edge_index)
        if hasattr(data, 'adj_t'):
            data.adj_t = data.adj_t.remove_diag()
        return data
def init():
    """Animation init hook: register every patch on the axes once.

    Returns the (car, drone) patches as the artists to animate.
    """
    for patch in (car, drone, obstacle1, obstacle2, obstacle3):
        ax.add_patch(patch)
    return (car, drone)
def Dynamics_LC_Filter(para_LC, i_ld0, i_lq0, v_od0, v_oq0, v_id0, v_iq0, i_od0, i_oq0, w0):
    """Compute dq-frame state derivatives of an LC output filter.

    :param para_LC: dict with filter resistance 'r_f', inductance 'L_f' and
        capacitance 'C_f'.
    :param i_ld0, i_lq0: inductor currents (d, q).
    :param v_od0, v_oq0: output capacitor voltages (d, q).
    :param v_id0, v_iq0: inverter-side voltages (d, q).
    :param i_od0, i_oq0: output currents (d, q).
    :param w0: angular frequency of the rotating frame.
    :returns: tuple (di_ld, di_lq, dv_od, dv_oq) of time derivatives.
    """
    resistance = para_LC['r_f']
    inductance = para_LC['L_f']
    capacitance = para_LC['C_f']
    # Inductor current dynamics with cross-coupling terms +/- w0 * i.
    di_ld = -resistance / inductance * i_ld0 + w0 * i_lq0 + (v_id0 - v_od0) / inductance
    di_lq = -resistance / inductance * i_lq0 - w0 * i_ld0 + (v_iq0 - v_oq0) / inductance
    # Capacitor voltage dynamics with cross-coupling terms +/- w0 * v.
    dv_od = w0 * v_oq0 + (i_ld0 - i_od0) / capacitance
    dv_oq = -w0 * v_od0 + (i_lq0 - i_oq0) / capacitance
    return (di_ld, di_lq, dv_od, dv_oq)
# NOTE(review): the decorator lost its '@pytest.fixture' prefix in this file
# (only '(params=[...])' remained, a syntax error); restored below.
@pytest.fixture(params=['csr', 'csc', 'coo', 'bsr'])
def X_64bit(request):
    """Yield a random 20x10 sparse matrix whose index arrays are cast to int64.

    Parametrized over the four sparse formats; only the index attributes the
    format actually has (indices/indptr vs. row/col) are converted.
    """
    X = sp.rand(20, 10, format=request.param)
    for attr in ['indices', 'indptr', 'row', 'col']:
        if hasattr(X, attr):
            setattr(X, attr, getattr(X, attr).astype('int64'))
    yield X
def setup_test_equal_bounds():
    """Build shared fixtures for minimizer tests where some bounds are equal.

    Returns a dict of solver methods, objective/gradient variants, bound
    representations, constraint pairs and callbacks, plus the problem data
    (x0, lb, ub and the mask of fixed variables).
    """
    np.random.seed(0)
    x0 = np.random.rand(4)
    # Components 1 and 3 have lb == ub, i.e. they are fixed variables.
    lb = np.array([0, 2, (- 1), (- 1.0)])
    ub = np.array([3, 2, 2, (- 1.0)])
    i_eb = (lb == ub)
    def check_x(x, check_size=True, check_values=True):
        # Every x the solver evaluates must keep the fixed components at lb.
        if check_size:
            assert (x.size == 4)
        if check_values:
            assert_allclose(x[i_eb], lb[i_eb])
    def func(x):
        check_x(x)
        return optimize.rosen(x)
    def grad(x):
        check_x(x)
        return optimize.rosen_der(x)
    def callback(x, *args):
        check_x(x)
    def constraint1(x):
        # Constraint callbacks may see trial points, so skip the value check.
        check_x(x, check_values=False)
        return (x[0:1] - 1)
    def jacobian1(x):
        check_x(x, check_values=False)
        dc = np.zeros_like(x)
        dc[0] = 1
        return dc
    def constraint2(x):
        check_x(x, check_values=False)
        return (x[2:3] - 0.5)
    def jacobian2(x):
        check_x(x, check_values=False)
        dc = np.zeros_like(x)
        dc[2] = 1
        return dc
    # 'a' variants use finite-difference jacobians, 'b' variants analytic ones.
    c1a = NonlinearConstraint(constraint1, (- np.inf), 0)
    c1b = NonlinearConstraint(constraint1, (- np.inf), 0, jacobian1)
    c2a = NonlinearConstraint(constraint2, (- np.inf), 0)
    c2b = NonlinearConstraint(constraint2, (- np.inf), 0, jacobian2)
    methods = ('L-BFGS-B', 'SLSQP', 'TNC')
    # Objective variants: separate fun/jac, fun with analytic jac, fused fun+jac.
    kwds = ({'fun': func, 'jac': False}, {'fun': func, 'jac': grad}, {'fun': (lambda x: (func(x), grad(x))), 'jac': True})
    # Bounds as a list of (lb, ub) pairs or as a Bounds object.
    bound_types = ((lambda lb, ub: list(zip(lb, ub))), Bounds)
    # Pairs of (input constraints, constraints expected after preprocessing).
    constraints = ((None, None), ([], []), (c1a, c1b), (c2b, c2b), ([c1b], [c1b]), ([c2a], [c2b]), ([c1a, c2a], [c1b, c2b]), ([c1a, c2b], [c1b, c2b]), ([c1b, c2b], [c1b, c2b]))
    callbacks = (None, callback)
    data = {'methods': methods, 'kwds': kwds, 'bound_types': bound_types, 'constraints': constraints, 'callbacks': callbacks, 'lb': lb, 'ub': ub, 'x0': x0, 'i_eb': i_eb}
    return data
def prepare_data(dataset):
    """Flatten *dataset* into per-example dicts and collect the label queries.

    Returns (sentences, queries_lst) where each sentence dict holds the text,
    gold label, example index and uid, and queries_lst lists all label strings.
    """
    loader = DataLoader(dataset, batch_size=16, shuffle=True, pin_memory=True, timeout=60, num_workers=1, drop_last=True)
    sentences = []
    for _, batch in tqdm(enumerate(loader)):
        texts, labels, _fpaths, ex_indices, uids = batch[0], batch[1], batch[2], batch[3], batch[4]
        # One entry per example in the batch; file paths are not needed here.
        for text, label, ex_idx, uid in zip(texts, labels, ex_indices, uids):
            sentences.append({'text': text, 'gold': label, 'exidx': int(ex_idx), 'uid': int(uid)})
    queries_lst = list(dataset.index2label.values())
    return (sentences, queries_lst)
def is_disjoint(T1, T2):
    """Return True when T1 and T2 agree at no cell.

    Cells where both entries are negative are treated as empty and skipped.
    T1 and T2 are assumed to have the same dimensions (T1's are used).
    """
    for row in range(T1.nrows()):
        for col in range(T1.ncols()):
            left = T1[(row, col)]
            right = T2[(row, col)]
            # Two negative entries mark empty cells; they never clash.
            if left < 0 and right < 0:
                continue
            if left == right:
                return False
    return True
class _SearchStatistics():
    """Tracks output variables during the search and writes them via a backend."""

    _logger = logging.getLogger(__name__)

    def __init__(self):
        """Set up the backend, the variable factories and the start time."""
        self._backend: (None | sb.AbstractStatisticsBackend) = self._initialise_backend()
        self._output_variables: dict[(str, sb.OutputVariable)] = {}
        self._variable_factories: dict[(str, ovf.ChromosomeOutputVariableFactory)] = {}
        self._sequence_output_variable_factories: dict[(str, ovf.SequenceOutputVariableFactory)] = {}
        self._init_factories()
        self.set_output_variable_for_runtime_variable(RuntimeVariable.RandomSeed, config.configuration.seeding.seed)
        self._fill_sequence_output_variable_factories()
        self._start_time = time.time_ns()
        self.set_sequence_output_variable_start_time(self._start_time)
        self._best_individual: (chrom.Chromosome | None) = None

    # Fix: this helper takes no `self` but is invoked as
    # `self._initialise_backend()` (in __init__ and write_statistics); it must
    # be a @staticmethod or those calls raise TypeError.
    @staticmethod
    def _initialise_backend() -> (sb.AbstractStatisticsBackend | None):
        """Create the statistics backend selected in the configuration, if any."""
        backend = config.configuration.statistics_output.statistics_backend
        if (backend == config.StatisticsBackend.CONSOLE):
            return sb.ConsoleStatisticsBackend()
        if (backend == config.StatisticsBackend.CSV):
            return sb.CSVStatisticsBackend()
        return None

    def _init_factories(self) -> None:
        """Register the per-chromosome output-variable factories."""
        self._variable_factories[RuntimeVariable.Length.name] = self._ChromosomeLengthOutputVariableFactory()
        self._variable_factories[RuntimeVariable.Size.name] = self._ChromosomeSizeOutputVariableFactory()
        self._variable_factories[RuntimeVariable.Coverage.name] = self._ChromosomeCoverageOutputVariableFactory()
        self._variable_factories[RuntimeVariable.Fitness.name] = self._ChromosomeFitnessOutputVariableFactory()

    def _fill_sequence_output_variable_factories(self) -> None:
        """Register the timeline (sequence) output-variable factories."""
        self._sequence_output_variable_factories[RuntimeVariable.CoverageTimeline.name] = self._CoverageSequenceOutputVariableFactory()
        self._sequence_output_variable_factories[RuntimeVariable.SizeTimeline.name] = self._SizeSequenceOutputVariableFactory()
        self._sequence_output_variable_factories[RuntimeVariable.LengthTimeline.name] = self._LengthSequenceOutputVariableFactory()
        self._sequence_output_variable_factories[RuntimeVariable.FitnessTimeline.name] = self._FitnessSequenceOutputVariableFactory()
        self._sequence_output_variable_factories[RuntimeVariable.TotalExceptionsTimeline.name] = ovf.DirectSequenceOutputVariableFactory.get_integer(RuntimeVariable.TotalExceptionsTimeline)

    def set_sequence_output_variable_start_time(self, start_time: int) -> None:
        """Propagate the search start time to all sequence factories."""
        for factory in self._sequence_output_variable_factories.values():
            factory.set_start_time(start_time)

    def current_individual(self, individual: chrom.Chromosome) -> None:
        """Record *individual* as the current best and update all variables."""
        if (not self._backend):
            # Without a backend there is nowhere to record statistics.
            return
        if (not isinstance(individual, chrom.Chromosome)):
            self._logger.warning('SearchStatistics expected a TestSuiteChromosome')
            return
        self._logger.debug('Received individual')
        self._best_individual = individual
        for variable_factory in self._variable_factories.values():
            self.set_output_variable(variable_factory.get_variable(individual))
        for seq_variable_factory in self._sequence_output_variable_factories.values():
            seq_variable_factory.update(individual)

    def set_output_variable(self, variable: sb.OutputVariable) -> None:
        """Store *variable*, routing sequence variables to their factory."""
        if (variable.name in self._sequence_output_variable_factories):
            var = self._sequence_output_variable_factories[variable.name]
            assert isinstance(var, ovf.DirectSequenceOutputVariableFactory)
            var.set_value(variable.value)
        else:
            self._output_variables[variable.name] = variable

    def update_output_variable(self, variable: sb.OutputVariable) -> None:
        """Update the value of an existing sequence variable."""
        if (variable.name not in self._sequence_output_variable_factories):
            raise AssertionError('Can only be called on sequence variable.')
        var = self._sequence_output_variable_factories[variable.name]
        assert isinstance(var, ovf.DirectSequenceOutputVariableFactory)
        var.update_value(variable.value)

    def set_output_variable_for_runtime_variable(self, variable: RuntimeVariable, value: Any) -> None:
        """Convenience wrapper: set an output variable from a RuntimeVariable."""
        self.set_output_variable(sb.OutputVariable(name=variable.name, value=value))

    def update_output_variable_for_runtime_variable(self, variable: RuntimeVariable, value: Any) -> None:
        """Convenience wrapper: update an output variable from a RuntimeVariable."""
        self.update_output_variable(sb.OutputVariable(name=variable.name, value=value))

    def output_variables(self) -> dict[(str, sb.OutputVariable)]:
        """Return the currently stored (non-sequence) output variables."""
        return self._output_variables

    def _get_output_variables(self, individual, skip_missing: bool=True) -> dict[(str, sb.OutputVariable)]:
        """Collect all configured output variables for *individual*.

        With skip_missing, unknown variables become empty-valued entries;
        otherwise a missing variable aborts collection with an empty dict.
        """
        output_variables_map: dict[(str, sb.OutputVariable)] = {}
        for variable in config.configuration.statistics_output.output_variables:
            variable_name = variable.name
            if (variable_name in self._output_variables):
                output_variables_map[variable_name] = self._output_variables[variable_name]
            elif (variable_name in self._variable_factories):
                output_variables_map[variable_name] = self._variable_factories[variable_name].get_variable(individual)
            elif (variable_name in self._sequence_output_variable_factories):
                assert (config.configuration.stopping.maximum_search_time >= 0), 'Tracking sequential variables is only possible when using maximum search time as a stopping condition'
                for var in self._sequence_output_variable_factories[variable_name].get_output_variables():
                    output_variables_map[var.name] = var
            elif skip_missing:
                output_variables_map[variable_name] = sb.OutputVariable(name=variable_name, value='')
            else:
                self._logger.error('No obtained value for output variable %s', variable_name)
                return {}
        return output_variables_map

    def write_statistics(self) -> bool:
        """Write the collected statistics; return True on success."""
        self._logger.info('Writing statistics')
        # Re-create the backend in case the configuration changed meanwhile.
        self._backend = self._initialise_backend()
        if (not self._backend):
            return False
        self._output_variables[RuntimeVariable.TotalTime.name] = sb.OutputVariable(name=RuntimeVariable.TotalTime.name, value=(time.time_ns() - self._start_time))
        if (not self._best_individual):
            self._logger.error('No statistics has been saved because Pynguin failed to generate any test case')
            return False
        individual = self._best_individual
        output_variables_map = self._get_output_variables(individual)
        self._backend.write_data(output_variables_map)
        return True

    class _ChromosomeLengthOutputVariableFactory(ovf.ChromosomeOutputVariableFactory):
        """Factory for the total length of the chromosome."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.Length)
        def get_data(self, individual: chrom.Chromosome) -> int:
            return individual.length()

    class _ChromosomeSizeOutputVariableFactory(ovf.ChromosomeOutputVariableFactory):
        """Factory for the size of the chromosome."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.Size)
        def get_data(self, individual: chrom.Chromosome) -> int:
            return individual.size()

    class _ChromosomeCoverageOutputVariableFactory(ovf.ChromosomeOutputVariableFactory):
        """Factory for the coverage value of the chromosome."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.Coverage)
        def get_data(self, individual: chrom.Chromosome) -> float:
            return individual.get_coverage()

    class _ChromosomeFitnessOutputVariableFactory(ovf.ChromosomeOutputVariableFactory):
        """Factory for the fitness value of the chromosome."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.Fitness)
        def get_data(self, individual: chrom.Chromosome) -> float:
            return individual.get_fitness()

    class _CoverageSequenceOutputVariableFactory(ovf.DirectSequenceOutputVariableFactory):
        """Timeline factory for coverage values."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.CoverageTimeline, 0.0)
        def get_value(self, individual: chrom.Chromosome) -> float:
            return individual.get_coverage()

    class _SizeSequenceOutputVariableFactory(ovf.DirectSequenceOutputVariableFactory):
        """Timeline factory for chromosome sizes."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.SizeTimeline, 0)
        def get_value(self, individual: chrom.Chromosome) -> int:
            return individual.size()

    class _LengthSequenceOutputVariableFactory(ovf.DirectSequenceOutputVariableFactory):
        """Timeline factory for chromosome lengths."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.LengthTimeline, 0)
        def get_value(self, individual: chrom.Chromosome) -> int:
            return individual.length()

    class _FitnessSequenceOutputVariableFactory(ovf.DirectSequenceOutputVariableFactory):
        """Timeline factory for fitness values."""
        def __init__(self) -> None:
            super().__init__(RuntimeVariable.FitnessTimeline, 0.0)
        def get_value(self, individual: chrom.Chromosome) -> float:
            return individual.get_fitness()
def rbf_mmd2_and_ratio(X, Y, sigma=1, biased=True):
    """Single-bandwidth convenience wrapper around mix_rbf_mmd2_and_ratio."""
    bandwidths = [sigma]
    return mix_rbf_mmd2_and_ratio(X, Y, sigmas=bandwidths, biased=biased)
def file_exists(filepath):
    """Return True if *filepath* exists, locally or as a GCS blob.

    Paths starting with 'gs://' are checked against Google Cloud Storage;
    everything else is treated as a local filesystem path.
    """
    if not filepath.startswith('gs://'):
        return os.path.exists(filepath)
    bucket_name, blob_name = split_gcs_bucket_and_filepath(filepath)
    return gcs_bucket(bucket_name).blob(blob_name).exists()
# NOTE(review): the decorator lost its '@' in this file; restored below.
@_mock.Mocker(kw='mock')
def test_parse_results_amz(**kwargs):
    """parse_results_amz extracts price/title/asin entries from a mocked page."""
    with open('tests/transfer/mocks/mock_parse_results_amz', 'rb') as mock_file:
        mock_body = mock_file.read()
    mock_query = 'red basketball shoes'
    query = mock_query.replace(' ', '+')
    # NOTE(review): the original URL literal was corrupted in this file
    # (it read "f' content=mock_body)"); this is the conventional Amazon
    # search endpoint — confirm against the URL parse_results_amz requests.
    kwargs['mock'].get(f'https://www.amazon.com/s?k={query}', content=mock_body)
    output = parse_results_amz(mock_query, 1)
    expected = [{'Price': '59.49', 'Title': 'High Top Mens Basketball Shoes Lou Williams Streetball Master Breathable Non Slip Outdoor Sneakers Cushioning Workout Shoes for Fitness', 'asin': 'B083QCWF61'}, {'Price': '45.99', 'Title': 'Kids Basketball Shoes High-top Sports Shoes Sneakers Durable Lace-up Non-Slip Running Shoes Secure for Little Kids Big Kids and Boys Girls', 'asin': 'B08FWWWQ11'}, {'Price': '64.99', 'Title': 'Unisex-Adult Lockdown 5 Basketball Shoe', 'asin': 'B0817BFNC4'}, {'Price': '63.75', 'Title': 'Unisex-Child Team Hustle D 9 (Gs) Sneaker', 'asin': 'B07HHTS79M'}, {'Price': '74.64', 'Title': 'Unisex-Adult D.O.N. Issue 3 Basketball Shoe', 'asin': 'B08N8DQLS2'}, {'Price': '104.90', 'Title': "Men's Lebron Witness IV Basketball Shoes", 'asin': 'B07TKMMHVB'}, {'Price': '36.68', 'Title': "Unisex-Child Pre-School Jet '21 Basketball Shoe", 'asin': 'B08N6VRHV4'}, {'Price': '59.98', 'Title': "Men's Triple Basketball Shoe", 'asin': 'B08QCL8VKM'}, {'Price': '45.98', 'Title': 'Unisex-Child Pre School Lockdown 4 Basketball Shoe', 'asin': 'B07HKP12DH'}, {'Price': '143.72', 'Title': "Men's Basketball Shoes", 'asin': 'B07SNR7HRF'}]
    assert (output == expected)
def world_extract(x, fs, f0min, f0max):
    """Extract WORLD vocoder features from a waveform.

    :param x: waveform samples; scaled to int16 range before analysis
        (assumes x is normalized to [-1, 1] — TODO confirm with callers).
    :param fs: sampling rate in Hz.
    :param f0min: lower bound for F0 search.
    :param f0max: upper bound for F0 search.
    :returns: dict with spectral envelope 'sp', mel-cepstrum 'mcep',
        aperiodicity 'ap', fundamental frequency 'f0' and power 'npow'.
    """
    # Scale to the int16 amplitude range expected by the analysis chain.
    x = (x * np.iinfo(np.int16).max)
    x = np.array(x, dtype=np.float64)
    # Remove low-frequency content before F0 estimation.
    x = low_cut_filter(x, fs)
    # Harvest F0 estimation, then spectral envelope and aperiodicity.
    (f0, time_axis) = pw.harvest(x, fs, f0_floor=f0min, f0_ceil=f0max, frame_period=MCEP_SHIFT)
    sp = pw.cheaptrick(x, f0, time_axis, fs, fft_size=MCEP_FFTL)
    ap = pw.d4c(x, f0, time_axis, fs, fft_size=MCEP_FFTL)
    # Convert the spectral envelope to mel-cepstral coefficients.
    mcep = pysptk.sp2mc(sp, MCEP_DIM, MCEP_ALPHA)
    npow = spc2npow(sp)
    return {'sp': sp, 'mcep': mcep, 'ap': ap, 'f0': f0, 'npow': npow}
def find_span_from_text(context, tokens, answer):
    """Map occurrences of *answer* in *context* onto token-index spans.

    :param context: the raw text containing the answer.
    :param tokens: WordPiece-style tokens of the context ('##' continuations).
    :param answer: the answer string; must occur in *context*.
    :returns: list of dicts with 'text', 'answer_start' (char offset),
        'word_start' and 'word_end' (inclusive token indices).
    """
    assert (answer in context)
    offset = 0
    spans = []
    # scanning holds [(token_index, char_offset)] while a multi-token match
    # is in progress; None means no partial match is active.
    scanning = None
    # Debug trace of the scan; built but never read afterwards.
    process = []
    for (i, token) in enumerate(tokens):
        # Strip WordPiece continuation markers so tokens align with raw text.
        token = token.replace(' ##', '').replace('##', '')
        # Advance offset until the context at offset matches this token.
        while (context[offset:(offset + len(token))] != token):
            offset += 1
            if (offset >= len(context)):
                break
        if (scanning is not None):
            end = (offset + len(token))
            if answer.startswith(context[scanning[(- 1)][(- 1)]:end]):
                if (context[scanning[(- 1)][(- 1)]:end] == answer):
                    # Multi-token match completed: record (start token, end
                    # token, starting char offset).
                    span = (scanning[0][0], i, scanning[0][1])
                    spans.append(span)
            elif (len(context[scanning[(- 1)][(- 1)]:end]) >= len(answer)):
                # Scanned text grew past the answer without matching; reset.
                scanning = None
            else:
                scanning = None
        if ((scanning is None) and answer.startswith(token)):
            if (token == answer):
                # Single-token match.
                spans.append((i, i, offset))
            if (token != answer):
                # Token is a proper prefix of the answer: start scanning.
                scanning = [(i, offset)]
        offset += len(token)
        if (offset >= len(context)):
            break
        process.append((token, offset, scanning, spans))
    answers = []
    for (word_start, word_end, span) in spans:
        # Sanity check: either the characters match exactly, or the joined
        # tokens disagree with the (whitespace-stripped) answer.
        assert ((context[span:(span + len(answer))] == answer) or (''.join(tokens[word_start:(word_end + 1)]).replace('##', '') != answer.replace(' ', '')))
        answers.append({'text': answer, 'answer_start': span, 'word_start': word_start, 'word_end': word_end})
    return answers
def test_RegularArray_RecordArray_NumpyArray():
    """Check RegularArray._carry over RecordArray contents (values and forms)."""
    # Regular array of 3-element records backed by a NumpyArray.
    v2a = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), 3)
    # Carrying index [0] selects the first regular sublist.
    resultv2 = v2a._carry(ak.index.Index(np.array([0], np.int64)), False)
    assert (to_list(resultv2) == [[{'nest': 0.0}, {'nest': 1.1}, {'nest': 2.2}]])
    # The typetracer (shape-only) path must produce the same form.
    assert (v2a.to_typetracer()._carry(ak.index.Index(np.array([0], np.int64)), False).form == resultv2.form)
    # Zero-size regular array over an empty record content; zeros_length pads
    # the outer length to 10 even though each sublist is empty.
    v2b = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.emptyarray.EmptyArray()], ['nest']), 0, zeros_length=10)
    resultv2 = v2b._carry(ak.index.Index(np.array([0], np.int64)), False)
    assert (to_list(resultv2) == [[]])
    assert (v2b.to_typetracer()._carry(ak.index.Index(np.array([0], np.int64)), False).form == resultv2.form)
def train():
    """Main training loop: iterate batches, optimize, log, and checkpoint.

    Relies on module-level state (args, net, dataset, criterion, optimizer,
    dmmn, writer, start_iter, end_iter, step_values, epoch_size,
    save_weights_iteration) set up before this is called.
    """
    # Logging/saving cadence differs between debug and normal runs.
    if (args.run_mode == 'debug'):
        print_iteration = 10
        save_image_iteration = 10
        add_scalar_iteration = 1
        add_histogram_iteration = 10
    else:
        print_iteration = 10
        add_scalar_iteration = 100
        save_image_iteration = 1000
        add_histogram_iteration = 1000  # NOTE(review): not used below — confirm intent
    net.train()
    batch_iterator = None
    data_loader = data.DataLoader(dataset=dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True, collate_fn=collate_fn, pin_memory=False)
    step_index = 0
    current_lr = args.lr
    # Fast-forward the LR step index when resuming from a checkpoint.
    for iteration in range(start_iter):
        if (iteration in step_values):
            step_index += 1
            batch_iterator = None
    for iteration in range(start_iter, end_iter):
        # Restart the data iterator at the beginning of each epoch.
        if ((not batch_iterator) or ((iteration % epoch_size) == 0)):
            batch_iterator = iter(data_loader)
            all_epoch_loss = []
        if (iteration in step_values):
            step_index += 1
        if (batch_iterator is None):
            continue
        (frames_1, target_1, times_1) = next(batch_iterator)
        # Skip batches the collate function rejected.
        if (frames_1 is None):
            continue
        if ((frames_1 is None) or (target_1 is None) or (times_1 is None)):
            continue
        if args.cuda:
            frames_1 = Variable(frames_1.cuda())
            # Targets/times are inputs to the loss only; no gradients needed
            # while moving them to the GPU.
            with torch.no_grad():
                target_1 = [[Variable(target[j].cuda()) for j in range(len(target))] for target in target_1]
                times_1 = Variable(times_1.cuda())
        else:
            # CPU path: tensors stay on the host unchanged.
            pass
        t0 = time.time()
        # Forward pass, loss computation and optimizer step.
        (param, p_c, p_e) = net(frames_1)
        optimizer.zero_grad()
        (loss_l, loss_c, loss_e) = criterion((param, p_c, p_e, dmmn.priors), target_1, times_1)
        loss = ((loss_l + loss_c) + loss_e)
        # Guard against divergence: drop the whole step on NaN loss.
        if torch.isnan(loss):
            print('nan loss ignored')
            continue
        loss.backward()
        optimizer.step()
        all_epoch_loss += [loss.data.cpu()]
        t1 = time.time()
        if ((iteration % print_iteration) == 0):
            print(('Timer: %.4f sec.' % (t1 - t0)))
            print(((((('iter ' + str(iteration)) + ', ') + str(epoch_size)) + (' || epoch: %.4f ' % (iteration / float(epoch_size)))) + (' || Loss: %.4f ||' % all_epoch_loss[(- 1)])), end=' ')
        # Periodic tensorboard logging of learning rate and loss components.
        if (args.tensorboard and ((iteration % add_scalar_iteration) == 0)):
            writer.add_scalar('data/learning_rate', current_lr, iteration)
            writer.add_scalar('loss/loss', loss.data.cpu(), iteration)
            writer.add_scalar('loss-location', loss_l.data.cpu(), iteration)
            writer.add_scalar('loss-classification', loss_c.data.cpu(), iteration)
            writer.add_scalar('loss-exists', loss_e.data.cpu(), iteration)
            show_bboxes(frames_1, target_1, is_save=True, iteration=iteration)
        # Periodic checkpointing of the model weights.
        if ((iteration % save_weights_iteration) == 0):
            print('Saving weights, iter: {}'.format(iteration))
            torch.save(dmmn.state_dict(), os.path.join(args.weights_save_folder, (('dmmn' + repr(iteration)) + '.pth')))
    # Final snapshot after training completes.
    torch.save(dmmn.state_dict(), (((args.weights_save_folder + '') + args.version) + '.pth'))
def rename_state_dict_keys(state_dict):
    """Return a copy of *state_dict* with every 'model.' substring removed
    from its keys (insertion order preserved).
    """
    renamed = OrderedDict(
        (str(key).replace('model.', ''), value)
        for key, value in state_dict.items()
    )
    return renamed
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.