code stringlengths 101 5.91M |
|---|
def simple_total_photo_ion_coefficients(simple_index_nlte_ion):
    """Return a one-column DataFrame of zero photoionization coefficients.

    The frame is indexed by the supplied NLTE ion index and carries one
    zero entry per index element (two entries expected).
    """
    zero_coefficients = [0.0, 0.0]
    frame = pd.DataFrame(zero_coefficients, index=simple_index_nlte_ion)
    return frame
def is_torch_bf16_available():
    """Deprecated alias kept for backward compatibility.

    Emits a FutureWarning and defers to the GPU-specific check.
    """
    deprecation_message = "The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu"
    warnings.warn(deprecation_message, FutureWarning)
    return is_torch_bf16_gpu_available()
def dummy_forward_monkeypatch(module: _torch.nn.Module) -> _MonkeyPatchBase:
    """Create a functional view of *module* plus an encapsulator callback.

    The functional module is built with ``encapsulator=None``; the returned
    callback can later sync buffers and parameters into it.
    """
    def encapsulator(fmodule: _MonkeyPatchBase, module: _torch.nn.Module) -> None:
        # Snapshot parameters before syncing buffers, then push them in.
        params = [p for p in module.parameters()]
        buffer_sync(module, fmodule, None)
        fmodule.update_params(params)

    functional_module = make_functional(module, encapsulator=None)
    return (functional_module, encapsulator)
class DihedralGroup(UniqueRepresentation, Parent):
    """The dihedral group of order 2*n modeled as a rank-2 finite Coxeter group.

    Elements are wrapped tuples holding reduced words in the generators 1 and 2.
    """

    def __init__(self, n=5):
        # The Coxeter presentation below needs n >= 2.
        assert (n >= 2)
        Parent.__init__(self, category=FiniteCoxeterGroups())
        self.n = n

    def _repr_(self):
        return ('The %s-th dihedral group of order %s' % (self.n, (2 * self.n)))

    def __contains__(self, x):
        from sage.structure.all import parent
        # Membership is by exact parent identity, not by coercion.
        return (parent(x) is self)

    # NOTE(review): the bare name below looks like the residue of a stripped
    # decorator (probably ``@cached_method``) — confirm against the original source.
    _method
    def one(self):
        # The identity element: the empty reduced word.
        return self(())

    def index_set(self):
        return (1, 2)

    def degrees(self):
        from sage.rings.integer_ring import ZZ
        # Degrees of the dihedral group I2(n) are 2 and n.
        return (ZZ(2), ZZ(self.n))

    def coxeter_matrix(self):
        return CoxeterMatrix([[1, self.n], [self.n, 1]])

    class Element(ElementWrapper):
        # Elements wrap tuples (reduced words) and compare by wrapped value.
        wrapped_class = tuple
        __lt__ = ElementWrapper._lt_by_value

        def has_right_descent(self, i, positive=False, side='right'):
            """Whether ``i`` is a descent on the given side; ``positive`` flips the answer."""
            reduced_word = self.value
            if (len(reduced_word) == self.parent().n):
                # Longest element: every generator is a descent.
                return (not positive)
            elif (len(reduced_word) == 0):
                # Identity: no descents.
                return positive
            else:
                return ((i == reduced_word[(0 if (side == 'left') else (- 1))]) == (not positive))

        def apply_simple_reflection_right(self, i):
            """Right-multiply by generator ``i``, returning an element with a reduced word."""
            from copy import copy
            reduced_word = copy(self.value)
            n = self.parent().n
            if (len(reduced_word) == n):
                # At the longest element multiplication always shortens the word;
                # which end is dropped depends on the parity of n and on i.
                if (((i == 1) and is_odd(n)) or ((i == 2) and is_even(n))):
                    return self.parent()(reduced_word[:(- 1)])
                else:
                    return self.parent()(reduced_word[1:])
            elif (((len(reduced_word) == (n - 1)) and (not self.has_descent(i))) and (reduced_word[0] == 2)):
                # Normalize the longest element to the representative starting with 1.
                return self.parent()(((1,) + reduced_word))
            elif self.has_descent(i):
                # Descent: the product is shorter, so drop the last letter.
                return self.parent()(reduced_word[:(- 1)])
            else:
                # Ascent: append the generator.
                return self.parent()((reduced_word + (i,)))
def test_copy():
    """img_as_float must alias its input unless force_copy is requested."""
    source = np.array([1], dtype=np.float64)
    aliased = img_as_float(source)
    duplicated = img_as_float(source, force_copy=True)
    assert (aliased is source)
    assert (duplicated is not source)
class _ApproximateKernel(gpflow.kernels.Kernel):
    """Kernel approximated by explicit features: K(X, X2) ≈ phi(X) diag(c) phi(X2)^T.

    ``phi`` comes from ``feature_functions`` and ``c`` from ``feature_coefficients``.
    """

    def __init__(self, feature_functions: tf.keras.layers.Layer, feature_coefficients: TensorType):
        """
        :param feature_functions: layer mapping inputs ``X`` to features ``phi(X)``.
        :param feature_coefficients: per-feature coefficients, broadcastable against ``phi``.
        """
        # BUGFIX: the gpflow Kernel base class was never initialized, leaving
        # attributes such as active_dims/name unset for downstream gpflow code.
        super().__init__()
        self._feature_functions = feature_functions
        self._feature_coefficients = feature_coefficients

    def K(self, X: TensorType, X2: Optional[TensorType]=None) -> tf.Tensor:
        """Full covariance between ``X`` and ``X2`` (defaults to ``X``)."""
        phi = self._feature_functions(X)
        if (X2 is None):
            phi2 = phi
        else:
            phi2 = self._feature_functions(X2)
        # phi @ diag(c) @ phi2^T, with the diagonal applied by broadcasting.
        r = tf.matmul(phi, (tf.transpose(self._feature_coefficients) * phi2), transpose_b=True)
        (N1, N2) = (tf.shape(phi)[0], tf.shape(phi2)[0])
        tf.debugging.assert_equal(tf.shape(r), [N1, N2])
        return r

    def K_diag(self, X: TensorType) -> tf.Tensor:
        """Diagonal of the covariance: sum_j c_j * phi_j(X)^2."""
        phi_squared = (self._feature_functions(X) ** 2)
        r = tf.reduce_sum((phi_squared * tf.transpose(self._feature_coefficients)), axis=1)
        N = tf.shape(X)[0]
        tf.debugging.assert_equal(tf.shape(r), [N])
        return r
def get_glosary_info():
    """Map each workflow-field glossary name to (docstring, units label)."""
    return {
        field.glossary_name: (field.__doc__, field().find_units_label())
        for field in get_wf_fields()
    }
class TestCLI(SnipsTest):
    """End-to-end tests for the snips-nlu command line interface.

    ``setUp`` materializes a fixture directory with a serialized dataset and
    reserves a fresh (non-existing) temporary path for engine/metrics output.
    """

    fixture_dir = (TEST_PATH / 'cli_fixture')

    def setUp(self):
        super(TestCLI, self).setUp()
        if (not self.fixture_dir.exists()):
            self.fixture_dir.mkdir()
        dataset_stream = io.StringIO(u'\n---\ntype: intent\nname: MakeTea\nutterances:\n - make me a [beverage_temperature:Temperature](hot) cup of tea\n - make me [number_of_cups:snips/number](five) tea cups\n - i want [number_of_cups] cups of [beverage_temperature](boiling hot) tea pls\n - can you prepare [number_of_cups] cup of [beverage_temperature](cold) tea ?\n\n---\ntype: intent\nname: MakeCoffee\nutterances:\n - make me [number_of_cups:snips/number](one) cup of coffee please\n - brew [number_of_cups] cups of coffee\n - can you prepare [number_of_cups] cup of coffee')
        beverage_dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
        self.beverage_dataset_path = (self.fixture_dir / 'beverage_dataset.json')
        if self.beverage_dataset_path.exists():
            self.beverage_dataset_path.unlink()
        with self.beverage_dataset_path.open(mode='w', encoding='utf8') as f:
            f.write(json_string(beverage_dataset))
        # Draw candidate temp names until an unused one is found.
        self.tmp_file_path = (self.fixture_dir / next(tempfile._get_candidate_names()))
        while self.tmp_file_path.exists():
            self.tmp_file_path = (self.fixture_dir / next(tempfile._get_candidate_names()))

    def tearDown(self):
        # Remove the whole fixture directory between tests.
        if self.fixture_dir.exists():
            shutil.rmtree(str(self.fixture_dir))

    def test_train(self):
        train(self.beverage_dataset_path, str(self.tmp_file_path))
        if (not self.tmp_file_path.exists()):
            self.fail('No trained engine generated')
        msg = 'Failed to create an engine from engine dict.'
        with self.fail_if_exception(msg):
            SnipsNLUEngine.from_path(self.tmp_file_path)

    def test_parse(self):
        # Train a small engine, then check the CLI parse output verbatim.
        dataset_stream = io.StringIO(u'\n---\ntype: intent\nname: MakeTea\nutterances:\n - make me a [beverage_temperature:Temperature](hot) cup of tea\n - make me [number_of_cups:snips/number](five) tea cups\n\n---\ntype: intent\nname: MakeCoffee\nutterances:\n - brew [number_of_cups:snips/number](one) cup of coffee please\n - make me [number_of_cups] cups of coffee')
        dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
        nlu_engine = SnipsNLUEngine().fit(dataset)
        nlu_engine.persist(self.tmp_file_path)
        output_target = io.StringIO()
        with self.fail_if_exception('Failed to parse using CLI script'):
            with redirect_stdout(output_target):
                parse(str(self.tmp_file_path), 'Make me two cups of coffee')
        output = output_target.getvalue()
        expected_output = '{\n  "input": "Make me two cups of coffee",\n  "intent": {\n    "intentName": "MakeCoffee",\n    "probability": 1.0\n  },\n  "slots": [\n    {\n      "entity": "snips/number",\n      "range": {\n        "end": 11,\n        "start": 8\n      },\n      "rawValue": "two",\n      "slotName": "number_of_cups",\n      "value": {\n        "kind": "Number",\n        "value": 2.0\n      }\n    }\n  ]\n}\n'
        self.assertEqual(expected_output, output)

    def test_parse_with_intents_filter(self):
        # Intent names may contain commas; the filter string must be quotable.
        dataset_stream = io.StringIO(u'\n---\ntype: intent\nname: MakeTea\nutterances:\n - make me a [beverage_temperature:Temperature](hot) cup of tea\n - make me [number_of_cups:snips/number](five) tea cups\n\n---\ntype: intent\nname: Make,Coffee\nutterances:\n - brew [number_of_cups:snips/number](one) cup of coffee please\n - make me [number_of_cups] cups of coffee')
        dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
        nlu_engine = SnipsNLUEngine().fit(dataset)
        nlu_engine.persist(self.tmp_file_path)
        output_target = io.StringIO()
        with self.fail_if_exception('Failed to parse using CLI script'):
            with redirect_stdout(output_target):
                parse(str(self.tmp_file_path), 'Make me two cups of coffee', False, 'MakeTea,"Make,Coffee"')
        output = output_target.getvalue()
        expected_output = '{\n  "input": "Make me two cups of coffee",\n  "intent": {\n    "intentName": "Make,Coffee",\n    "probability": 1.0\n  },\n  "slots": [\n    {\n      "entity": "snips/number",\n      "range": {\n        "end": 11,\n        "start": 8\n      },\n      "rawValue": "two",\n      "slotName": "number_of_cups",\n      "value": {\n        "kind": "Number",\n        "value": 2.0\n      }\n    }\n  ]\n}\n'
        self.assertEqual(expected_output, output)

    def test_generate_dataset(self):
        yaml_string = '\n# searchFlight Intent\n---\ntype: intent\nname: searchFlight\nutterances:\n  - find me a flight to [destination:city](Lima) [date:snips/datetime](tonight)\n\n# City Entity\n---\ntype: entity\nname: city\nvalues:\n  - [new york, big apple]'
        self.tmp_file_path = self.tmp_file_path.with_suffix('.yaml')
        with self.tmp_file_path.open(mode='w', encoding='utf8') as f:
            f.write(unicode_string(yaml_string))
        out = io.StringIO()
        with redirect_stdout(out):
            generate_dataset('en', str(self.tmp_file_path))
        printed_value = out.getvalue()
        expected_value = '{\n  "entities": {\n    "city": {\n      "automatically_extensible": true,\n      "data": [\n        {\n          "synonyms": [\n            "big apple"\n          ],\n          "value": "new york"\n        }\n      ],\n      "matching_strictness": 1.0,\n      "use_synonyms": true\n    },\n    "snips/datetime": {}\n  },\n  "intents": {\n    "searchFlight": {\n      "utterances": [\n        {\n          "data": [\n            {\n              "text": "find me a flight to "\n            },\n            {\n              "entity": "city",\n              "slot_name": "destination",\n              "text": "Lima"\n            },\n            {\n              "text": " "\n            },\n            {\n              "entity": "snips/datetime",\n              "slot_name": "date",\n              "text": "tonight"\n            }\n          ]\n        }\n      ]\n    }\n  },\n  "language": "en"\n}\n'
        self.assertEqual(expected_value, printed_value)

    def test_cross_val_metrics(self):
        cross_val_metrics(str(self.beverage_dataset_path), str(self.tmp_file_path))
        if (not self.tmp_file_path.exists()):
            self.fail('No metrics found')

    def test_train_test_metrics(self):
        train_test_metrics(str(self.beverage_dataset_path), str(self.beverage_dataset_path), str(self.tmp_file_path))
        if (not self.tmp_file_path.exists()):
            self.fail('No metrics found')

    # NOTE(review): the bare strings below look like the residue of stripped
    # ``@patch(...)`` decorators for test_main_arg_parser — confirm against the
    # original source; as written they are no-op expression statements.
    ('snips_nlu.cli.versions.print')
    ('snips_nlu.cli.metrics.train_test_metrics')
    ('snips_nlu.cli.metrics.cross_val_metrics')
    ('snips_nlu.cli.link.link')
    ('snips_nlu.cli.download_entity.download_language_builtin_entities')
    ('snips_nlu.cli.download_entity.download_builtin_entity')
    ('snips_nlu.cli.download.download_all_languages')
    ('snips_nlu.cli.download.download')
    ('snips_nlu.cli.inference.parse')
    ('snips_nlu.cli.training.train')
    ('snips_nlu.cli.generate_dataset.generate_dataset')
    def test_main_arg_parser(self, mocked_generate_dataset, mocked_train, mocked_parse, mocked_download, mocked_download_all_languages, mocked_download_entity, mocked_download_language_entities, mocked_link, mocked_cross_val_metrics, mocked_train_test_metrics, mocked_print_function):
        # Each tuple is (CLI args string, mocked target, expected call args).
        arg_parser = get_arg_parser()
        tests = [('generate-dataset en intent.yaml entity.yaml', mocked_generate_dataset, ['en', 'intent.yaml', 'entity.yaml']), ('train -c config.json -r 42 dataset.json engine -vv', mocked_train, ['dataset.json', 'engine', 'config.json', 2, 42]), ('parse engine', mocked_parse, ['engine', None, 0, None]), ('parse engine -f MakeCoffee,MakeTea', mocked_parse, ['engine', None, 0, 'MakeCoffee,MakeTea']), ('parse engine -q foobar', mocked_parse, ['engine', 'foobar', 0, None]), ('download en', mocked_download, ['en', False]), ('download en -- --user', mocked_download, ['en', False, '--user']), ('download-all-languages', mocked_download_all_languages, []), ('download-all-languages -- --user', mocked_download_all_languages, ['--user']), ('download-entity snips/musicArtist en', mocked_download_entity, ['snips/musicArtist', 'en']), ('download-entity snips/musicArtist en -- --user', mocked_download_entity, ['snips/musicArtist', 'en', '--user']), ('download-language-entities en', mocked_download_language_entities, ['en']), ('download-language-entities en -- --user', mocked_download_language_entities, ['en', '--user']), ('link origin dest --force', mocked_link, ['origin', 'dest', True]), ('cross-val-metrics -c config.json -n 5 -t 0.5 -s -i dataset.json metrics.json -vv', mocked_cross_val_metrics, ['dataset.json', 'metrics.json', 'config.json', 5, 0.5, True, True, 2]), ('train-test-metrics -c config.json -s -i train.json test.json metrics.json -vv', mocked_train_test_metrics, ['train.json', 'test.json', 'metrics.json', 'config.json', True, True, 2]), ('version', mocked_print_function, [__version__]), ('model-version', mocked_print_function, [__model_version__])]
        for (args, expected_fn, expected_fn_args) in tests:
            sys.argv = (['snips-nlu'] + args.split())
            parsed_args = arg_parser.parse_args()
            parsed_args.func(parsed_args)
            expected_fn.assert_called_with(*expected_fn_args)
def psnr(img1, img2):
    """Peak signal-to-noise ratio of img1 against reference img2, in dB.

    The peak value is taken from img2; identical images return the
    conventional cap of 100 instead of infinity.
    """
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        # Identical inputs: avoid log10(inf).
        return 100
    peak = np.max(img2)
    ratio = peak / sqrt(mse)
    return 20 * log10(ratio)
def _has_sufficient_memory(device, size):
    """Return True if *device* has at least *size* bytes of memory available.

    Raises unittest.SkipTest for devices that cannot be checked (xla, unknown
    types) or when psutil is missing for the CPU check.
    """
    if device.startswith('cuda'):
        # NOTE(review): checks device 0 regardless of the index in *device* —
        # presumably fine for single-GPU CI; confirm for multi-GPU setups.
        return (torch.cuda.is_available() and (torch.cuda.get_device_properties(0).total_memory >= size))
    if (device == 'xla'):
        raise unittest.SkipTest('TODO: Memory availability checks for XLA?')
    if (device != 'cpu'):
        raise unittest.SkipTest('Unknown device type')
    if (not HAS_PSUTIL):
        raise unittest.SkipTest('Need psutil to determine if memory is sufficient')
    if (TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN):
        # Sanitizer builds inflate memory usage, so demand a large margin.
        effective_size = (size * 10)
    else:
        effective_size = size
    if (psutil.virtual_memory().available < effective_size):
        # Try to reclaim memory before the final check.
        gc.collect()
    return (psutil.virtual_memory().available >= effective_size)
def mi(x, y, k=3, base=2):
    """Estimate the mutual information I(x; y) with a k-nearest-neighbour
    (Kraskov-style) estimator.

    x, y: equal-length sample lists of vectors; k: neighbour count;
    base: logarithm base of the returned value.
    """
    assert (len(x) == len(y)), 'Lists should have same length'
    assert (k <= (len(x) - 1)), 'Set k smaller than num. samples - 1'
    (x, y) = flatten(*to_np_array(x, y))
    # Tiny additive noise breaks ties so neighbour distances are unique.
    intens = 1e-10
    x = [list((p + (intens * nr.rand(len(x[0]))))) for p in x]
    y = [list((p + (intens * nr.rand(len(y[0]))))) for p in y]
    points = zip2(x, y)
    tree = ss.cKDTree(points)
    # Distance to the k-th neighbour in the joint space (Chebyshev metric, p=inf);
    # query with k+1 because the closest hit is the point itself.
    dvec = [tree.query(point, (k + 1), p=float('inf'))[0][k] for point in points]
    (a, b, c, d) = (avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(len(x)))
    # KSG formula: (-<psi(nx)> - <psi(ny)> + psi(k) + psi(N)) / log(base).
    return (((((- a) - b) + c) + d) / log(base))
def simulator(theta, l1=0.5, l2=0.5, l3=1.0, **kwargs):
    """Forward kinematics of a planar three-link arm on a sliding base.

    theta[0] is the base offset added to the horizontal coordinate;
    theta[1:4] are the joint angles. Returns np.array([x1, x2]).
    """
    angle1 = theta[1]
    angle2 = theta[1] + theta[2]
    angle3 = theta[1] + theta[2] + theta[3]
    x1 = l1 * np.sin(angle1) + l2 * np.sin(angle2) + l3 * np.sin(angle3) + theta[0]
    x2 = l1 * np.cos(angle1) + l2 * np.cos(angle2) + l3 * np.cos(angle3)
    return np.array([x1, x2])
class AmazonPostReview(VirtualFunctionTool):
    """Virtual tool spec: post a public review for a purchased product.

    Declarative metadata only — the schema is consumed by the tool framework.
    """
    name = 'AmazonPostReview'
    summary = 'Post a review for a previous product that was purchased.'
    # Input schema: both the product identifier and the review text are required.
    parameters: List[ArgParameter] = [{'name': 'product_id', 'type': 'string', 'description': 'The unique identifier of the product.', 'required': True}, {'name': 'review', 'type': 'string', 'description': 'The public review to post.', 'required': True}]
    # Output schema: a single boolean success flag.
    returns: List[ArgReturn] = [{'name': 'success', 'type': 'boolean', 'description': 'Whether the review was successfully posted.'}]
    # Declared failure mode: unknown product id.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The product with the specified 'product_id' was not found."}]
def _hc(k, cs, rho, omega):
return ((((cs / sin(omega)) * (rho ** k)) * sin((omega * (k + 1)))) * greater(k, (- 1))) |
def create_dummy_files(backend_specific_objects=None):
    """Build the text of one autogenerated dummy-object module per backend.

    When no mapping is supplied, it is read from the package init files.
    Returns a dict mapping backend name to file contents.
    """
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    header = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
    header += 'from ..utils import DummyObject, requires_backends\n\n'
    files = {}
    for backend, objects in backend_specific_objects.items():
        # e.g. "torch_and_vision" -> '["torch", "vision"]'
        backend_name = ('[' + ', '.join(f'"{b}"' for b in backend.split('_and_'))) + ']'
        body = '\n'.join(create_dummy_object(o, backend_name) for o in objects)
        files[backend] = header + body
    return files
def _resnet(arch: str, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], pretrained: bool, progress: bool, **kwargs: Any) -> ResNet:
    """Instantiate a ResNet from a block type and per-stage layer counts.

    ``arch`` and ``progress`` are accepted for signature compatibility with the
    torchvision builder but are unused here. Aborts the whole process (SystemExit)
    if ``pretrained`` is requested — pre-trained weights are disallowed by policy.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        sys.exit('No pre-trained model is allowed here!')
    return model
class BidirectionalGRU(nn.Module):
    """Sequence encoder: LayerNorm -> GELU -> bidirectional single-layer GRU -> Dropout.

    The output feature size is ``2 * hidden_size`` since both directions are concatenated.
    """

    def __init__(self, rnn_dim, hidden_size, dropout, batch_first):
        super(BidirectionalGRU, self).__init__()
        self.BiGRU = nn.GRU(input_size=rnn_dim, hidden_size=hidden_size, num_layers=1, batch_first=batch_first, bidirectional=True)
        self.layer_norm = nn.LayerNorm(rnn_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Normalize and activate the input, run the GRU, and regularize the output."""
        normalized = self.layer_norm(x)
        activated = F.gelu(normalized)
        sequence_out, _hidden = self.BiGRU(activated)
        return self.dropout(sequence_out)
class WFRadiationMeshNvx(RadiationField):
    """Glossary field exposing ``wf._srwl_wf.mesh.nvx``."""

    glossary_name = 'params/Mesh/nvx'

    def __init__(self, wf):
        super(WFRadiationMeshNvx, self).__init__(wf)
        self.attributes.update({'units': '-', 'limits': '[2:LONG_MAX]', 'alias': ''})

    # NOTE(review): the second ``value`` definition below shadows this getter —
    # the pair looks like a @property/@value.setter whose decorators were lost.
    # As written, only the setter variant is bound; confirm against the original source.
    def value(self):
        return self._wf._srwl_wf.mesh.nvx

    def value(self, val):
        # Coerce to int before writing back into the SRW mesh structure.
        self._wf._srwl_wf.mesh.nvx = int(val)
class SeparableConv2DBNFoldingTest(BaseBatchNormalizationFolding):
    """Batch-norm folding test case using a SeparableConv2D as the linear layer."""

    def __init__(self, unit_test):
        super().__init__(unit_test, linear_layer=layers.SeparableConv2D)

    def create_networks(self):
        """Build SeparableConv2D -> BatchNorm -> ReLU with identity-like BN init,
        so folding the BN should leave the network numerically unchanged."""
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        x = self.linear_layer(1, 3, padding='same')(inputs)
        x = layers.BatchNormalization(beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones')(x)
        x = layers.Activation('relu')(x)
        return tf.keras.models.Model(inputs=inputs, outputs=x)
def roll_buffer(buffer, *args, **kwargs):
    """Roll every field of a Buffer (states, actions, rewards, dones) by the same shift."""
    def _roll(tensor):
        return torch.roll(tensor, *args, **kwargs)

    return Buffer(_roll(buffer.states), _roll(buffer.actions), _roll(buffer.rewards), _roll(buffer.dones))
# NOTE(review): the bare call below looks like the residue of a stripped
# decorator (probably ``@fpga_test()``) — confirm against the original source.
_test()
def test_kernels_inside_component_1():
    """Check that concurrent-kernel detection splits this SDFG into 5 FPGA
    kernels and that the compiled program matches the NumPy reference."""
    # NOTE(review): ``to_sdfg()`` below implies this inner function was a
    # ``@dace.program``; its decorator also appears to have been stripped.
    def kernels_inside_component_1(x: dace.float32[8], y: dace.float32[8], v: dace.float32[8], w: dace.float32[8], z: dace.float32[8], t: dace.float32[8], alpha: dace.float32, beta: dace.float32):
        tmp1 = (x + y)
        tmp2 = (v + w)
        tmp3 = (tmp1 + tmp2)
        z[:] = (alpha * tmp3)
        t[:] = (beta * tmp3)
    x = np.random.rand(8).astype(np.float32)
    y = np.random.rand(8).astype(np.float32)
    v = np.random.rand(8).astype(np.float32)
    w = np.random.rand(8).astype(np.float32)
    z = np.random.rand(8).astype(np.float32)
    t = np.random.rand(8).astype(np.float32)
    alpha = 1.0
    beta = 2.0
    sdfg = kernels_inside_component_1.to_sdfg()
    sdfg.apply_transformations([FPGATransformSDFG, InlineSDFG])
    # Compile with concurrent kernel detection enabled, then verify the split.
    with config.set_temporary('compiler', 'fpga', 'concurrent_kernel_detection', value=True):
        program = sdfg.compile()
    assert (count_kernels(sdfg) == 5)
    program(x=x, y=y, v=v, w=w, z=z, t=t, alpha=alpha, beta=beta)
    ref_z = (alpha * (((x + y) + v) + w))
    ref_t = (beta * (((x + y) + v) + w))
    assert np.allclose(z, ref_z)
    assert np.allclose(t, ref_t)
    return sdfg
def test_bubbles_from_slic():
    """SLIC-based bubble detection should agree with the ground-truth ball at its center.

    Relies on module-level fixtures ``data_ball`` (binary ball volume) and
    ``rad`` (its radius) — presumably defined elsewhere in the test module.
    """
    # Invert the volume: bubbles are holes in the material.
    out = t2c.bubbles_from_slic((1 - data_ball), n_segments=200)
    assert (out[(rad, rad, rad)] == data_ball[(rad, rad, rad)])
class ConfigListOfType():
    """List-like config container that coerces every element to ``_type``.

    Concrete subclasses are produced with :meth:`buildWith`, which fixes the
    element type; the base class itself has ``_type = None`` and only accepts
    empty iterables.
    """

    _type = None

    def __init__(self, iterable: Iterable=None):
        """Coerce each element of *iterable* (default: empty) through ``_type``."""
        self._values = []
        if (iterable is None):
            iterable = []
        for value in iterable:
            self._values.append(self._type(value))

    def __len__(self) -> int:
        return len(self._values)

    def __getitem__(self, item):
        return self._values[item]

    def __iter__(self):
        return self._values.__iter__()

    def __repr__(self):
        return self._values.__repr__()

    def __eq__(self, other):
        # Delegates to list equality, so comparison against plain lists works.
        return (self._values == other)

    # BUGFIX: restored the @classmethod decorator. Without it, calling
    # ConfigListOfType.buildWith(int) bound ``int`` to ``cls`` and failed with
    # a missing-argument TypeError, making the factory unusable.
    @classmethod
    def buildWith(cls, defaultType: type):
        """Create a subclass whose elements are coerced to *defaultType*."""
        className = (cls.__name__ + defaultType.__name__)
        return type(className, (cls,), {'_type': defaultType})
def vis_num_instance(cat_obj_count):
    """Plot a histogram of total object instances per image.

    cat_obj_count: per-category counts summed over axis 0 to get per-image
    totals — presumably shaped (num_categories, num_images); confirm with caller.
    Saves the figure to vis_fig/instance_dist_hist.pdf.
    """
    total_instances_per_image = np.sum(cat_obj_count, axis=0)
    # One bin per integer count value in the observed range.
    plot_hist(total_instances_per_image, bins=((max(total_instances_per_image) - min(total_instances_per_image)) + 1), save_path='vis_fig/instance_dist_hist.pdf')
def test_solo():
    """Smoke-test SOLO doublet detection built from a trained SCVI model,
    both on the model's own data and on a second AnnData object."""
    n_latent = 5
    adata = synthetic_iid()
    SCVI.setup_anndata(adata)
    model = SCVI(adata, n_latent=n_latent)
    model.train(1, check_val_every_n_epoch=1, train_size=0.5)
    # SOLO derived from the SCVI model, using the same data.
    solo = SOLO.from_scvi_model(model)
    solo.train(1, check_val_every_n_epoch=1, train_size=0.9)
    assert ('validation_loss' in solo.history.keys())
    solo.predict()
    # Same flow, but with a fresh AnnData passed explicitly.
    bdata = synthetic_iid()
    solo = SOLO.from_scvi_model(model, bdata)
    solo.train(1, check_val_every_n_epoch=1, train_size=0.9)
    assert ('validation_loss' in solo.history.keys())
    solo.predict()
def grad(outputs: _TensorOrTensors, inputs: _TensorOrTensors, grad_outputs: Optional[_TensorOrTensors]=None, retain_graph: Optional[bool]=None, create_graph: bool=False, only_inputs: bool=True, allow_unused: bool=False) -> Tuple[(torch.Tensor, ...)]:
    """Compute and return the sum of gradients of ``outputs`` w.r.t. ``inputs``.

    ``grad_outputs`` are the vector-Jacobian product weights (defaults built by
    ``_make_grads``); ``only_inputs`` is deprecated and ignored.
    """
    # Normalize single tensors to tuples.
    outputs = ((outputs,) if isinstance(outputs, torch.Tensor) else tuple(outputs))
    inputs = ((inputs,) if isinstance(inputs, torch.Tensor) else tuple(inputs))
    # __torch_function__ dispatch must see both outputs and inputs.
    overridable_args = (outputs + inputs)
    if has_torch_function(overridable_args):
        return handle_torch_function(grad, overridable_args, outputs, inputs, grad_outputs=grad_outputs, retain_graph=retain_graph, create_graph=create_graph, only_inputs=only_inputs, allow_unused=allow_unused)
    if (not only_inputs):
        warnings.warn('only_inputs argument is deprecated and is ignored now (defaults to True). To accumulate gradient for other parts of the graph, please use torch.autograd.backward.')
    grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(outputs))
    grad_outputs_ = _make_grads(outputs, grad_outputs_)
    if (retain_graph is None):
        # Creating a higher-order graph implies the current one must be kept.
        retain_graph = create_graph
    return Variable._execution_engine.run_backward(outputs, grad_outputs_, retain_graph, create_graph, inputs, allow_unused)
class InstallHeaders(Command):
    """Distutils command that copies the distribution's C/C++ headers into
    ``install_dir``, preserving each header's relative directory."""

    description = 'install C/C++ header files'
    user_options = [('install-dir=', 'd', 'directory to install header files to'), ('force', 'f', 'force installation (overwrite existing files)')]
    boolean_options = ['force']

    def initialize_options(self):
        # Defaults; the real install_dir comes from the parent 'install' command.
        self.install_dir = None
        self.force = 0
        self.outfiles = []

    def finalize_options(self):
        self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force'))

    def mkdir_and_copy_file(self, header):
        """Copy one header into its destination subdirectory, creating it if needed."""
        target_dir = os.path.join(self.install_dir, os.path.dirname(header))
        if not os.path.exists(target_dir):
            self.mkpath(target_dir)
        return self.copy_file(header, target_dir)

    def run(self):
        headers = self.distribution.headers
        if not headers:
            return
        self.mkpath(self.install_dir)
        for header in headers:
            copied, _dest = self.mkdir_and_copy_file(header)
            self.outfiles.append(copied)

    def get_inputs(self):
        return self.distribution.headers or []

    def get_outputs(self):
        return self.outfiles
class FeedbackBlock(nn.Module):
    """Feedback block that iteratively refines features through paired
    up-/down-projection blocks, carrying a hidden state across forward calls.

    NOTE(review): the module is stateful across calls (``last_hidden``);
    ``need_reset`` must be set to True externally to restart a sequence.
    """

    def __init__(self, mid_channels, num_blocks, upscale_factor, padding=2, prelu_init=0.2):
        super().__init__()
        # Up/down (de)convolutions sized to realize the requested upscale factor.
        stride = upscale_factor
        kernel_size = (upscale_factor + 4)
        self.num_blocks = num_blocks
        self.need_reset = True
        self.last_hidden = None
        # 1x1 conv fusing the current input with the previous hidden state.
        self.conv_first = nn.Sequential(nn.Conv2d((2 * mid_channels), mid_channels, kernel_size=1), nn.PReLU(init=prelu_init))
        self.up_blocks = nn.ModuleList()
        self.down_blocks = nn.ModuleList()
        self.lr_blocks = nn.ModuleList()
        self.hr_blocks = nn.ModuleList()
        for idx in range(self.num_blocks):
            self.up_blocks.append(nn.Sequential(nn.ConvTranspose2d(mid_channels, mid_channels, kernel_size, stride, padding), nn.PReLU(init=prelu_init)))
            self.down_blocks.append(nn.Sequential(nn.Conv2d(mid_channels, mid_channels, kernel_size, stride, padding), nn.PReLU(init=prelu_init)))
            if (idx > 0):
                # 1x1 convs compressing the growing feature concatenations.
                self.lr_blocks.append(nn.Sequential(nn.Conv2d((mid_channels * (idx + 1)), mid_channels, kernel_size=1), nn.PReLU(init=prelu_init)))
                self.hr_blocks.append(nn.Sequential(nn.Conv2d((mid_channels * (idx + 1)), mid_channels, kernel_size=1), nn.PReLU(init=prelu_init)))
        self.conv_last = nn.Sequential(nn.Conv2d((num_blocks * mid_channels), mid_channels, kernel_size=1), nn.PReLU(init=prelu_init))

    def forward(self, x):
        """One feedback iteration: fuse x with the previous hidden state, run the
        up/down projection cascade, and store the output as the new hidden state."""
        if self.need_reset:
            # First iteration: seed the hidden state with the input itself.
            self.last_hidden = x
            self.need_reset = False
        x = torch.cat((x, self.last_hidden), dim=1)
        x = self.conv_first(x)
        lr_features = [x]
        hr_features = []
        for idx in range(self.num_blocks):
            lr = torch.cat(lr_features, 1)
            if (idx > 0):
                # Compress all accumulated LR features before up-projection.
                lr = self.lr_blocks[(idx - 1)](lr)
            hr = self.up_blocks[idx](lr)
            hr_features.append(hr)
            hr = torch.cat(hr_features, 1)
            if (idx > 0):
                hr = self.hr_blocks[(idx - 1)](hr)
            lr = self.down_blocks[idx](hr)
            lr_features.append(lr)
        # The seed feature (lr_features[0]) is excluded from the final fusion.
        output = torch.cat(lr_features[1:], 1)
        output = self.conv_last(output)
        self.last_hidden = output
        return output
class QuantizeRecordingToTrainingModifier(FunctionModifier):
    """Graph modifier that turns recorded quantization statistics into
    QuantizeLinear/DequantizeLinear operations for quantization-aware training."""

    class SimulatedQNN(object):
        """Simulated-quantization strategy: wraps function inputs/outputs with
        quantize/dequantize pairs using previously recorded scale/zero-point
        parameters, looked up per function via '<type_name>-<rank>' scopes."""

        def __init__(self, functions_ranks, modifier=None, config=None):
            self._config = config
            self._modifier = modifier
            # Maps quantized variables to their (scale, zero_point) pair.
            self._map_input_scale_zeropoint = defaultdict(list)
            self.functions_ranks = functions_ranks

        def get_function_rank(self, f):
            # Rank disambiguates multiple functions of the same type; -1 if unknown.
            rank = self.functions_ranks.get(f, (- 1))
            return rank

        def _quantize_outputs(self, f, output, axes, cfg):
            """Quantize f's output using the scale/zero-point recorded under the
            scope of whichever consumer function holds the parameters."""
            rm = cfg.round_mode
            nr = cfg.narrow_range
            dt = cfg.dtype
            pow2 = cfg.pow2.value
            h = output
            for next_func in f.outputs[0].function_references:
                next_fn = next_func.info.type_name
                next_func_rank = self.get_function_rank(next_func)
                if (next_fn == 'Sink'):
                    # Terminal output: nothing downstream to quantize for.
                    return h
                name = 'x0'
                if (next_fn in self._modifier._fct_bin_set):
                    # Binary functions: pick the parameter name matching this input slot.
                    for (i, elm) in enumerate(next_func.inputs):
                        if (f.outputs[0] == elm):
                            name = 'x{}'.format(i)
                scope = '{}-{}'.format(next_fn, next_func_rank)
                with nn.parameter_scope(scope):
                    (sy, zpy) = cfg.recorder_activation.get_scale_zeropoint(h, axes=axes, narrow_range=nr, round_method=pow2, name=name)
                if (sy is not None):
                    # First consumer with recorded parameters wins.
                    break
            h = self.try_to_quantize(h, sy, zpy, rm, nr, dt)
            self._map_input_scale_zeropoint[h] = (sy, zpy)
            return h

        def try_to_quantize(self, x, scale, zero_point, round_mode, narrow_range, dtype):
            # No-op when no scale has been recorded for this variable.
            x = (F.quantize_linear(x, scale, zero_point, round_mode, narrow_range, dtype) if (scale is not None) else x)
            return x

        def try_to_dequantize(self, x, scale, zero_point):
            # No-op when no scale has been recorded for this variable.
            x = (F.dequantize_linear(x, scale, zero_point) if (scale is not None) else x)
            return x

        def get_quantization_params(self, variable):
            """Return (scale, zero_point) if *variable* is the output of a
            QuantizeLinear, else (None, None)."""
            (scale, zero_point) = (None, None)
            if ((variable.parent is not None) and (variable.parent.info.type_name == 'QuantizeLinear')):
                scale = variable.parent.inputs[1].d
                zero_point = variable.parent.inputs[2].d
            return (scale, zero_point)

        def requantize_bias(self, f, inputs, scope, rm, nr, dt, skip_bias=False):
            """For affine/conv functions with a bias, quantize the bias with
            scale = scale(x) * scale(w) and the input's zero-point."""
            def with_bias():
                return (True if (len(inputs) == 3) else False)
            functions_with_bias = ['Affine', 'Convolution', 'Deconvolution', 'DepthwiseConvolution', 'DepthwiseDeconvolution']
            fn = f.info.type_name
            if ((fn in functions_with_bias) and with_bias()):
                (x, w, b) = inputs
                (sx, zpx, sw) = (None, None, None)
                (sx, zpx) = self.get_quantization_params(x)
                (sw, _) = self.get_quantization_params(w)
                if ((sx is not None) and (sw is not None)):
                    # Bias scale is the product of input and weight scales.
                    sbd = (np.reshape(sx.copy(), (1,)) * np.reshape(sw.copy(), (1,)))
                    with nn.parameter_scope(scope):
                        sb = nn.parameter.get_parameter_or_create('scale-b', (1,), sbd, False)
                        zpbd = np.reshape(zpx.copy(), (1,))
                        zpb = nn.parameter.get_parameter_or_create('zeropoint-b', (1,), zpbd, False)
                    if ((b.parent is not None) and (b.parent.info.type_name == 'QuantizeLinear')):
                        # Strip a pre-existing quantization before re-quantizing.
                        b = b.parent.inputs[0]
                    b = (F.quantize_linear(b, sb, zpb, rm, nr, dt) if (not skip_bias) else b)
                    inputs[2] = b
            return inputs

        def quantize_inputs(self, f, inputs, scope, cfg, axes, rm, nr, dt, pow2):
            """Quantize f's inputs: rank-0 activations with the activation recorder,
            trailing parameters (weights) with the weight recorder, then the bias."""
            fn = f.info.type_name
            # params_idx splits activation inputs from parameter inputs.
            params_idx = 1
            if (fn in ['Concatenate', 'Stack']):
                params_idx = len(inputs)
            if (fn in self._modifier._fct_bin_set):
                params_idx = 2
            inps = []
            for (i, input_var) in enumerate(inputs[:params_idx]):
                if (input_var.rank == 0):
                    # Only graph roots (rank 0) are quantized here; intermediate
                    # activations are handled via _quantize_outputs upstream.
                    with nn.parameter_scope(scope):
                        (sx, zpx) = cfg.recorder_activation.get_scale_zeropoint(input_var, axes=axes, narrow_range=nr, round_method=pow2, name='x{}'.format(i))
                    input_var = self.try_to_quantize(input_var, sx, zpx, rm, nr, dt)
                    self._map_input_scale_zeropoint[input_var] = (sx, zpx)
                inps.append(input_var)
            for (i, input_parameter) in enumerate(inputs[params_idx:]):
                with nn.parameter_scope(scope):
                    (sw, zpw) = cfg.recorder_weight.get_scale_zeropoint(input_parameter, axes=axes, narrow_range=nr, round_method=pow2, name='w{}'.format(i))
                input_parameter = self.try_to_quantize(input_parameter, sw, zpw, rm, nr, dt)
                inps.append(input_parameter)
            inps = self.requantize_bias(f, inps, scope, rm, nr, dt, cfg.skip_bias)
            return inps

        def dequantize_inputs(self, inps):
            """Insert (or reuse) a DequantizeLinear after each quantized input."""
            for (i, var) in enumerate(inps):
                (s, zp) = (None, None)
                frefs = var.function_references
                for rf in frefs:
                    if (rf.info.type_name == 'DequantizeLinear'):
                        # Reuse an existing dequantize node instead of adding one.
                        var = rf.outputs[0]
                        break
                if (var.parent and (var.parent.info.type_name == 'QuantizeLinear')):
                    (s, zp) = var.parent.inputs[1:3]
                var = self.try_to_dequantize(var, s, zp)
                inps[i] = var
            return inps

        def shared_quantization(self, f, inps, cfg):
            """For Add2/Concatenate, force all inputs to share one scale/zero-point
            (taken from the input with the smallest rank) by re-quantizing the rest."""
            fn = f.info.type_name
            if (fn in ['Add2', 'Concatenate']):
                idx = 0
                min_rank = inps[0].rank
                (s, zp) = (None, None)
                for (i, x) in enumerate(inps[1:]):
                    if ((x.parent is not None) and (x.parent.info.type_name == 'DequantizeLinear')):
                        if (x.rank < min_rank):
                            idx = (i + 1)
                            min_rank = x.rank
                if ((inps[idx].parent is not None) and (inps[idx].parent.info.type_name == 'DequantizeLinear')):
                    (s, zp) = inps[idx].parent.inputs[1:3]
                for (i, x) in enumerate(inps):
                    if (i == idx):
                        continue
                    if ((inps[i].parent is not None) and (inps[i].parent.info.type_name == 'DequantizeLinear')):
                        # Unwind dequantize->quantize to reach the raw variable,
                        # then re-quantize with the shared parameters.
                        inps[i] = inps[i].parent.inputs[0].parent.inputs[0]
                    inps[i] = self.try_to_quantize(inps[i], s, zp, cfg.round_mode, cfg.narrow_range, cfg.dtype)
                    inps[i] = self.try_to_dequantize(inps[i], s, zp)
            return inps

        def modify(self, f, inputs):
            """Rebuild function *f* with quantize/dequantize wrappers around its
            inputs and output, using recorded parameters from scope '<fn>-<rank>'."""
            fn = f.info.type_name
            cfg = self._config
            function_rank = self.get_function_rank(f)
            scope = '{}-{}'.format(fn, function_rank)
            rm = cfg.round_mode
            nr = cfg.narrow_range
            dt = cfg.dtype
            pow2 = cfg.pow2.value
            # Channel axis depends on the data layout.
            axes = ([3] if cfg.channel_last else [1])
            inps = self.quantize_inputs(f, inputs, scope, cfg, axes, rm, nr, dt, pow2)
            inps = self.dequantize_inputs(inps)
            inps = self.shared_quantization(f, inps, cfg)
            h = self._modifier._modify_as_same(f, inps)
            if (fn == 'Sink'):
                return h
            h = self._quantize_outputs(f, h, axes, cfg)
            return h

        def __finish__(self):
            # Drop the bookkeeping so the next conversion starts clean.
            self._map_input_scale_zeropoint = defaultdict(list)

    # NOTE(review): the bare string below looks like a misplaced placeholder
    # docstring left by the extraction — confirm against the original source.
    '\n    Doc here.\n    '
    def __init__(self, functions_ranks, config=None):
        super(QuantizeRecordingToTrainingModifier, self).__init__()
        # Binary element-wise functions whose inputs each need their own params.
        self._fct_bin_set = {'Add2': F.add2, 'Sub2': F.sub2, 'Mul2': F.mul2, 'Div2': F.div2, 'Pow2': F.pow2}
        from nnabla.utils.qnn import PrecisionMode
        # Select the strategy implementation for the configured precision mode;
        # only SIM_QNN is implemented here (others map to None and would fail).
        self._mode = self._precision_mode_set = {PrecisionMode.QNN: None, PrecisionMode.SIM_QNN: self.SimulatedQNN, PrecisionMode.MIXED_QNN: None}[config.precision_mode](functions_ranks, self, config)

    def modify(self, f, inputs):
        # Delegate to the selected precision-mode strategy.
        return self._mode.modify(f, inputs)

    def __finish__(self):
        self._mode.__finish__()
class AppGroup(click.Group):
    """Click group whose commands get the application context by default."""

    def command(self, *args, **kwargs):
        """Like :meth:`click.Group.command`, but wraps the callback in
        ``with_appcontext`` unless ``with_appcontext=False`` is passed."""
        wrap_for_ctx = kwargs.pop('with_appcontext', True)
        def decorator(f):
            if wrap_for_ctx:
                f = with_appcontext(f)
            return click.Group.command(self, *args, **kwargs)(f)
        return decorator

    def group(self, *args, **kwargs):
        """Like :meth:`click.Group.group`, but subgroups default to AppGroup
        so nested commands inherit the app-context behavior."""
        kwargs.setdefault('cls', AppGroup)
        return click.Group.group(self, *args, **kwargs)
def objective(trial):
    """Optuna objective: train one EN_model configuration sampled from *trial*
    and return the validation ENS score from the final epoch.

    Side effects: configures WANDB_MODE, writes config/run-name files into the
    wandb run directory, and checkpoints every epoch.
    """
    (model_type, cfg_model, cfg_training) = train_line_parser()
    # Hyperparameters searched by Optuna; kernels are kept odd (3/5/7).
    cfg_model['dilation_rate'] = trial.suggest_int('dl', 1, 3)
    cfg_model['kernel'] = (3 + (2 * trial.suggest_int('k', 0, 2)))
    cfg_model['memroy_kernel'] = (3 + (2 * trial.suggest_int('mk', 0, 2)))
    if (not cfg_training['offline']):
        os.environ['WANDB_MODE'] = 'online'
    else:
        os.environ['WANDB_MODE'] = 'offline'
    wandb.init(entity='eth-ds-lab', project='DIF Optimization', config={'model_type': model_type, 'training': cfg_training, 'model': cfg_model})
    with open(os.path.join(wandb.run.dir, 'run_name.txt'), 'w') as f:
        try:
            f.write(wandb.run.name)
        # BUGFIX: narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit. Offline runs have no name, so fall
        # back to a timestamped placeholder.
        except Exception:
            f.write(('offline_run_' + str(datetime.now())))
    # Persist the exact configs used by this trial alongside the run.
    with open(os.path.join(wandb.run.dir, 'Training.json'), 'w') as fp:
        json.dump(cfg_training, fp)
    with open(os.path.join(wandb.run.dir, (model_type + '.json')), 'w') as fp:
        json.dump(cfg_model, fp)
    wandb_logger = WandbLogger(project='DS_Lab', config={'model_type': model_type, 'training': cfg_training, 'model': cfg_model}, job_type='train', offline=True)
    # Seed both python and lightning RNGs for reproducibility.
    random.seed(cfg_training['seed'])
    pl.seed_everything(cfg_training['seed'], workers=True)
    EN_dataset = Earth_net_DataModule(data_dir=cfg_training['pickle_dir'], train_batch_size=cfg_training['train_batch_size'], val_batch_size=cfg_training['val_1_batch_size'], test_batch_size=cfg_training['val_2_batch_size'], mesoscale_cut=cfg_training['mesoscale_cut'])
    EN_dataset.serialize_datasets(wandb.run.dir)
    wd_callbacks = WandbTrain_callback(print_preds=False)
    runtime_model_folder = os.path.join(wandb.run.dir, 'runtime_model')
    if (not os.path.isdir(runtime_model_folder)):
        os.mkdir(runtime_model_folder)
    # Keep every epoch's checkpoint (save_top_k=-1) for later inspection.
    checkpoint_callback = ModelCheckpoint(dirpath=runtime_model_folder, save_on_train_epoch_end=True, save_top_k=(- 1), filename='model_{epoch:03d}')
    prun_callback = PyTorchLightningPruningCallback(trial, monitor='epoch_validation_ENS')
    trainer = Trainer(max_epochs=cfg_training['epochs'], logger=wandb_logger, devices=cfg_training['devices'], accelerator=cfg_training['accelerator'], callbacks=[wd_callbacks, checkpoint_callback, prun_callback], num_sanity_val_steps=0)
    model = EN_model(model_type, cfg_model, cfg_training)
    trainer.fit(model, EN_dataset)
    if (not cfg_training['offline']):
        wandb.finish()
    return trainer.callback_metrics['epoch_validation_ENS'].item()
def class_process(dir_path, dst_dir_path, class_name):
    """Extract jpg frames from every .avi video of one class directory.

    Looks for videos under ``dir_path/class_name`` and, for each one, runs
    ffmpeg to write 240px-high frames to
    ``dst_dir_path/class_name/<video_name>/image_%05d.jpg``.  A destination
    directory that already contains the first frame is assumed complete and
    skipped; a partially extracted one is wiped and redone.

    Args:
        dir_path: root directory containing one sub-directory per class.
        dst_dir_path: root output directory (must already exist).
        class_name: name of the class sub-directory to process.
    """
    class_path = os.path.join(dir_path, class_name)
    if not os.path.isdir(class_path):
        return
    dst_class_path = os.path.join(dst_dir_path, class_name)
    if not os.path.exists(dst_class_path):
        os.mkdir(dst_class_path)
    for file_name in os.listdir(class_path):
        if '.avi' not in file_name:
            continue
        name, _ext = os.path.splitext(file_name)
        dst_directory_path = os.path.join(dst_class_path, name)
        video_file_path = os.path.join(class_path, file_name)
        try:
            if os.path.exists(dst_directory_path):
                # Missing first frame means a previous run died mid-extraction:
                # wipe the directory and redo this video.
                if not os.path.exists(os.path.join(dst_directory_path, 'image_00001.jpg')):
                    subprocess.call('rm -r "{}"'.format(dst_directory_path), shell=True)
                    print('remove {}'.format(dst_directory_path))
                    os.mkdir(dst_directory_path)
                else:
                    continue
            else:
                os.mkdir(dst_directory_path)
        except OSError:
            # Was a bare `except:`; only filesystem failures are expected here.
            print(dst_directory_path)
            continue
        cmd = 'ffmpeg -i "{}" -vf scale=-1:240 "{}/image_%05d.jpg"'.format(video_file_path, dst_directory_path)
        print(cmd)
        subprocess.call(cmd, shell=True)
        print('\n')
def test_predict_proba_test_data():
    """Fitted BoostedRDNClassifier should reproduce known toy-cancer probabilities."""
    train, test = load_toy_cancer()
    background = Background(modes=train.modes)
    classifier = BoostedRDNClassifier(background=background, target='cancer', n_estimators=5)
    classifier.fit(train)
    expected = np.array([0.74, 0.74, 0.74, 0.25, 0.25])
    assert_array_almost_equal(classifier.predict_proba(test), expected, decimal=2)
def single_naive(file, prediction_horizon_list, interval_multiplier):
    """Naive persistence forecaster with symmetric prediction intervals.

    For every test row the point forecast at each horizon is simply the
    current value ``y_t``.  Interval half-widths are ``interval_multiplier``
    times the standard deviation of the training residuals at that horizon
    (computed only over rows whose horizon flag is 1).

    Args:
        file: path to a CSV with columns ``train_flag``, ``y_t``, ``ID`` and,
            per horizon h, ``y_t+h(val)`` and ``y_t+h(flag)``.
        prediction_horizon_list: iterable of horizons h to forecast.
        interval_multiplier: scale factor for the interval half-width.

    Returns:
        DataFrame with ``y_t+h(mean)``, ``y_t+h(U)``, ``y_t+h(L)`` per
        horizon plus the test-row ``ID`` column.
    """
    frame = pd.read_csv(file)
    flags = frame['train_flag'].to_numpy()
    train_rows = frame.iloc[np.flatnonzero(flags == 1)]
    test_rows = frame.iloc[np.flatnonzero(flags == 0)]
    current_values = test_rows['y_t'].to_numpy()
    predictions = pd.DataFrame({f'y_t+{h}(mean)': current_values for h in prediction_horizon_list})
    base = train_rows['y_t'].to_numpy()
    for horizon in prediction_horizon_list:
        actual = train_rows[f'y_t+{horizon}(val)'].to_numpy()
        valid = train_rows[f'y_t+{horizon}(flag)'].to_numpy()
        # Residual spread is estimated only on rows flagged valid for this horizon.
        spread = np.std((actual - base)[np.flatnonzero(valid == 1)])
        half_width = interval_multiplier * spread
        predictions[f'y_t+{horizon}(U)'] = predictions[f'y_t+{horizon}(mean)'] + half_width
        predictions[f'y_t+{horizon}(L)'] = predictions[f'y_t+{horizon}(mean)'] - half_width
    predictions['ID'] = test_rows['ID'].to_numpy()
    return predictions
def load_hparam(filename):
    """Load hyper-parameters from a (possibly multi-document) YAML file.

    Later documents overwrite keys from earlier ones.

    Args:
        filename: path to the YAML file.

    Returns:
        dict mapping hyper-parameter names to values.
    """
    # Original opened the file without closing it; `with` guarantees cleanup.
    hparam_dict = dict()
    with open(filename, 'r') as stream:
        for doc in yaml.load_all(stream, Loader=yaml.Loader):
            for (k, v) in doc.items():
                hparam_dict[k] = v
    return hparam_dict
def opt_config_to_gpt2_config(opt_config: OPTConfig) -> GPT2Config:
    """Translate an OPTConfig into the equivalent GPT2Config.

    Asserts on OPT features the GPT-2 layout cannot express (layerdrop,
    non-affine layer norm).  A word-embedding projection dimension equal to
    the hidden size is collapsed to None (no projection).
    """
    assert opt_config.layerdrop == 0.0
    assert opt_config.layer_norm_elementwise_affine
    if opt_config.word_embed_proj_dim == opt_config.hidden_size:
        embed_proj_dim = None
    else:
        embed_proj_dim = opt_config.word_embed_proj_dim
    return GPT2Config(vocab_size=opt_config.vocab_size, n_positions=opt_config.max_position_embeddings, n_embd=opt_config.hidden_size, n_layer=opt_config.num_hidden_layers, n_head=opt_config.num_attention_heads, n_inner=opt_config.ffn_dim, activation_function=opt_config.activation_function, resid_pdrop=opt_config.dropout, embd_pdrop=opt_config.dropout, attn_pdrop=opt_config.attention_dropout, initializer_range=opt_config.init_std, bos_token_id=opt_config.bos_token_id, eos_token_id=opt_config.eos_token_id, prenorm=opt_config.do_layer_norm_before, word_embed_proj_dim=embed_proj_dim)
class ConstituencyClassifier(BaseClassifier):
    """Sentiment/sentence classifier built on top of a constituency-tree embedding.

    Max-pools the per-node embeddings produced by ``tree_embedding`` and
    feeds the result through fully connected layers (GELU + dropout) to
    ``len(labels)`` output classes.
    """

    def __init__(self, tree_embedding, labels, args):
        super(ConstituencyClassifier, self).__init__()
        self.labels = labels
        # Snapshot only the config fields this model needs so checkpoints
        # do not depend on the full argparse namespace.
        self.config = SimpleNamespace(fc_shapes=args.fc_shapes, dropout=args.dropout, num_classes=len(labels), constituency_backprop=args.constituency_backprop, constituency_batch_norm=args.constituency_batch_norm, constituency_node_attn=args.constituency_node_attn, constituency_top_layer=args.constituency_top_layer, constituency_all_words=args.constituency_all_words, model_type=ModelType.CONSTITUENCY)
        self.tree_embedding = tree_embedding
        self.fc_layers = build_output_layers(self.tree_embedding.output_size, self.config.fc_shapes, self.config.num_classes)
        self.dropout = nn.Dropout(self.config.dropout)

    def log_configuration(self):
        """Log the settings that affect model behavior."""
        tlogger.info('Backprop into parser: %s', self.config.constituency_backprop)
        tlogger.info('Batch norm: %s', self.config.constituency_batch_norm)
        tlogger.info('Word positions used: %s', ('all words' if self.config.constituency_all_words else 'start and end words'))
        tlogger.info('Attention over nodes: %s', self.config.constituency_node_attn)
        tlogger.info('Intermediate layers: %s', self.config.fc_shapes)

    def log_norms(self):
        """Log parameter norms for this model and its tree embedding."""
        # Fixed typo: was 'NORMS FOR MODEL PARAMTERS'.
        lines = ['NORMS FOR MODEL PARAMETERS']
        lines.extend([('tree_embedding.' + x) for x in self.tree_embedding.get_norms()])
        for (name, param) in self.named_parameters():
            # tree_embedding norms were already reported above by the embedding itself.
            if (param.requires_grad and (not name.startswith('tree_embedding.'))):
                lines.append(('%s %.6g' % (name, torch.norm(param).item())))
        logger.info('\n'.join(lines))

    def forward(self, inputs):
        """Return class logits for a batch of trees (or SentimentDatum wrappers)."""
        inputs = [(x.constituency if isinstance(x, SentimentDatum) else x) for x in inputs]
        embedding = self.tree_embedding.embed_trees(inputs)
        # Max-pool over the nodes of each tree to get one vector per tree.
        previous_layer = torch.stack([torch.max(x, dim=0)[0] for x in embedding], dim=0)
        previous_layer = self.dropout(previous_layer)
        for fc in self.fc_layers[:(- 1)]:
            previous_layer = self.dropout(F.gelu(fc(previous_layer)))
        out = self.fc_layers[(- 1)](previous_layer)
        return out

    def get_params(self, skip_modules=True):
        """Return a checkpoint dict; tree_embedding weights are delegated to it."""
        model_state = self.state_dict()
        skipped = [k for k in model_state.keys() if k.startswith('tree_embedding.')]
        for k in skipped:
            del model_state[k]
        tree_embedding = self.tree_embedding.get_params(skip_modules)
        params = {'model': model_state, 'tree_embedding': tree_embedding, 'config': self.config, 'labels': self.labels}
        return params

    def extract_sentences(self, doc):
        """Return the constituency tree of every sentence in `doc`."""
        return [sentence.constituency for sentence in doc.sentences]
# NOTE(review): the next line is the leftover argument list of a stripped
# decorator — almost certainly @tf.RegisterGradient('Correlation') (or
# ops.RegisterGradient); decorators appear stripped throughout this file.
# As written, the bare ('Correlation') expression is a no-op and the
# gradient is never registered — restore the decorator. TODO confirm
# against the upstream FlowNet correlation-op source.
('Correlation')
def _CorrelationGrad(op, in_grad, in_grad1, in_grad2):
    # Gradient of the custom correlation op: delegate to the compiled
    # correlation_grad kernel, forwarding the op's original attributes.
    # NOTE(review): only `in_grad` is used; `in_grad1`/`in_grad2` presumably
    # correspond to gradients of the op's auxiliary outputs (which are
    # re-fed as op.outputs[1]/[2]) — verify against the kernel signature.
    (grad0, grad1) = _correlation_module.correlation_grad(in_grad, op.inputs[0], op.inputs[1], op.outputs[1], op.outputs[2], kernel_size=op.get_attr('kernel_size'), max_displacement=op.get_attr('max_displacement'), pad=op.get_attr('pad'), stride_1=op.get_attr('stride_1'), stride_2=op.get_attr('stride_2'))
    return [grad0, grad1]
class QAMetric(Metric):
    """Abstract base class for question-answering metrics.

    Subclasses must implement __call__ to accumulate metric state from a
    model's output dict and the per-example metadata.
    """

    def __call__(self, output_dict: Dict[(str, torch.Tensor)], metadata_list: List[Dict]):
        # Abstract: concrete QA metrics override this.
        raise NotImplementedError
class TestExtract(object):
    """Tests for the sparse extraction helpers find / tril / triu."""

    def setup_method(self):
        # A spread of shapes: single row/column, all-zero, square,
        # rectangular and its transpose.
        self.cases = [csr_matrix([[1, 2]]), csr_matrix([[1, 0]]), csr_matrix([[0, 0]]), csr_matrix([[1], [2]]), csr_matrix([[1], [0]]), csr_matrix([[0], [0]]), csr_matrix([[1, 2], [3, 4]]), csr_matrix([[0, 1], [0, 0]]), csr_matrix([[0, 0], [1, 0]]), csr_matrix([[0, 0], [0, 0]]), csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]]), csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]]).T]

    def test_find(self):
        # Was named `find`, which pytest never collects.  Also fixed the
        # COO-style constructor: csr_matrix takes (data, (row, col)), not
        # ((row, col), data), and the comparison needs dense arrays.
        for A in self.cases:
            (I, J, V) = extract.find(A)
            assert_equal(A.toarray(), csr_matrix((V, (I, J)), shape=A.shape).toarray())

    def test_tril(self):
        for A in self.cases:
            B = A.toarray()
            for k in [(- 3), (- 2), (- 1), 0, 1, 2, 3]:
                assert_equal(extract.tril(A, k=k).toarray(), np.tril(B, k=k))

    def test_triu(self):
        for A in self.cases:
            B = A.toarray()
            for k in [(- 3), (- 2), (- 1), 0, 1, 2, 3]:
                assert_equal(extract.triu(A, k=k).toarray(), np.triu(B, k=k))
class PosedCameraTest(CamTestMixin, TestCase):
    """Tests for sf.PosedCamera: world<->pixel round trips and pixel warping."""

    # NOTE(review): the signature takes `cls`, so this was presumably a
    # @classmethod whose decorator was stripped (decorators appear stripped
    # file-wide); it still works when called as self.element(). Confirm.
    def element(cls) -> sf.PosedCamera:
        # Camera pitched 90 degrees, 100 units up, with a simple linear calibration.
        return sf.PosedCamera(pose=sf.Pose3(R=sf.Rot3.from_yaw_pitch_roll(0.0, (np.pi / 2.0), 0.0), t=sf.V3(0, 0, 100)), calibration=sf.LinearCameraCal(focal_length=(440, 400), principal_point=(320, 240)), image_size=(640, 480))

    def test_posed_camera(self) -> None:
        """Round-trip world points -> pixels -> world points (and the reverse)."""
        posed_cam = self.element()
        for _ in range(100):
            # Random point near the camera center; keep the true range so the
            # inverse projection is well-defined.
            global_point = (sf.V3(np.random.uniform(low=(- 1.0), high=1.0, size=(3,))) + posed_cam.pose.t)
            range_to_point = (global_point - posed_cam.pose.t).norm(epsilon=1e-09)
            (pixel, is_valid) = posed_cam.pixel_from_global_point(global_point)
            if (is_valid == 1):
                # Only points that project into the image can be round-tripped.
                (global_point_reprojected, _) = posed_cam.global_point_from_pixel(pixel, range_to_point=range_to_point)
                self.assertStorageNear(global_point, global_point_reprojected)
            # Reverse direction: pixel -> world point at unit range -> pixel.
            pixel = sf.V2(np.random.uniform(low=0, high=1000, size=(2,)))
            (global_point, _) = posed_cam.global_point_from_pixel(pixel, range_to_point=1)
            (pixel_reprojected, _) = posed_cam.pixel_from_global_point(global_point)
            self.assertStorageNear(pixel, pixel_reprojected)

    def test_warp_pixel(self) -> None:
        """Warp pixels between posed cameras, including the infinite-range case."""
        posed_cam_1 = sf.PosedCamera(pose=sf.Pose3(R=sf.Rot3.from_yaw_pitch_roll(0.0, (np.pi / 2.0), 0.0), t=sf.V3(0.0, 2.0, 0.0)), calibration=sf.LinearCameraCal(focal_length=(440, 400), principal_point=(320, 240)))
        posed_cam_2 = sf.PosedCamera(pose=sf.Pose3(R=sf.Rot3.from_yaw_pitch_roll(0.0, 0.0, ((- np.pi) / 2.0)), t=sf.V3(2.0, 0.0, 0.0)), calibration=sf.LinearCameraCal(focal_length=(440, 400), principal_point=(320, 240)))
        # Point on both cameras' optical axes: the warped pixel must coincide.
        point_on_optical_axes = sf.V3(2.0, 2.0, 0.0)
        inverse_range = 0.5
        (pixel_1, _) = posed_cam_1.pixel_from_global_point(point_on_optical_axes)
        (pixel_2, is_valid_warp_into_2) = posed_cam_1.warp_pixel(pixel=pixel_1, inverse_range=inverse_range, target_cam=posed_cam_2)
        self.assertEqual(is_valid_warp_into_2, 1)
        self.assertStorageNear(pixel_1, pixel_2)
        # Camera 3 faces away from the point: the warp must be flagged invalid.
        posed_cam_3 = sf.PosedCamera(pose=sf.Pose3(R=sf.Rot3(), t=sf.V3(0.0, 0.0, 1.0)), calibration=sf.LinearCameraCal(focal_length=(440, 400), principal_point=(320, 240)))
        (_, is_valid_warp_into_3) = posed_cam_1.warp_pixel(pixel=pixel_1, inverse_range=inverse_range, target_cam=posed_cam_3)
        self.assertEqual(is_valid_warp_into_3, 0)
        # Symbolic inverse range: at inverse_range -> 0 (point at infinity)
        # the warp should reduce to a pure rotation between the cameras.
        symbolic_inverse_range = sf.Symbol('inv_range')
        cam_1_ray = sf.V3(0.5, 1, 1)
        (pixel_inf_1, _) = posed_cam_1.pixel_from_camera_point(cam_1_ray)
        (pixel_inf_2, is_valid_inf) = posed_cam_1.warp_pixel(pixel=pixel_inf_1, inverse_range=symbolic_inverse_range, target_cam=posed_cam_2, epsilon=self.EPSILON)
        cam_2_ray = (posed_cam_2.pose.R.inverse() * (posed_cam_1.pose.R * cam_1_ray))
        (pixel_inf_2_rot_only, _) = posed_cam_2.pixel_from_camera_point(cam_2_ray)
        self.assertEqual(is_valid_inf.subs(symbolic_inverse_range, 0), 1)
        self.assertStorageNear(pixel_inf_2.subs(symbolic_inverse_range, 0), pixel_inf_2_rot_only, places=4)
        # Exact inverse_range == 0 should agree even more tightly.
        (pixel_inf_2_exact, is_valid_inf) = posed_cam_1.warp_pixel(pixel=pixel_inf_1, inverse_range=0, target_cam=posed_cam_2)
        self.assertEqual(is_valid_inf, 1)
        self.assertStorageNear(pixel_inf_2_exact, pixel_inf_2_rot_only, places=9)
def predict_type_embed_task(types_embed_array: np.array, types_embed_labels: np.array, type_space_labels: np.array, pred_task_idx: tuple, indexed_knn: AnnoyIndex, k: int) -> List[dict]:
    """Predict types for embedded samples via k-nearest-neighbor search.

    Samples are ordered Parameter / Return / Variable; ``pred_task_idx``
    holds the (exclusive) end indices of the first two segments.

    Returns:
        One dict per sample with the original label, scored KNN predictions,
        the task name, and whether the label is parametric (e.g. ``List[int]``).
    """
    param_end, return_end = pred_task_idx[0], pred_task_idx[1]

    def task_of(position: int) -> str:
        # Segment membership is determined purely by position in the array.
        if position < param_end:
            return 'Parameter'
        if position < return_end:
            return 'Return'
        return 'Variable'

    results: List[dict] = []
    progress = tqdm(types_embed_array, total=len(types_embed_array), desc='Finding KNNs & Prediction')
    for position, vector in enumerate(progress):
        neighbour_ids, distances = indexed_knn.get_nns_by_vector(vector, k, include_distances=True)
        scored = compute_types_score(distances, neighbour_ids, type_space_labels)
        label = types_embed_labels[position]
        results.append({'original_type': label, 'predictions': scored, 'task': task_of(position), 'is_parametric': bool(re.match('(.+)\\[(.+)\\]', label))})
    return results
def color_jitter(color_jitter, mean, std, data=None, target=None, s=0.25, p=0.2):
    """Randomly apply kornia ColorJitter to a normalized RGB batch.

    The jitter is applied only when ``data`` is a 3-channel batch and the
    pre-drawn random value ``color_jitter`` exceeds the threshold ``p``.
    ``s`` is either a scalar strength (used for brightness/contrast/
    saturation/hue alike) or a kwargs dict for ColorJitter.  The batch is
    de-normalized, jittered, then re-normalized in place.

    Returns:
        (data, target) — target is passed through untouched.
    """
    if data is not None and data.shape[1] == 3 and color_jitter > p:
        if isinstance(s, dict):
            augment = nn.Sequential(kornia.augmentation.ColorJitter(**s))
        else:
            augment = nn.Sequential(kornia.augmentation.ColorJitter(brightness=s, contrast=s, saturation=s, hue=s))
        # Jitter operates on un-normalized pixels, so undo/redo normalization.
        denorm_(data, mean, std)
        data = augment(data)
        renorm_(data, mean, std)
    return (data, target)
class ShuffleLayer(nn.Module):
    """Channel shuffle (ShuffleNet-style): interleave channels across groups.

    Channels are split into ``groups`` contiguous groups, then reordered so
    that consecutive output channels come from different groups.
    """

    def __init__(self, groups):
        super(ShuffleLayer, self).__init__()
        self.groups = groups

    def forward(self, x):
        n, channels, height, width = x.size()
        per_group = channels // self.groups
        # (N, C, H, W) -> (N, g, C/g, H, W) -> swap group/channel axes -> flatten back.
        grouped = x.view(n, self.groups, per_group, height, width)
        shuffled = torch.transpose(grouped, 1, 2).contiguous()
        return shuffled.view(n, (- 1), height, width)
class Fusions(serial.SerializedTestCase):
    """Glow/ONNXIFI fusion tests (tanh + int8 quantize)."""

    # NOTE(review): the next two parenthesized lines are leftover argument
    # lists of stripped hypothesis decorators — almost certainly
    # @given(scale=..., zp=..., size=..., rand_seed=...) and
    # @settings(deadline=None); decorators appear stripped file-wide.
    # As written they are not valid statements — restore the decorators.
    (scale=st.floats(0.0001, 100.0), zp=st.integers((- 128), 128), size=st.integers(1, 100000), rand_seed=st.integers(0, 65534))
    (deadline=None)
    def Skip_test_tanhquantize(self, scale, zp, size, rand_seed):
        """Check that Tanh -> Int8Quantize fuses into a single ONNXIFI op and
        matches the TanhQuantFakeFp16NNPI reference kernel bit-for-bit.

        Name is prefixed `Skip_` so the test runner does not collect it.
        """
        np.random.seed(rand_seed)
        workspace.ResetWorkspace()
        # Build the net under test: Tanh followed by Int8Quantize.
        pred_net = caffe2_pb2.NetDef()
        pred_net.name = 'ref'
        pred_net.external_input.append('X')
        pred_net.external_output.append('Y_q')
        pred_net.op.add().CopyFrom(core.CreateOperator('Tanh', ['X'], ['Y']))
        pred_net.op.add().CopyFrom(core.CreateOperator('Int8Quantize', ['Y'], ['Y_q'], Y_scale=scale, Y_zero_point=zp))
        # Inputs pass through fp16 so both paths see identical precision.
        X = np.linspace((- 1), 1, size).astype(np.float16).astype(np.float32)
        pred_net_onnxified = onnxifi_caffe2_net(pred_net, {'X': X.shape}, debug=True, adjust_batch=False, use_onnx=False)
        # The two ops must have been fused into exactly one Onnxifi op.
        num_onnxified_ops = sum(((1 if (o.type == 'Onnxifi') else 0) for o in pred_net_onnxified.op))
        np.testing.assert_equal(num_onnxified_ops, 1)
        workspace.FeedBlob('X', X)
        workspace.CreateNet(pred_net_onnxified)
        workspace.RunNet(pred_net_onnxified.name)
        Y_glow = workspace.FetchInt8Blob('Y_q')
        # Reference: the fused fake-fp16 NNPI kernel computing tanh+quantize.
        ref_net = caffe2_pb2.NetDef()
        ref_net.name = 'ref'
        ref_net.external_input.append('X')
        ref_net.external_output.append('Y_q')
        ref_net.op.add().CopyFrom(core.CreateOperator('TanhQuantFakeFp16NNPI', ['X'], ['Y_q'], Y_scale=scale, Y_zero_point=zp))
        workspace.CreateNet(ref_net)
        workspace.RunNet(ref_net.name)
        Y_ref = workspace.FetchInt8Blob('Y_q')
        # Data, scale and zero point must all match exactly.
        if ((not np.array_equal(Y_ref.data, Y_glow.data)) or (not (Y_ref.scale == Y_glow.scale)) or (not (Y_ref.zero_point == Y_glow.zero_point))):
            print_test_debug_info('tanhfusion', {'scale': scale, 'zp': zp, 'input': X, 'ideal nonquant': np.tanh(X), 'Y_glow': Y_glow, 'Y_c2': Y_ref})
            assert 0
class Ratkowsky01(Benchmark):
    """Ratkowsky 01 benchmark (NIST StRD nonlinear regression problem Rat43).

    Least-squares fit of a four-parameter sigmoidal growth curve
    ``y = b1 / (1 + exp(b2 - b3*x))**(1/b4)`` to the 15-point onion yield
    data; the certified residual sum of squares is fglob = 8786.404908.
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0, 1.0, 0.0, 0.1], [1000, 20.0, 3.0, 6.0]))
        # NIST-certified parameter estimates for Rat43.  The original line was
        # corrupted to `[[, 5., 0., 1.]]` (a syntax error) — restored from the
        # NIST StRD certified values.
        self.global_optimum = [[699.6415127, 5.2771253025, 0.7596293833, 1.2792483859]]
        self.fglob = 8786.404908
        # Observed yields (a) at times b = 1..15.
        self.a = asarray([16.08, 33.83, 65.8, 97.2, 191.55, 326.2, 386.87, 520.53, 590.03, 651.92, 724.93, 699.56, 689.96, 637.56, 717.41])
        self.b = arange(1, 16.0)

    def fun(self, x, *args):
        """Return the residual sum of squares of the Rat43 model at x."""
        self.nfev += 1
        vec = (x[0] / ((1 + exp((x[1] - (x[2] * self.b)))) ** (1 / x[3])))
        return sum(((self.a - vec) ** 2))
def count_meta_edges(G, p_v):
    """Tally edges of directed graph G that cross partition boundaries.

    Args:
        G: directed graph with integer nodes and a 'capacity' edge attribute.
        p_v: partition vector (node index -> partition id), convertible via
            to_np_arr.

    Returns:
        (counts, capacities): per-source-partition number of cut edges and
        their total capacity, as plain dicts.
    """
    parts = to_np_arr(p_v)
    cut_counts = defaultdict(int)
    cut_capacities = defaultdict(float)
    for u, u_part in enumerate(parts):
        for v in G.successors(u):
            if parts[v] == u_part:
                continue
            # Edge leaves u's partition: record it under the source partition.
            print('({}, {})'.format(u, v))
            cut_counts[u_part] += 1
            cut_capacities[u_part] += G[u][v]['capacity']
    return (dict(cut_counts), dict(cut_capacities))
def save_weights(filename, data):
    """Write `data` to an HDF5 file as a gzip-compressed 'weights' dataset.

    Args:
        filename: destination path (overwritten if it exists).
        data: array-like with a `.shape` attribute.
    """
    with h5py.File(filename, 'w') as handle:
        # Maximum gzip level: favors file size over write speed.
        handle.create_dataset('weights', shape=data.shape, data=data, compression='gzip', compression_opts=9)
def test_record_struct_1():
    """A parametrized struct datashape must round-trip through the parser."""
    datashape = 'struct[{"1": int64[parameters={"xkcd": [11, 12, 13]}]}, parameters={"wonky": ["bla", 1, 2]}]'
    parsed = ak.types.from_datashape(datashape, highlevel=False)
    assert isinstance(parsed, ak.types.RecordType)
    assert str(parsed) == datashape
def masked_cross_entropy(logits, target, length):
    """Length-masked cross-entropy averaged over all valid tokens.

    Args:
        logits: (batch, seq, vocab) unnormalized scores.
        target: (batch, seq) integer class indices.
        length: per-example valid lengths (list/array of ints).

    Returns:
        Scalar loss: sum of per-token NLL over valid positions divided by
        the total number of valid tokens.
    """
    length = Variable(torch.LongTensor(length))
    if USE_CUDA:
        length = length.cuda()
    # Flatten to (batch*seq, vocab) so gather can pick each token's log-prob.
    flat_logits = logits.view((- 1), logits.size((- 1)))
    flat_log_probs = functional.log_softmax(flat_logits, dim=1)
    flat_targets = target.view((- 1), 1)
    token_nll = (- torch.gather(flat_log_probs, dim=1, index=flat_targets))
    losses = token_nll.view(*target.size())
    # Zero out positions beyond each sequence's true length.
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = losses * mask.float()
    return losses.sum() / length.float().sum()
class Regression(Repository, CalcRegression):
    """Fits per-plan-node regression parameters from grouped EXPLAIN plans.

    Reads the grouped plan JSON files produced by the grouping step, fits the
    node-type-specific models provided by CalcRegression (nested-loop-like
    nodes, merge/hash joins, scans), and writes the resulting parameter files
    into the regression directory for the given server.
    """

    def __init__(self, base_dir='.', log_level=Log.error):
        self.ServerId = ''
        self.Level = 0
        self.set_base_dir(base_dir)
        self.LogLevel = log_level

    def __set_serverId(self, serverId):
        self.ServerId = serverId
    '\n Handle self.Level value.\n '

    def __init_level(self):
        # Reset the traversal depth counter.
        self.Level = 0

    def __incr_level(self):
        self.Level += 1

    def __get_level(self):
        return self.Level

    def __delete_objects(self, plan):
        # Strip every key except the structural/identifying ones, leaving a
        # skeleton plan dict that regression parameters are written into.
        for k in list(plan):
            if ((k != 'Node Type') and (k != 'Plans') and (k != 'Plan') and (k != 'Relation Name') and (k != 'Schema') and (k != 'Alias') and (k != 'Parent Relationship') and (k != 'MergeFlag')):
                plan.pop(k)
        return plan

    def __calc_regression(self, plan, reg, queryid, planid, depth):
        # Fit the model matching this plan node's type and store the resulting
        # coefficients (and intercept, where the model has one) into `reg`.
        self.__incr_level()
        _level = self.__get_level()
        _node_type = plan['Node Type']
        # Nested-loop-like nodes: coefficient(s) only.
        for n in ('Append', 'Merge Append', 'Recursive Union', 'Nested Loop', 'BitmapAnd', 'BitmapOr'):
            if (n == _node_type):
                (_Xouter, _Xinner, _RR) = self.get_inputs(plan)
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: === NodeType={}'.format(n))
                    print('Debug3: *** Y ActualRows={}'.format(plan['Actual Rows']))
                    print('Debug3: *** Xouter ={} Xinner ={}'.format(_Xouter, _Xinner))
                _Y = plan['Actual Rows']
                _coef = self.nested_loop(_Xouter, _Xinner, _Y)
                if (type(_coef) is list):
                    reg.update(Coefficient=_coef)
                else:
                    reg.update(Coefficient=[_coef])
                return
        '\n hash or merge join\n '
        for n in ('Merge Join', 'Hash Join'):
            if (n == _node_type):
                (_Xouter, _Xinner, _RR) = self.get_inputs(plan)
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: HASH or MERGE depth={} RR={} queryid={} planid={}'.format(depth, _RR, queryid, planid))
                if (Log.debug3 <= self.LogLevel):
                    print('Debug3: === NodeType={}'.format(n))
                    print('Debug3: *** Y ActualRows={}'.format(plan['Actual Rows']))
                    print('Debug3: *** Xouter ={} Xinner ={}'.format(_Xouter, _Xinner))
                _Y = plan['Actual Rows']
                (_coef, _reg, _intercept) = self.merge_or_hash_join(_Xouter, _Xinner, _Y)
                if (type(_coef) is list):
                    reg.update(Coefficient=_coef)
                else:
                    reg.update(Coefficient=[_coef])
                reg.update(Coefficient2=[round((_reg + 0.0), 5)])
                reg.update(Intercept=[round((_intercept + 0.0), 5)])
                return
        '\n scan type\n '
        # Fallback: anything else is treated as a scan (plan rows -> actual rows).
        if (Log.debug3 <= self.LogLevel):
            print('Debug3: === NodeType={}'.format(_node_type))
            print('Debug3: *** Plan Rows={} NormalizeParam={} NormalizePlanParam={}'.format(plan['Plan Rows'], plan['NormalizeParam'], plan['NormalizePlanParam']))
            print('Debug3: *** Actual Rows={}'.format(plan['Actual Rows']))
        (_coef, _intercept) = self.scan(plan['Plan Rows'], plan['Actual Rows'])
        if (type(_coef) is list):
            reg.update(Coefficient=_coef)
        else:
            reg.update(Coefficient=[_coef])
        reg.update(Intercept=[round((_intercept + 0.0), 5)])
        return

    def __set_relations(self, Plans, depth):
        # Pull 'Relation Name'/'Schema'/'Alias' up from child plan(s) onto the
        # node at position `depth` (counted in pre-order) when it lacks them.
        def get_relations(plan):
            if ('Relation Name' not in plan):
                if ('Plans' in plan):
                    __plan = plan['Plans']
                elif ('Plan' in plan):
                    __plan = plan['Plan']
                else:
                    return
                if isinstance(__plan, list):
                    __outer_plan = __plan[0]
                    __inner_plan = (__plan[1] if (2 <= len(__plan)) else None)
                    if (__inner_plan is None):
                        # Single child: copy its identifiers as scalars.
                        if ('Relation Name' in __outer_plan):
                            plan.update([('Relation Name', __outer_plan['Relation Name'])])
                        if ('Schema' in __outer_plan):
                            plan.update([('Schema', __outer_plan['Schema'])])
                        if ('Alias' in __outer_plan):
                            plan.update([('Alias', __outer_plan['Alias'])])
                    else:
                        # Two children (join): store [outer, inner] pairs, but
                        # only when both sides provide the attribute.
                        if (('Relation Name' in __outer_plan) and ('Relation Name' in __inner_plan)):
                            plan.update([('Relation Name', [__outer_plan['Relation Name'], __inner_plan['Relation Name']])])
                        if (('Schema' in __outer_plan) and ('Schema' in __inner_plan)):
                            plan.update([('Schema', [__outer_plan['Schema'], __inner_plan['Schema']])])
                        if (('Alias' in __outer_plan) and ('Alias' in __inner_plan)):
                            plan.update([('Alias', [__outer_plan['Alias'], __inner_plan['Alias']])])
                else:
                    if ('Relation Name' in __plan):
                        plan.update([('Relation Name', __plan['Relation Name'])])
                    if ('Schema' in __plan):
                        plan.update([('Schema', __plan['Schema'])])
                    if ('Alias' in __plan):
                        plan.update([('Alias', __plan['Alias'])])
        def incr(plan):
            if ('Node Type' in plan):
                self._count += 1
        def op(Plans):
            # Walk nodes in pre-order until the counter reaches self._depth.
            if isinstance(Plans, list):
                for i in range(0, len(Plans)):
                    incr(Plans[i])
                    if (self._depth == self._count):
                        get_relations(Plans[i])
                        return
                    elif ('Plans' in Plans[i]):
                        op(Plans[i]['Plans'])
            else:
                incr(Plans)
                if (self._depth == self._count):
                    get_relations(Plans)
                    return
                elif ('Plans' in Plans):
                    op(Plans['Plans'])
        self._depth = depth
        self._count = 0
        op(Plans)

    def __add_relations(self, Plans):
        # Bottom-up: propagate relation info from deepest nodes upward.
        i = self.count_nodes(Plans)
        while (0 < i):
            self.__set_relations(Plans['Plan'], i)
            i -= 1

    def __regression(self, Plans, reg_param, queryid, planid):
        # Recursively walk the plan tree and fill reg_param (a parallel
        # skeleton of the same shape) with fitted parameters.
        def incr(plan):
            if ('Node Type' in plan):
                self._count += 1
        def op(Plans, reg_param, queryid, planid):
            if isinstance(Plans, list):
                for i in range(0, len(Plans)):
                    incr(Plans[i])
                    self.__calc_regression(Plans[i], reg_param[i], queryid, planid, self._count)
                    if ('Plans' in Plans[i]):
                        op(Plans[i]['Plans'], reg_param[i]['Plans'], queryid, planid)
                return
            else:
                incr(Plans)
                self.__calc_regression(Plans, reg_param, queryid, planid, self._count)
                if ('Plans' in Plans):
                    op(Plans['Plans'], reg_param['Plans'], queryid, planid)
                return
        self._count = 0
        op(Plans, reg_param, queryid, planid)

    def __get_sort_space_used(self, Plans, queryid, planid):
        # Find the maximum on-disk sort space used anywhere in the plan tree.
        def pickup_sort_space_used(plan, queryid, planid):
            if ('Sort Space Type' in plan):
                _type = plan['Sort Space Type']
                _used = plan['Sort Space Used']
                for i in range(len(_type)):
                    if (_type[i] == 'Disk'):
                        if (self._max_sort_space_used < _used[i]):
                            self._max_sort_space_used = _used[i]
        def op(Plans, queryid, planid):
            if isinstance(Plans, list):
                for i in range(0, len(Plans)):
                    pickup_sort_space_used(Plans[i], queryid, planid)
                    if ('Plans' in Plans[i]):
                        op(Plans[i]['Plans'], queryid, planid)
                return
            else:
                pickup_sort_space_used(Plans, queryid, planid)
                if ('Plans' in Plans):
                    op(Plans['Plans'], queryid, planid)
                return
        self._max_sort_space_used = 0
        op(Plans, queryid, planid)
        # None signals "no disk sort anywhere in this plan".
        return (None if (self._max_sort_space_used == 0) else self._max_sort_space_used)
    '\n Public method\n '

    def regression(self, serverId, work_mem=True):
        """Compute regression parameters for every grouped plan of `serverId`.

        Skips work when the regression results are already up to date with
        the grouping stage (compared via sequence ids).  When `work_mem` is
        True, the maximum on-disk sort space is also recorded per plan.
        """
        if (self.check_serverId(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        self.__set_serverId(serverId)
        self.set_log_level(self.LogLevel)
        if (Log.info <= self.LogLevel):
            print('Info: Calculating regression parameters.')
        '\n Check the grouping stat file.\n '
        _grouping_seqid = self.get_seqid_from_grouping_stat(self.ServerId)
        self.check_regression_dir(self.ServerId)
        _regression_seqid = self.get_seqid_from_regression_stat(self.ServerId)
        if (Log.debug3 <= self.LogLevel):
            print('Debug3: _grouping_seqid={} _regression_seqid={}'.format(_grouping_seqid, _regression_seqid))
        '\n Calculate the regression parameters.\n '
        if (_regression_seqid < _grouping_seqid):
            for _hash_subdir in self.get_grouping_dir_list(self.ServerId):
                _gsdirpath = self.get_grouping_subdir_path(self.ServerId, _hash_subdir)
                if os.path.isdir(_gsdirpath):
                    _gsdirlist = self.get_grouping_subdir_list(self.ServerId, _hash_subdir)
                    for f in _gsdirlist:
                        _gpath = self.path(_gsdirpath, f)
                        # File names are "<queryid>.<planid>".
                        _qp_id = str(f).split('.')
                        _queryid = _qp_id[0]
                        _planid = _qp_id[1]
                        if (Log.debug3 <= self.LogLevel):
                            print('Debug3: >>>>>> gpath={}'.format(_gpath))
                        # Read the plan twice: one copy keeps full data, the
                        # other is reduced to a skeleton that receives the
                        # fitted parameters.
                        _json_dict = self.read_plan_json(_gpath)
                        _reg_param = self.read_plan_json(_gpath)
                        self.__add_relations(_reg_param)
                        self.delete_unnecessary_objects(self.__delete_objects, _reg_param)
                        self.__init_level()
                        self.__regression(_json_dict['Plan'], _reg_param['Plan'], _queryid, _planid)
                        if (work_mem == True):
                            self.__init_level()
                            _max_sort_space_used = self.__get_sort_space_used(_json_dict['Plan'], _queryid, _planid)
                            if (_max_sort_space_used is not None):
                                _reg_param.update({'SortSpaceUsed': _max_sort_space_used})
                        '\n Write the result (regression parameters) to the regression\n directory.\n '
                        _rsdirpath = self.get_regression_subdir_path(self.ServerId, _hash_subdir)
                        if (os.path.exists(_rsdirpath) == False):
                            os.makedirs(_rsdirpath)
                        _rpath = self.path(_rsdirpath, f)
                        self.write_plan_json(_reg_param, _rpath)
                        if (Log.debug3 <= self.LogLevel):
                            print('Debug3: Rpath={}'.format(_rpath))
                            print('Debug3: reg_param={}'.format(_reg_param))
            'Update stat file'
            self.update_regression_stat_file(self.ServerId, _grouping_seqid)
def get_post_state(sdfg: SDFG, state: SDFGState):
    """Return the state named 'post_<state>' anywhere in the SDFG tree, or None.

    Searches `sdfg` and all nested SDFGs; matching is done on the states'
    string representations.
    """
    wanted = 'post_' + str(state)
    for sub_sdfg in sdfg.all_sdfgs_recursive():
        for candidate in sub_sdfg.states():
            if str(candidate) == wanted:
                return candidate
    return None
def build_parser(line):
    """Build an optparse parser for a single requirements-file line.

    The parser's exit hook is replaced so option errors raise
    RequirementsFileParseError (carrying the offending line) instead of
    terminating the process.
    """
    parser = optparse.OptionParser(add_help_option=False)
    for make_option in SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ:
        parser.add_option(make_option())

    def parser_exit(self, msg):
        # Surface parse failures as exceptions with the original line attached.
        raise RequirementsFileParseError('Invalid requirement: %s\n%s' % (line, msg))

    parser.exit = parser_exit
    return parser
class JobExecutorInSeriesBlocking(ExecutorBase):
    """Executor that simulates n workers but runs queued jobs serially.

    Jobs are dicts with callable 'f' and input 'x'; run_next() executes one
    job synchronously and stores its result under 'y'.
    """

    def __init__(self, n_workers: int, verbose=False):
        super().__init__(n_workers, verbose=verbose)
        self._creation_time = time.time()

    def run_until_n_free(self, n_desired_free_workers) -> None:
        """Block, running jobs, until at least this many workers are free."""
        while (self.n_free_workers < n_desired_free_workers):
            self.run_next()

    def run_until_empty(self) -> None:
        """Block, running jobs, until all workers are free."""
        while (self.n_free_workers < self.n_workers):
            self.run_next()

    def _update_internal_state(self):
        # Refill running slots from the queue, then recompute worker counters.
        # (Previously duplicated the refill loop verbatim; now delegates.)
        self._move_tasks_from_queue_to_running()
        self.n_busy_workers = len(self._running_tasks)
        self.n_free_workers = (self.n_workers - self.n_busy_workers)

    def run_next(self):
        """Execute the oldest running task synchronously, if any."""
        self._move_tasks_from_queue_to_running()
        if (len(self._running_tasks) > 0):
            job = self._running_tasks.pop(0)
            self._validate_job(job)
            # Run the job's function on its input and record the result.
            job['y'] = job['f'](job['x'])
            self._completed_tasks.append(job)
            self._update_internal_state()

    def age(self):
        """Seconds since this executor was created."""
        return (time.time() - self._creation_time)

    def _move_tasks_from_queue_to_running(self):
        # Promote queued tasks into free running slots (FIFO).
        while ((len(self._running_tasks) < self.n_workers) and (len(self._queue) > 0)):
            self._running_tasks.append(self._queue.pop(0))
class LeakyReLU(Module):
    """Element-wise leaky ReLU activation module.

    Negative inputs are scaled by `negative_slope`; positive inputs pass
    through unchanged.  With `inplace=True` the input tensor is modified
    directly.
    """

    def __init__(self, negative_slope=0.01, inplace=False):
        super(LeakyReLU, self).__init__()
        self.negative_slope = negative_slope
        self.inplace = inplace

    def forward(self, input):
        # Delegate the actual computation to the functional implementation.
        return F.leaky_relu(input, self.negative_slope, self.inplace)

    def extra_repr(self):
        suffix = ', inplace' if self.inplace else ''
        return f'negative_slope={self.negative_slope}{suffix}'
class CPDataset(data.Dataset):
    """Dataset for CP-VTON style virtual try-on.

    Loads (person image, cloth image) pairs listed in ``opt.data_list`` and
    builds the inputs used by the two stages: for 'GMM' the raw cloth and
    mask, for the other stage the pre-warped cloth; plus the body-shape,
    head and pose representations making up the cloth-agnostic person
    encoding.
    """

    def __init__(self, opt):
        super(CPDataset, self).__init__()
        self.opt = opt
        self.root = opt.dataroot
        self.datamode = opt.datamode
        self.stage = opt.stage
        self.data_list = opt.data_list
        self.fine_height = opt.fine_height
        self.fine_width = opt.fine_width
        self.radius = opt.radius
        self.data_path = osp.join(opt.dataroot, opt.datamode)
        # Images are normalized to [-1, 1] per channel.
        self.transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        # Each line of the data list is "<image_name> <cloth_name>".
        im_names = []
        c_names = []
        with open(osp.join(opt.dataroot, opt.data_list), 'r') as f:
            for line in f.readlines():
                (im_name, c_name) = line.strip().split()
                im_names.append(im_name)
                c_names.append(c_name)
        self.im_names = im_names
        self.c_names = c_names

    def name(self):
        return 'CPDataset'

    def __getitem__(self, index):
        """Build the full training sample (cloth, person, agnostic, pose...) for one pair."""
        c_name = self.c_names[index]
        im_name = self.im_names[index]
        # GMM consumes the raw cloth; the later stage consumes GMM's warped output.
        if (self.stage == 'GMM'):
            c = Image.open(osp.join(self.data_path, 'cloth', c_name))
            cm = Image.open(osp.join(self.data_path, 'cloth-mask', c_name))
        else:
            c = Image.open(osp.join(self.data_path, 'warp-cloth', c_name))
            cm = Image.open(osp.join(self.data_path, 'warp-mask', c_name))
        c = self.transform(c)
        # Binarize the cloth mask to {0, 1} and add a channel dimension.
        cm_array = np.array(cm)
        cm_array = (cm_array >= 128).astype(np.float32)
        cm = torch.from_numpy(cm_array)
        cm.unsqueeze_(0)
        im = Image.open(osp.join(self.data_path, 'image', im_name))
        im = self.transform(im)
        parse_name = im_name.replace('.jpg', '.png')
        im_parse = Image.open(osp.join(self.data_path, 'image-parse', parse_name))
        parse_array = np.array(im_parse)
        parse_shape = (parse_array > 0).astype(np.float32)
        # NOTE(review): label ids presumably follow the LIP human-parsing
        # convention (1/2/4/13 head-related, 5/6/7 upper clothes) — confirm
        # against the parsing model used to generate 'image-parse'.
        parse_head = ((((parse_array == 1).astype(np.float32) + (parse_array == 2).astype(np.float32)) + (parse_array == 4).astype(np.float32)) + (parse_array == 13).astype(np.float32))
        parse_cloth = (((parse_array == 5).astype(np.float32) + (parse_array == 6).astype(np.float32)) + (parse_array == 7).astype(np.float32))
        # Downsample then upsample the silhouette to get a blurred shape prior.
        parse_shape = Image.fromarray((parse_shape * 255).astype(np.uint8))
        parse_shape = parse_shape.resize(((self.fine_width // 16), (self.fine_height // 16)), Image.BILINEAR)
        parse_shape = parse_shape.resize((self.fine_width, self.fine_height), Image.BILINEAR)
        shape = self.transform(parse_shape)
        phead = torch.from_numpy(parse_head)
        pcm = torch.from_numpy(parse_cloth)
        # Cloth region of the person (white background); head region (black background).
        im_c = ((im * pcm) + (1 - pcm))
        im_h = ((im * phead) - (1 - phead))
        # One heat-map channel per pose keypoint: a small white square at the joint.
        pose_name = im_name.replace('.jpg', '_keypoints.json')
        with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:
            pose_label = json.load(f)
            pose_data = pose_label['people'][0]['pose_keypoints']
            pose_data = np.array(pose_data)
            pose_data = pose_data.reshape(((- 1), 3))
        point_num = pose_data.shape[0]
        pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)
        r = self.radius
        im_pose = Image.new('L', (self.fine_width, self.fine_height))
        pose_draw = ImageDraw.Draw(im_pose)
        for i in range(point_num):
            one_map = Image.new('L', (self.fine_width, self.fine_height))
            draw = ImageDraw.Draw(one_map)
            pointx = pose_data[(i, 0)]
            pointy = pose_data[(i, 1)]
            # Coordinates <= 1 mean the keypoint was not detected.
            if ((pointx > 1) and (pointy > 1)):
                draw.rectangle(((pointx - r), (pointy - r), (pointx + r), (pointy + r)), 'white', 'white')
                pose_draw.rectangle(((pointx - r), (pointy - r), (pointx + r), (pointy + r)), 'white', 'white')
            one_map = self.transform(one_map)
            pose_map[i] = one_map[0]
        im_pose = self.transform(im_pose)
        # Cloth-agnostic person representation: shape prior + head + pose maps.
        agnostic = torch.cat([shape, im_h, pose_map], 0)
        if (self.stage == 'GMM'):
            im_g = Image.open('grid.png')
            im_g = self.transform(im_g)
        else:
            im_g = ''
        result = {'c_name': c_name, 'im_name': im_name, 'cloth': c, 'cloth_mask': cm, 'image': im, 'agnostic': agnostic, 'parse_cloth': im_c, 'shape': shape, 'head': im_h, 'pose_image': im_pose, 'grid_image': im_g}
        return result

    def __len__(self):
        return len(self.im_names)
class ExpandPure(ExpandTransformation):
    """Pure-SDFG expansion of a scale/accumulate reduction-like node.

    Builds a nested SDFG with a single mapped tasklet computing
    ``out = alpha * inp`` (plus ``beta * out`` when beta != 0), mapping the
    input's full index space onto the output via the node's ``axes``.
    """
    environments = []

    # NOTE(review): called without `self` and with `node` first — this was
    # almost certainly decorated with @staticmethod originally (decorators
    # appear stripped file-wide); verify against the dace library pattern.
    def expansion(node, parent_state, parent_sdfg):
        (inp_tensor, out_tensor) = node.validate(parent_sdfg, parent_state)
        sdfg = dace.SDFG(f'{node.label}_sdfg')
        # Mirror the parent arrays (shape/dtype/storage/strides) inside the nested SDFG.
        (_, inp_arr) = sdfg.add_array('_inp_tensor', inp_tensor.shape, inp_tensor.dtype, inp_tensor.storage, strides=inp_tensor.strides)
        (_, out_arr) = sdfg.add_array('_out_tensor', out_tensor.shape, out_tensor.dtype, out_tensor.storage, strides=out_tensor.strides)
        state = sdfg.add_state(f'{node.label}_state')
        # One map parameter per input dimension, covering the full shape.
        map_params = [f'__i{i}' for i in range(len(inp_arr.shape))]
        map_rng = {i: f'0:{s}' for (i, s) in zip(map_params, inp_arr.shape)}
        inp_mem = dace.Memlet(expr=f"_inp_tensor[{','.join(map_params)}]")
        # The output is indexed only by the dimensions listed in node.axes.
        out_mem = dace.Memlet(expr=f"_out_tensor[{','.join([map_params[i] for i in node.axes])}]")
        inputs = {'_inp': inp_mem}
        outputs = {'_out': out_mem}
        code = f'_out = {node.alpha} * _inp'
        if (node.beta != 0):
            # beta != 0: accumulate into the existing output value.
            inputs['_inout'] = out_mem
            code = f'_out = {node.alpha} * _inp + {node.beta} * _inout'
        state.add_mapped_tasklet(f'{node.label}_tasklet', map_rng, inputs, code, outputs, external_edges=True)
        return sdfg
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original OpenAI Whisper checkpoint to a transformers model.

    Args:
        checkpoint_path: either a model name key of ``_MODELS`` (downloaded)
            or a local ``.pt`` checkpoint path.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    if ('.pt' not in checkpoint_path):
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    # BUGFIX: decoder_attention_heads was read from 'n_text_state' (the
    # decoder hidden size) instead of 'n_text_head'.
    config = WhisperConfig(vocab_size=dimensions['n_vocab'], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['n_mels'], d_model=dimensions['n_audio_state'], max_target_positions=dimensions['n_text_ctx'], encoder_layers=dimensions['n_audio_layer'], encoder_attention_heads=dimensions['n_audio_head'], decoder_layers=dimensions['n_text_layer'], decoder_attention_heads=dimensions['n_text_head'], max_source_positions=dimensions['n_audio_ctx'])
    model = WhisperForConditionalGeneration(config)
    # Sinusoidal position embeddings are generated, so they may be missing.
    (missing, unexpected) = model.model.load_state_dict(state_dict, strict=False)
    if ((len(missing) > 0) and (not (set(missing) <= {'encoder.embed_positions.weights', 'decoder.embed_positions.weights'}))):
        raise ValueError(f'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing, but all the following weights are missing {missing}')
    if tie_embeds:
        # Share the output projection with the decoder token embedding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
def main():
    """CLI entry point: view or export images stored in one or more LMDB databases."""
    parser = argparse.ArgumentParser()
    # FIX: typos in user-facing help text ("determinted" -> "determined", "prefiex" -> "prefix").
    parser.add_argument('command', nargs='?', type=str, choices=['view', 'export'], help='view: view the images in the lmdb database interactively.\nexport: Export the images in the lmdb databases to a folder. The images are grouped in subfolders determined by the prefix of image key.')
    parser.add_argument('lmdb_path', nargs='+', type=str, help='The path to the lmdb database folder. Support multiple database paths.')
    parser.add_argument('--out_dir', type=str, default='')
    parser.add_argument('--flat', action='store_true', help='If enabled, the images are imported into output directory directly instead of hierarchical directories.')
    parser.add_argument('--imageType', type=str, default='')
    # FIX: was default='-1' (a string); argparse does run `type` on string
    # defaults, but an int default is explicit and not reliant on that corner.
    parser.add_argument('--limit', type=int, default=-1)
    args = parser.parse_args()
    command = args.command
    lmdb_paths = args.lmdb_path
    for lmdb_path in lmdb_paths:
        if (command == 'view'):
            view(lmdb_path)
        elif (command == 'export'):
            export_images(lmdb_path, args.out_dir, args.flat, limit=args.limit, imageType=args.imageType)
def test_listtype_numpytype_categorical():
    """A categorical list-of-int32 type must round-trip through str/parse."""
    original = ListType(NumpyType('int32'), {'__categorical__': True})
    rendered = str(original)
    assert (str(parser.parse(rendered)) == rendered)
class Entropy(_CrossEntropy):
    """Shannon entropy, computed as the cross-entropy of a distribution with itself."""

    def __init__(self):
        # sumit=True: base class sums over elements.
        super().__init__(sumit=True)

    def forward(self, p):
        """Entropy of `p` via H(p) = CE(p, p)."""
        return super().forward(p, p)
def idx_to_onehot(idx, num_elements):
    """Return a float32 one-hot vector of length `num_elements` with 1.0 at `idx`."""
    encoding = np.zeros(num_elements, dtype=np.float32)
    encoding[idx] = 1.0
    return encoding
class FunctionFieldMorphism_rational(FunctionFieldMorphism):
    """Morphism of a rational function field determined by the image of its generator."""

    def __init__(self, parent, im_gen, base_morphism):
        FunctionFieldMorphism.__init__(self, parent, im_gen, base_morphism)

    def _call_(self, x):
        """Evaluate the morphism at `x` by substituting the generator's image."""
        elt = x.element()
        base = self._base_morphism
        if base is None:
            # No coefficient map: plain substitution of the generator.
            return elt.subs({elt.parent().gen(): self._im_gen})
        # Map numerator and denominator coefficients through the base morphism,
        # then substitute the generator's image into each.
        poly_ring = self._im_gen.parent()['X']
        mapped_num = poly_ring([base(c) for c in elt.numerator().list()])
        mapped_den = poly_ring([base(c) for c in elt.denominator().list()])
        return (mapped_num.subs(self._im_gen) / mapped_den.subs(self._im_gen))
class BinnedDataset(Dataset):
    """Dataset pairing molecular fingerprints with spectra binned onto a fixed m/z grid.

    For each row of ``df`` this loads a per-spectrum subformula JSON file, bins it
    into ``num_bins`` bins up to ``upper_limit`` Da, computes a Morgan fingerprint
    (and molecular weight) from the SMILES, and drops entries whose spectrum table
    could not be loaded.
    """
    def __init__(self, df, data_dir, num_bins, num_workers=0, upper_limit=1500, form_dir_name: str='subform_50', use_ray=False, **kwargs):
        self.df = df
        self.num_bins = num_bins
        self.num_workers = num_workers
        self.upper_limit = upper_limit
        # Bin edges for the binned spectra (0 .. upper_limit).
        self.bins = np.linspace(0, self.upper_limit, self.num_bins)
        # spec name -> ionization/adduct string.
        self.name_to_adduct = dict(self.df[['spec', 'ionization']].values)
        self.smiles = self.df['smiles'].values
        # Fingerprints: serial when num_workers == 0, otherwise chunked-parallel.
        if (self.num_workers == 0):
            self.fps = [common.get_morgan_fp_smi_wt(i) for i in self.smiles]
        else:
            self.fps = common.chunked_parallel(self.smiles, common.get_morgan_fp_smi_wt, chunks=100, max_cpu=self.num_workers, timeout=600, max_retries=3, use_ray=use_ray)
        # Each entry is (fingerprint, molecular weight); split into two arrays.
        (fps, weights) = zip(*[(i, j) for (i, j) in self.fps])
        self.fps = np.vstack(fps)
        self.weights = np.array(weights)
        self.spec_names = self.df['spec'].values
        spec_files = [(((data_dir / 'subformulae') / f'{form_dir_name}') / f'{spec_name}.json') for spec_name in self.spec_names]
        process_spec_file = (lambda x: common.bin_form_file(x, num_bins=num_bins, upper_limit=upper_limit))
        if (self.num_workers == 0):
            spec_outputs = [process_spec_file(i) for i in spec_files]
        else:
            spec_outputs = common.chunked_parallel(spec_files, process_spec_file, chunks=100, max_cpu=self.num_workers, timeout=4000, max_retries=3, use_ray=use_ray)
        (self.metas, self.spec_ars) = zip(*spec_outputs)
        # Keep only entries whose spectrum table was found; filter every
        # parallel array with the same mask so indices stay aligned.
        mask = np.array([(i is not None) for i in self.spec_ars])
        logging.info(f'Could not find tables for {np.sum((~ mask))} spec')
        self.spec_names = np.array(self.spec_names)[mask]
        self.metas = np.array(self.metas)[mask]
        self.spec_ars = np.array(self.spec_ars, dtype=object)[mask]
        self.spec_ars = np.vstack(self.spec_ars).astype(float)
        self.fps = np.array(self.fps)[mask]
        self.weights = np.array(self.weights)[mask]
        self.df = self.df[mask]
        # One-hot-index of the adduct for each surviving spectrum.
        self.adducts = [common.ion2onehot_pos[self.name_to_adduct[i]] for i in self.spec_names]
    def __len__(self):
        return len(self.df)
    def __getitem__(self, idx: int):
        """Return one example as a dict of name, binned spectrum, weight, fp, adduct, meta."""
        name = self.spec_names[idx]
        meta = self.metas[idx]
        ar = self.spec_ars[idx]
        fp = self.fps[idx]
        adduct = self.adducts[idx]
        full_weight = self.weights[idx]
        outdict = {'name': name, 'binned': ar, 'full_weight': full_weight, 'fp': fp, 'adduct': adduct, '_meta': meta}
        return outdict
    def get_collate_fn(cls):
        # NOTE(review): takes `cls` but is not decorated @classmethod — presumably
        # called on the class/instance with the first argument ignored; confirm callers.
        return BinnedDataset.collate_fn
    def collate_fn(input_list):
        # NOTE(review): used as a plain function fetched via the class attribute
        # (no `self`); batches a list of __getitem__ dicts into tensors.
        names = [j['name'] for j in input_list]
        spec_ars = [j['binned'] for j in input_list]
        fp_ars = [j['fp'] for j in input_list]
        full_weight = [j['full_weight'] for j in input_list]
        adducts = [j['adduct'] for j in input_list]
        spectra_tensors = torch.stack([torch.tensor(spectra) for spectra in spec_ars])
        fp_tensors = torch.stack([torch.tensor(fp) for fp in fp_ars])
        full_weight = torch.FloatTensor(full_weight)
        adducts = torch.FloatTensor(adducts)
        return_dict = {'spectra': spectra_tensors, 'fps': fp_tensors, 'names': names, 'adducts': adducts, 'full_weight': full_weight}
        return return_dict
class TicTacToeGame(Game):
    """Game-rules adapter for n-by-n Tic-Tac-Toe.

    Boards are (n, n) numpy arrays with 1 / -1 for the two players and 0 for
    empty cells. Actions are flat cell indices 0..n*n-1 plus a final
    "no move" action n*n.
    """

    def __init__(self, n=3):
        self.n = n

    def getInitBoard(self):
        """Return the initial (empty) board as a numpy array."""
        b = Board(self.n)
        return np.array(b.pieces)

    def getBoardSize(self):
        return (self.n, self.n)

    def getActionSize(self):
        # n*n cell moves plus one "pass" action.
        return (self.n * self.n) + 1

    def getNextState(self, board, player, action):
        """Apply `action` for `player`; return (next_board, next_player)."""
        if action == (self.n * self.n):
            # Pass action: board unchanged, turn flips.
            return (board, -player)
        b = Board(self.n)
        b.pieces = np.copy(board)
        move = divmod(action, self.n)  # (row, col) from the flat index
        b.execute_move(move, player)
        return (b.pieces, -player)

    def getValidMoves(self, board, player):
        """Binary vector of legal actions; only the pass bit is set when stuck."""
        valids = [0] * self.getActionSize()
        b = Board(self.n)
        b.pieces = np.copy(board)
        legal_moves = b.get_legal_moves(player)
        if len(legal_moves) == 0:
            valids[-1] = 1
            return np.array(valids)
        for (x, y) in legal_moves:
            valids[(self.n * x) + y] = 1
        return np.array(valids)

    def getGameEnded(self, board, player):
        """1 if `player` won, -1 if lost, 0 if ongoing, small positive for a draw."""
        b = Board(self.n)
        b.pieces = np.copy(board)
        if b.is_win(player):
            return 1
        if b.is_win(-player):
            return -1
        if b.has_legal_moves():
            return 0
        return 0.0001  # draw: nonzero so search treats the game as finished

    def getCanonicalForm(self, board, player):
        # From player 1's perspective: multiply by +/-1.
        return player * board

    def getSymmetries(self, board, pi):
        """Return all 8 dihedral symmetries of (board, policy)."""
        assert len(pi) == (self.n ** 2) + 1
        pi_board = np.reshape(pi[:-1], (self.n, self.n))
        symmetries = []
        for rot in range(1, 5):
            for flip in [True, False]:
                newB = np.rot90(board, rot)
                newPi = np.rot90(pi_board, rot)
                if flip:
                    newB = np.fliplr(newB)
                    newPi = np.fliplr(newPi)
                symmetries += [(newB, list(newPi.ravel()) + [pi[-1]])]
        return symmetries

    def stringRepresentation(self, board):
        # BUG FIX: ndarray.tostring() was deprecated and removed in NumPy 2.0;
        # tobytes() returns the identical byte string.
        return board.tobytes()

    @staticmethod
    def display(board):
        """Pretty-print a board to stdout (X = -1, O = 1)."""
        n = board.shape[0]
        print('   ', end='')
        for y in range(n):
            print(y, '', end='')
        print('')
        print('  ', end='')
        for _ in range(n):
            print('-', end='-')
        print('--')
        for y in range(n):
            print(y, '|', end='')
            for x in range(n):
                piece = board[y][x]
                if piece == -1:
                    print('X ', end='')
                elif piece == 1:
                    print('O ', end='')
                else:
                    # NOTE: the original also had an `elif x == n` branch, which
                    # is unreachable since x < n; it was removed.
                    print('- ', end='')
            print('|')
        print('  ', end='')
        for _ in range(n):
            print('-', end='-')
        print('--')
class UninitializedTensorMixin:
    """Mixin for lazy (uninitialized) parameters/buffers.

    Until :meth:`materialize` is called, only a whitelist of shape-agnostic
    tensor methods is usable; anything else raises.
    """

    # Methods that are safe to call before the tensor has a real shape.
    _allowed_methods = [torch.Tensor.__hash__, torch.Tensor.size, torch.Tensor.copy_, torch.Tensor.is_floating_point, torch.Tensor.half, torch.Tensor.float, torch.Tensor.double, torch.Tensor.char, torch.Tensor.short, torch.Tensor.int, torch.Tensor.long, torch.Tensor.cuda, torch.Tensor.cpu, torch.Tensor.to, torch.Tensor.get_device, torch._has_compatible_shallow_copy_type]

    def materialize(self, shape, device=None, dtype=None):
        """Allocate real storage with `shape` and swap this object to `cls_to_become`.

        Device/dtype default to those of the placeholder data.
        """
        if device is None:
            device = self.data.device
        if dtype is None:
            dtype = self.data.dtype
        self.data = torch.empty(shape, device=device, dtype=dtype)
        self.__class__ = self.cls_to_become

    # BUG FIX: without @property, `shape` was a plain method, so `.shape`
    # silently returned a bound method instead of raising the guidance error.
    @property
    def shape(self):
        raise RuntimeError("Can't access the shape of an uninitialized parameter or buffer. This error usually happens in `load_state_dict` when trying to load an uninitialized parameter into an initialized one. Call `forward` to initialize the parameters before accessing their attributes.")

    def share_memory_(self):
        raise RuntimeError("Can't share memory on an uninitialized parameter or buffer. Call `forward` to initialize the parameters before calling `module.share_memory()`.")

    def __repr__(self):
        return f'<{self.__class__.__name__}>'

    def __reduce_ex__(self, proto):
        # Pickle as a fresh uninitialized object; only requires_grad is kept.
        return (self.__class__, (self.requires_grad,))

    # BUG FIX: the torch override protocol passes the class as the first
    # argument, so this must be a classmethod (as in upstream PyTorch).
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        if (func in cls._allowed_methods) or (func.__class__.__name__ == 'method-wrapper'):
            if kwargs is None:
                kwargs = {}
            return super().__torch_function__(func, types, args, kwargs)
        raise ValueError('Attempted to use an uninitialized parameter in {}. This error happens when you are using a `LazyModule` or explicitly manipulating `torch.nn.parameter.{}` objects. When using LazyModules Call `forward` with a dummy batch to initialize the parameters before calling torch functions'.format(func, cls.__name__))
def register_Ns3FixedRssLossModel_methods(root_module, cls):
    """Register the pybindgen bindings for ns3::FixedRssLossModel on `cls`."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('SetRss', 'void', [param('double', 'rss')])
    # Private virtuals exposed so subclasses can override them from Python.
    cls.add_method(
        'DoCalcRxPower', 'double',
        [param('double', 'txPowerDbm'),
         param('ns3::Ptr< ns3::MobilityModel >', 'a'),
         param('ns3::Ptr< ns3::MobilityModel >', 'b')],
        is_const=True, visibility='private', is_virtual=True)
    cls.add_method(
        'DoAssignStreams', 'int64_t',
        [param('int64_t', 'stream')],
        visibility='private', is_virtual=True)
class PartitionRngStasher:
    """Stash and restore RNG state per micro-batch (CPU plus one optional CUDA device)."""

    def __init__(self, device=torch.device('cpu')):
        self.device = device
        self.state = {}  # micro_batch_index -> (cpu_rng_state, gpu_rng_state or None)
        # Only CUDA devices need explicit tracking; CPU RNG is always saved.
        self.devices = [self.device] if self.device.type == 'cuda' else []

    def stash_rng_state(self, micro_batch_index):
        """Record the current RNG state under `micro_batch_index`."""
        cpu_state = torch.get_rng_state()
        gpu_state = None
        if self.device.type == 'cuda':
            with torch.cuda.device(self.device):
                gpu_state = torch.cuda.get_rng_state()
        self.state[micro_batch_index] = (cpu_state, gpu_state)

    def restore_rng_state(self, micro_batch_index):
        """Restore (and discard) the stashed RNG state for `micro_batch_index`."""
        cpu_state, gpu_state = self.state.pop(micro_batch_index)
        torch.set_rng_state(cpu_state)
        if gpu_state is not None:
            torch.cuda.set_rng_state(gpu_state, self.device)

    def clear_state(self):
        self.state.clear()
def set_lora_diag(model, diag: torch.Tensor):
    """Apply `diag` as the rank selector on every LoRA-injected layer in `model`."""
    lora_class_names = ('LoraInjectedLinear', 'LoraInjectedConv2d', 'LoraInjectedConv3d')
    for layer in model.modules():
        if layer.__class__.__name__ in lora_class_names:
            layer.set_selector_from_diag(diag)
_utils.test()  # NOTE(review): looks like a truncated decorator (probably `@test_utils.test()` applied to the test below) — confirm against the original file
def test_ptr_scalar():
    """Check that two `ti.static` aliases of the same 0-d field stay in sync."""
    a = ti.field(dtype=ti.f32, shape=())
    # NOTE(review): presumably decorated `@ti.kernel` in the original source.
    def func(t: ti.f32):
        b = ti.static(a)  # alias of field `a`
        c = ti.static(b)  # alias of the alias
        b[None] = (b[None] * t)
        c[None] = (a[None] + t)
    for (x, y) in zip(range((- 5), 5), range((- 4), 4)):
        a[None] = x
        func(y)
        # After func: a = (x * y) + y.
        assert (a[None] == ((x * y) + y))
# BUG FIX: the two leading `.parametrize(...)` lines were syntactically invalid
# remnants of stripped decorators; restored as pytest.mark.parametrize.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313, 999])
def test_gelu_double_backward(seed, ctx, func_name):
    """Gradient-of-gradient (double backward) check for F.gelu."""
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32)]
    backward_function_tester(rng, F.gelu, inputs, ctx=ctx, atol_accum=0.005)
def tot() -> operations.GraphOfOperations:
    """Build the Tree-of-Thoughts operation graph for the sorting task.

    One generate/score/keep-best round, three refinement rounds chained through
    the previous round's best candidate, then a final keep-best and ground-truth
    check.
    """
    graph = operations.GraphOfOperations()
    graph.append_operation(operations.Generate(1, 20))
    graph.append_operation(operations.Score(1, False, utils.num_errors))
    prev_best = operations.KeepBestN(1, False)
    graph.append_operation(prev_best)
    for _ in range(3):
        graph.append_operation(operations.Generate(1, 20))
        graph.append_operation(operations.Score(1, False, utils.num_errors))
        current_best = operations.KeepBestN(1, False)
        # Each round also considers the previous round's winner.
        current_best.add_predecessor(prev_best)
        graph.append_operation(current_best)
        prev_best = current_best
    graph.append_operation(operations.KeepBestN(1, False))
    graph.append_operation(operations.GroundTruth(utils.test_sorting))
    return graph
def draw_circle(image, circle, offset=(0, 0), color=(0, 0, 255), thickness=1):
    """Draw `circle` (with `.center` and `.radius`) onto `image`, shifted by `offset`."""
    shifted_center = round_vector(np.array(circle.center) + offset)
    cv2.circle(image, shifted_center, circle.radius, color, thickness=thickness)
def start_memory_tracing(modules_to_trace: Optional[Union[(str, Iterable[str])]]=None, modules_not_to_trace: Optional[Union[(str, Iterable[str])]]=None, events_to_trace: str='line', gpus_to_trace: Optional[List[int]]=None) -> MemoryTrace:
    """Install a `sys.settrace` hook that records CPU/GPU memory at traced events.

    Args:
        modules_to_trace: module-name substring(s) to include (None = all).
        modules_not_to_trace: module-name substring(s) to exclude.
        events_to_trace: trace event name(s) ('line', 'call', ...).
        gpus_to_trace: GPU indices to sample (None = all visible GPUs).

    Returns:
        The (initially empty) list of `UsedMemoryState` entries, which the
        trace hook appends to while `_is_memory_tracing_enabled` is set.
    """
    # CPU memory via psutil, if available.
    if is_psutil_available():
        process = psutil.Process(os.getpid())
    else:
        logger.warning("Psutil not installed, we won't log CPU memory usage. Install psutil (pip install psutil) to use CPU memory tracing.")
        process = None
    # GPU memory via py3nvml, if available and initializable.
    if is_py3nvml_available():
        try:
            nvml.nvmlInit()
            devices = (list(range(nvml.nvmlDeviceGetCount())) if (gpus_to_trace is None) else gpus_to_trace)
            nvml.nvmlShutdown()
        except (OSError, nvml.NVMLError):
            logger.warning("Error while initializing comunication with GPU. We won't perform GPU memory tracing.")
            log_gpu = False
        else:
            log_gpu = (is_torch_available() or is_tf_available())
    else:
        logger.warning("py3nvml not installed, we won't log GPU memory usage. Install py3nvml (pip install py3nvml) to use GPU memory tracing.")
        log_gpu = False
    memory_trace = []
    def traceit(frame, event, args):
        # Trace hook: filter by event and module, then sample memory usage.
        global _is_memory_tracing_enabled
        if (not _is_memory_tracing_enabled):
            return traceit
        # Event filter (single name or collection of names).
        if (events_to_trace is not None):
            if (isinstance(events_to_trace, str) and (event != events_to_trace)):
                return traceit
            elif (isinstance(events_to_trace, (list, tuple)) and (event not in events_to_trace)):
                return traceit
        if ('__name__' not in frame.f_globals):
            return traceit
        name = frame.f_globals['__name__']
        if (not isinstance(name, str)):
            return traceit
        else:
            # Module include/exclude filters (substring match on __name__).
            if (modules_to_trace is not None):
                if (isinstance(modules_to_trace, str) and (modules_to_trace not in name)):
                    return traceit
                elif (isinstance(modules_to_trace, (list, tuple)) and all(((m not in name) for m in modules_to_trace))):
                    return traceit
            if (modules_not_to_trace is not None):
                if (isinstance(modules_not_to_trace, str) and (modules_not_to_trace in name)):
                    return traceit
                elif (isinstance(modules_not_to_trace, (list, tuple)) and any(((m in name) for m in modules_not_to_trace))):
                    return traceit
        # Record where we are (file, module, line, event, source line).
        lineno = frame.f_lineno
        filename = frame.f_globals['__file__']
        if (filename.endswith('.pyc') or filename.endswith('.pyo')):
            filename = filename[:(- 1)]
        line = linecache.getline(filename, lineno).rstrip()
        traced_state = Frame(filename, name, lineno, event, line)
        # CPU resident-set size.
        cpu_mem = 0
        if (process is not None):
            mem = process.memory_info()
            cpu_mem = mem.rss
        # Sum of used GPU memory over the traced devices; caches are cleared
        # first so framework allocators don't mask real usage.
        gpu_mem = 0
        if log_gpu:
            if is_torch_available():
                torch_empty_cache()
            if is_tf_available():
                tf_context.context()._clear_caches()
            nvml.nvmlInit()
            for i in devices:
                handle = nvml.nvmlDeviceGetHandleByIndex(i)
                meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                gpu_mem += meminfo.used
            nvml.nvmlShutdown()
        mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
        memory_trace.append(mem_state)
        return traceit
    sys.settrace(traceit)
    global _is_memory_tracing_enabled
    _is_memory_tracing_enabled = True
    return memory_trace
def get_answer(solution: Optional[str]) -> Optional[str]:
    """Extract the final boxed answer from a solution string, or None if absent."""
    if solution is None:
        return None
    boxed = last_boxed_only_string(solution)
    if boxed is None:
        return None
    # remove_boxed may itself yield None; pass that through unchanged.
    return remove_boxed(boxed)
class LightTorsoHopper(RoboschoolXMLModifierMixin, ModifiableRoboschoolHopper):
    """Hopper variant whose body geoms all use a reduced density (lighter robot)."""
    def __init__(self):
        # Rewrite every geom density in the MJCF before constructing the env.
        self.density = 500
        with self.modify_xml('hopper.xml') as tree:
            for elem in tree.iterfind('worldbody/body/geom'):
                elem.set('density', str(self.density))
        RoboschoolForwardWalkerMujocoXML.__init__(self, self.model_xml, 'torso', action_dim=3, obs_dim=15, power=0.75)
    def parameters(self):
        """Return the parent's parameter dict extended with this env's density."""
        # NOTE(review): accessed without a call — presumably a property on the
        # parent class; confirm against the mixin's definition.
        parameters = super(LightTorsoHopper, self).parameters
        parameters.update({'density': self.density})
        return parameters
def test_rsl_prims_balltree():
    """Functional and estimator APIs must find the expected cluster count with prims_balltree."""
    labels, _tree = robust_single_linkage(X, 0.4, algorithm='prims_balltree')
    found = len(set(labels)) - int((-1) in labels)  # -1 marks noise
    assert found == n_clusters
    fitted_labels = RobustSingleLinkage(algorithm='prims_balltree').fit(X).labels_
    found = len(set(fitted_labels)) - int((-1) in fitted_labels)
    assert found == n_clusters
def load_checkpoint(fpath: str):
    """Load a torch checkpoint from ``fpath``, tolerating Python-2 pickles.

    Returns the loaded checkpoint object.

    Raises:
        ValueError: if ``fpath`` is None.
        FileNotFoundError: if the file does not exist.
    """
    if (fpath is None):
        raise ValueError('File path is None')
    if (not osp.exists(fpath)):
        raise FileNotFoundError('File is not found at "{}"'.format(fpath))
    # Map to CPU when no GPU is available.
    map_location = (None if torch.cuda.is_available() else 'cpu')
    try:
        checkpoint = torch.load(fpath, map_location=map_location)
    except UnicodeDecodeError:
        # Legacy Python-2 checkpoint: retry with latin-1 pickle decoding.
        # NOTE(review): this monkey-patches the global `pickle` module for the
        # rest of the process — confirm no other caller is affected.
        pickle.load = partial(pickle.load, encoding='latin1')
        pickle.Unpickler = partial(pickle.Unpickler, encoding='latin1')
        checkpoint = torch.load(fpath, pickle_module=pickle, map_location=map_location)
    except Exception:
        print('Unable to load checkpoint from "{}"'.format(fpath))
        raise
    return checkpoint
def _create_pretrained_emb_from_txt(vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32, scope=None):
    """Build an embedding matrix from a pretrained text embedding file.

    The first ``num_trainable_tokens`` vocabulary entries become a trainable
    variable; the remaining rows are frozen constants from ``embed_file``.
    Tokens missing from the embedding file are zero-initialized.
    """
    (vocab, _) = vocab_utils.load_vocab(vocab_file)
    trainable_tokens = vocab[:num_trainable_tokens]
    utils.print_out(('# Using pretrained embedding: %s.' % embed_file))
    utils.print_out('  with trainable tokens: ')
    (emb_dict, emb_size) = vocab_utils.load_embed_txt(embed_file)
    for token in trainable_tokens:
        utils.print_out(('    %s' % token))
        if (token not in emb_dict):
            # No pretrained vector for this token: fall back to zeros.
            emb_dict[token] = ([0.0] * emb_size)
    emb_mat = np.array([emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())
    emb_mat = tf.constant(emb_mat)
    # Frozen tail: every row after the trainable prefix.
    emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [(- 1), (- 1)])
    with tf.variable_scope((scope or 'pretrain_embeddings'), dtype=dtype) as scope:
        with tf.device(_get_embed_device(num_trainable_tokens)):
            emb_mat_var = tf.get_variable('emb_mat_var', [num_trainable_tokens, emb_size])
    return tf.concat([emb_mat_var, emb_mat_const], 0)
def obj_fpr(result, reference, connectivity=1):
    """Object-level false-positive rate: fraction of reference objects left unmatched."""
    _, _, _, n_obj_reference, mapping = __distinct_binary_object_correspondences(reference, result, connectivity)
    unmatched = n_obj_reference - len(mapping)
    return unmatched / float(n_obj_reference)
class TensorInfo:
    """Per-tensor metadata: identity, shape/dtype, and memory-placement fields."""

    def __init__(self):
        self.tensor_id = (- 1)       # unique id; -1 = unset
        self.shape = None
        self.dtype = DataType.UNKNOWN
        self.is_const = False
        self.gaddr = (- 1)           # global-memory address; -1 = unset
        self.gsize = 0
        self.loffset = (- 1)         # local-memory offset; -1 = unset
        self.nslice = 0
        self.hslice = 0
        self.l2addr = 0
        self.in_layer = None         # producing layer
        self.out_layers = []         # consuming layers

    def __str__(self):
        dims = 'x'.join(str(d) for d in self.shape)
        const_flag = 'CONST' if self.is_const else ''
        slicing = ''
        if self.nslice > 0 and self.hslice > 0:
            slicing = 'nslice={} hslice={}'.format(self.nslice, self.hslice)
        return 'tensor_id={} [{}] {} {} {}'.format(self.tensor_id, dims, self.dtype.name, const_flag, slicing)
def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='highs-ds', callback=None, maxiter=None, disp=False, presolve=True, time_limit=None, dual_feasibility_tolerance=None, primal_feasibility_tolerance=None, simplex_dual_edge_weight_strategy=None, **unknown_options):
    """Documentation stub for ``linprog`` with ``method='highs-ds'``.

    This function exists only to carry the method-specific signature/docs;
    it is never called with the intent of computing anything.
    """
    pass
def fill_gaps2(values):
    """Set each 0 pixel that immediately precedes a 255 pixel (and that 255) to 255.

    Operates on a 1-D array of 0/255 values in place and also returns it.

    CLEANUP: the original defined an unused `searchval2` and computed the same
    `np.where` twice; both removed with identical behavior.
    """
    # Indices i where values[i] == 0 and values[i+1] == 255.
    gap_starts = np.where((values[:(- 1)] == 0) & (values[1:] == 255))[0]
    values[gap_starts] = 255
    values[gap_starts + 1] = 255
    return values
def create_sdfg_from_fortran_file(source_string: str):
    """Parse a Fortran (f2008) source string and lower it to a DaCe SDFG.

    Runs the fparser2 front end, builds the internal AST, applies the
    normalization/transformation pipeline, and translates the result into a
    new top-level SDFG, which is returned.
    """
    parser = pf().create(std='f2008')
    reader = ffr(source_string)
    ast = parser(reader)
    # NOTE(review): passes the SymbolTable class itself (not an instance) —
    # confirm InternalFortranAst expects the class here.
    tables = SymbolTable
    own_ast = ast_components.InternalFortranAst(ast, tables)
    program = own_ast.create_ast(ast)
    functions_and_subroutines_builder = ast_transforms.FindFunctionAndSubroutines()
    functions_and_subroutines_builder.visit(program)
    own_ast.functions_and_subroutines = functions_and_subroutines_builder.nodes
    # Normalization pipeline; each pass returns a rewritten program AST.
    program = ast_transforms.functionStatementEliminator(program)
    program = ast_transforms.CallToArray(functions_and_subroutines_builder.nodes).visit(program)
    program = ast_transforms.CallExtractor().visit(program)
    program = ast_transforms.SignToIf().visit(program)
    program = ast_transforms.ArrayToLoop(program).visit(program)
    # Intrinsic-specific lowering passes supplied by the internal AST.
    for transformation in own_ast.fortran_intrinsics():
        program = transformation(program).visit(program)
    program = ast_transforms.ForDeclarer().visit(program)
    program = ast_transforms.IndexExtractor(program).visit(program)
    ast2sdfg = AST_translator(own_ast, __file__)
    sdfg = SDFG(source_string)
    ast2sdfg.top_level = program
    ast2sdfg.globalsdfg = sdfg
    ast2sdfg.translate(program, sdfg)
    return sdfg
class SSIterator(object):
    """Python-2 style iterator over pickled session data, fed by a fetcher thread.

    A daemon ``SSFetcher`` thread fills ``self.queue`` with batches; iteration
    ends when a falsy batch is received.
    """
    def __init__(self, rng, batch_size, session_file=None, rank_file=None, dtype='int32', can_fit=False, queue_size=100, cache_size=100, shuffle=True, use_infinite_loop=True, max_len=1000):
        # Copy every constructor argument onto the instance in one shot.
        args = locals()
        args.pop('self')
        self.__dict__.update(args)
        self.has_ranks = (rank_file is not None)
        self.load_files()
        self.exit_flag = False
    def load_files(self):
        """Load the pickled session data (and rank data when provided)."""
        self.data = cPickle.load(open(self.session_file, 'r'))
        self.data_len = len(self.data)
        logger.debug(('Data len is %d' % self.data_len))
        if self.has_ranks:
            self.rank_data = cPickle.load(open(self.rank_file, 'r'))
            self.rank_data_len = len(self.rank_data)
            # Rank entries must align one-to-one with session entries.
            assert (self.rank_data_len == self.data_len)
            logger.debug(('Rank data len is %d' % self.rank_data_len))
    def start(self):
        """Create the bounded batch queue and spawn the daemon fetcher thread."""
        self.exit_flag = False
        self.queue = Queue.Queue(maxsize=self.queue_size)
        self.gather = SSFetcher(self)
        self.gather.daemon = True
        self.gather.start()
    def __del__(self):
        # Signal the fetcher to stop and wait for it on garbage collection.
        if hasattr(self, 'gather'):
            self.gather.exitFlag = True
            self.gather.join()
    def __iter__(self):
        return self
    def next(self):
        # Python-2 iterator protocol; a falsy batch marks exhaustion.
        if self.exit_flag:
            return None
        batch = self.queue.get()
        if (not batch):
            self.exit_flag = True
        return batch
def common_forward(info, forward_func):
    """Run ``forward_func`` over every batch of the dataset described by ``info``.

    Builds lightweight config/args holders from ``info`` (executors, networks,
    dataset URI, normalization flag), iterates the CSV dataset, and returns the
    stacked first outputs of each call as a numpy array.
    """
    batch_size = 1
    class ForwardConfig():
        pass
    class Args():
        pass
    args = Args()
    # NOTE(review): binds the class object itself, not an instance — the
    # attributes below are set on the class; presumably intentional, confirm.
    config = ForwardConfig
    if hasattr(info, 'global_config'):
        config.global_config = info.global_config
    config.executors = info.executors.values()
    config.networks = []
    for e in config.executors:
        if (e.network.name in info.networks.keys()):
            config.networks.append(info.networks[e.network.name])
        else:
            assert False, '{} is not found.'.format(e.network.name)
    # Use the first dataset's URI; normalize only if the dataset and every
    # executor agree on image normalization.
    normalize = True
    for d in info.datasets.values():
        args.dataset = d.uri
        normalize = d.normalize
        break
    for e in config.executors:
        normalize = (normalize and (not e.no_image_normalization))
    data_iterator = (lambda : data_iterator_csv_dataset(uri=args.dataset, batch_size=config.networks[0].batch_size, shuffle=False, normalize=normalize, with_memory_cache=False, with_file_cache=False))
    result = []
    with data_iterator() as di:
        index = 0
        while (index < di.size):
            data = di.next()
            avg = forward_func(args, index, config, data, di.variables)
            # Advance by the number of samples actually processed.
            index += len(avg[0])
            result.append(avg[0])
    return np.array(result)
def load_trees(filename, pipeline):
    """Read, filter, and normalize constituency trees from ``filename``.

    Structurally broken trees (missing labels/preterminals, non-unary roots,
    banned labels) are skipped with a diagnostic print; survivors get label and
    word remapping plus MWE splitting via ``pipeline``. Returns the kept trees.
    """
    try:
        raw_text = load_without_asterisks(filename, 'utf-8')
    except UnicodeDecodeError:
        # Some files are latin-1 encoded.
        raw_text = load_without_asterisks(filename, 'latin-1')
    trees = tree_reader.read_trees(''.join(raw_text), broken_ok=True)
    filtered_trees = []
    for tree in trees:
        if (tree.children[0].label is None):
            print('Skipping a broken tree (missing label) in {}: {}'.format(filename, tree))
            continue
        try:
            # Validates that every leaf has a preterminal; value unused.
            words = tuple(tree.leaf_labels())
        except ValueError:
            print('Skipping a broken tree (missing preterminal) in {}: {}'.format(filename, tree))
            continue
        if any((('www.facebook' in pt.label) for pt in tree.preterminals())):
            print('Skipping a tree with a weird preterminal label in {}: {}'.format(filename, tree))
            continue
        tree = tree.prune_none().simplify_labels(CONSTITUENT_SPLIT)
        if (len(tree.children) > 1):
            print('Found a tree with a non-unary root! {}: {}'.format(filename, tree))
            continue
        if tree.children[0].is_preterminal():
            print('Found a tree with a single preterminal node! {}: {}'.format(filename, tree))
            continue
        for pt in tree.preterminals():
            if (not pt.label):
                pt.label = 'UNK'
                print('Found a tree with a blank preterminal label. Setting it to UNK. {}: {}'.format(filename, tree))
        tree = tree.remap_constituent_labels(REMAP_NODES)
        tree = tree.remap_words(REMAP_WORDS)
        tree = split_mwe(tree, pipeline)
        if (tree is None):
            continue
        # for/else: weird_label stays None only when no banned label is present.
        constituents = set(parse_tree.Tree.get_unique_constituent_labels(tree))
        for weird_label in NODES_TO_ELIMINATE:
            if (weird_label in constituents):
                break
        else:
            weird_label = None
        if (weird_label is not None):
            print('Skipping a tree with a weird label {} in {}: {}'.format(weird_label, filename, tree))
            continue
        filtered_trees.append(tree)
    return filtered_trees
class TestSpglib(unittest.TestCase):
    """Regression tests for spglib symmetry detection against reference POSCAR data."""
    def setUp(self):
        """Collect test POSCAR paths, expected space-group numbers, and reference files."""
        self._filenames = []
        self._ref_filenames = []
        self._spgnum_ref = []
        for d in dirnames:
            dirname = os.path.join(data_dir, 'data', d)
            refdirname = os.path.join(data_dir, 'ref', d)
            filenames = os.listdir(dirname)
            # File names encode the space-group number as "<name>-<spgnum>[...]".
            self._spgnum_ref += [int(fname.split('-')[1]) for fname in filenames]
            self._filenames += [os.path.join(dirname, fname) for fname in filenames]
            self._ref_filenames += [os.path.join(refdirname, (fname + '-ref')) for fname in filenames]
    def _create_symref(self):
        pass
    def tearDown(self):
        pass
    def test_get_symmetry_dataset(self):
        """Dataset fields (number, hall, wyckoffs, ...) must match the references."""
        for (fname, spgnum, reffname) in zip(self._filenames, self._spgnum_ref, self._ref_filenames):
            cell = read_vasp(fname)
            # Distorted structures need a looser symmetry tolerance.
            if ('distorted' in fname):
                symprec = 0.1
            else:
                symprec = 1e-05
            dataset = get_symmetry_dataset(cell, symprec=symprec)
            self.assertEqual(dataset['number'], spgnum, msg=('%s' % fname))
            # Check every hall setting belonging to this space group.
            for i in range(spg_to_hall[(spgnum - 1)], spg_to_hall[spgnum]):
                dataset = get_symmetry_dataset(cell, hall_number=i, symprec=symprec)
                self.assertEqual(type(dataset), dict, msg=('%s/%d' % (fname, i)))
                self.assertEqual(dataset['hall_number'], i, msg=('%s' % fname))
            spg_type = get_spacegroup_type(dataset['hall_number'])
            self.assertEqual(dataset['international'], spg_type['international_short'], msg=('%s' % fname))
            self.assertEqual(dataset['hall'], spg_type['hall_symbol'], msg=('%s' % fname))
            self.assertEqual(dataset['choice'], spg_type['choice'], msg=('%s' % fname))
            self.assertEqual(dataset['pointgroup'], spg_type['pointgroup_international'], msg=('%s' % fname))
            wyckoffs = dataset['wyckoffs']
            with open(reffname) as f:
                wyckoffs_ref = yaml.load(f, Loader=yaml.FullLoader)['wyckoffs']
            for (w, w_ref) in zip(wyckoffs, wyckoffs_ref):
                self.assertEqual(w, w_ref, msg=('%s' % fname))
    def test_standardize_cell_and_pointgroup(self):
        """Standardization must preserve the space group; pointgroup must match rotations."""
        for (fname, spgnum) in zip(self._filenames, self._spgnum_ref):
            cell = read_vasp(fname)
            if ('distorted' in fname):
                symprec = 0.1
            else:
                symprec = 1e-05
            std_cell = standardize_cell(cell, to_primitive=False, no_idealize=True, symprec=symprec)
            dataset = get_symmetry_dataset(std_cell, symprec=symprec)
            self.assertEqual(dataset['number'], spgnum, msg=('%s' % fname))
            (ptg_symbol, _, _) = get_pointgroup(dataset['rotations'])
            self.assertEqual(dataset['pointgroup'], ptg_symbol, msg=('%s' % fname))
    def test_standardize_cell_from_primitive(self):
        """Round trip primitive -> conventional must preserve the space group."""
        for (fname, spgnum) in zip(self._filenames, self._spgnum_ref):
            cell = read_vasp(fname)
            if ('distorted' in fname):
                symprec = 0.1
            else:
                symprec = 1e-05
            prim_cell = standardize_cell(cell, to_primitive=True, no_idealize=True, symprec=symprec)
            std_cell = standardize_cell(prim_cell, to_primitive=False, no_idealize=True, symprec=symprec)
            dataset = get_symmetry_dataset(std_cell, symprec=symprec)
            self.assertEqual(dataset['number'], spgnum, msg=('%s' % fname))
    def test_standardize_cell_to_primitive(self):
        """Reduction to the primitive cell must preserve the space group."""
        for (fname, spgnum) in zip(self._filenames, self._spgnum_ref):
            cell = read_vasp(fname)
            if ('distorted' in fname):
                symprec = 0.1
            else:
                symprec = 1e-05
            prim_cell = standardize_cell(cell, to_primitive=True, no_idealize=True, symprec=symprec)
            dataset = get_symmetry_dataset(prim_cell, symprec=symprec)
            self.assertEqual(dataset['number'], spgnum, msg=('%s' % fname))
    def test_refine_cell(self):
        """Refining an already-refined cell must be (near-)idempotent."""
        for (fname, spgnum) in zip(self._filenames, self._spgnum_ref):
            cell = read_vasp(fname)
            if ('distorted' in fname):
                dataset_0 = get_symmetry_dataset(cell, symprec=0.1)
            else:
                dataset_0 = get_symmetry_dataset(cell, symprec=1e-05)
            ref_cell_0 = (dataset_0['std_lattice'], dataset_0['std_positions'], dataset_0['std_types'])
            dataset_1 = get_symmetry_dataset(ref_cell_0, symprec=1e-05)
            self.assertEqual(dataset_1['number'], spgnum, msg=('%s' % fname))
            if (('cubic' in fname) or ('hexagonal' in fname) or ('monoclinic' in fname) or ('orthorhombic' in fname) or ('tetragonal' in fname) or ('triclinic' in fname) or ('trigonal' in fname) or ('distorted' in fname)):
                ref_cell_1 = (dataset_1['std_lattice'], dataset_1['std_positions'], dataset_1['std_types'])
                dataset_2 = get_symmetry_dataset(ref_cell_1, symprec=1e-05)
                np.testing.assert_equal(dataset_1['std_types'], dataset_2['std_types'], err_msg=('%s' % fname))
                np.testing.assert_allclose(dataset_1['std_lattice'], dataset_2['std_lattice'], atol=1e-05, err_msg=('%s' % fname))
                # Compare fractional positions modulo lattice translations.
                diff = (dataset_1['std_positions'] - dataset_2['std_positions'])
                diff -= np.rint(diff)
                np.testing.assert_allclose(diff, 0, atol=1e-05, err_msg=('%s' % fname))
    def test_find_primitive(self):
        """Primitive-cell atom counts must match the centering multiplicity."""
        for fname in self._filenames:
            cell = read_vasp(fname)
            if ('distorted' in fname):
                symprec = 0.1
            else:
                symprec = 1e-05
            dataset = get_symmetry_dataset(cell, symprec=symprec)
            primitive = find_primitive(cell, symprec=symprec)
            spg_type = get_spacegroup_type(dataset['hall_number'])
            # Centering letter of the international symbol fixes the multiplicity.
            c = spg_type['international_short'][0]
            if (c in ['A', 'B', 'C', 'I']):
                multiplicity = 2
            elif (c == 'F'):
                multiplicity = 4
            elif (c == 'R'):
                self.assertEqual(spg_type['choice'], 'H')
                if (spg_type['choice'] == 'H'):
                    multiplicity = 3
                else:
                    multiplicity = 1
            else:
                multiplicity = 1
            self.assertEqual(len(dataset['std_types']), (len(primitive[2]) * multiplicity), msg=('multi: %d, %s' % (multiplicity, fname)))
    def test_magnetic_spacegroup_type(self):
        """Spot-check magnetic space-group type lookups for three UNI numbers."""
        actual1 = get_magnetic_spacegroup_type(1279)
        expect1 = {'uni_number': 1279, 'litvin_number': 1279, 'bns_number': '156.49', 'og_number': '156.1.1279', 'number': 156, 'type': 1}
        assert (actual1 == expect1)
        actual2 = get_magnetic_spacegroup_type(452)
        expect2 = {'uni_number': 452, 'litvin_number': 442, 'bns_number': '55.354', 'og_number': '55.2.442', 'number': 55, 'type': 2}
        assert (actual2 == expect2)
        actual3 = get_magnetic_spacegroup_type(1262)
        expect3 = {'uni_number': 1262, 'litvin_number': 1270, 'bns_number': '151.32', 'og_number': '153.4.1270', 'number': 151, 'type': 4}
        assert (actual3 == expect3)
    def test_magnetic_symmetry_database(self):
        """Check magnetic symmetry operations retrieved from the database."""
        data_h_actual = get_magnetic_symmetry_from_database(1242)
        for key in ['rotations', 'translations', 'time_reversals']:
            assert (len(data_h_actual[key]) == 18)
        data_r_actual = get_magnetic_symmetry_from_database(1242, hall_number=434)
        data_r_expect = {'rotations': np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]]], dtype=np.int32), 'translations': np.array([[0, 0, 0], [0, 0, 0], [0.5, 0.5, 0.5], [0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]), 'time_reversals': np.array([[0, 0, 1, 0, 1, 1]])}
        for key in ['rotations', 'translations', 'time_reversals']:
            assert np.allclose(data_r_actual[key], data_r_expect[key])
def get_model_url(data, name):
    """Build the download URL for model `name`, embedding its content hash."""
    filename = '{}-{}.pth'.format(name, data.model_hash[name])
    return join(WEB_ROOT, data.name, filename)
def test_cc_head():
    """Smoke-test CCHead construction and (CUDA-only) forward shape."""
    head = CCHead(in_channels=16, channels=8, num_classes=19)
    assert len(head.convs) == 2
    assert hasattr(head, 'cca')
    if not torch.cuda.is_available():
        pytest.skip('CCHead requires CUDA')
    feats = [torch.randn(1, 16, 23, 23)]
    head, feats = to_cuda(head, feats)
    out = head(feats)
    assert out.shape == (1, head.num_classes, 23, 23)
def collate_fn_checker():
    """Build a small synthetic batch, run it through collate_fn, and print the shapes."""
    input_lengths = [10, 8, 7, 10]
    target_lengths = [4, 6, 7, 10]
    batch = []
    for inp_len, trgt_len in zip(input_lengths, target_lengths):
        # Audio frames run at 4x the video frame rate.
        audio = torch.from_numpy(np.random.rand((4 * inp_len), args['AUDIO_FEATURE_SIZE']))
        video = torch.from_numpy(np.random.rand(inp_len, args['TX_NUM_FEATURES']))
        target = torch.from_numpy(np.random.randint(0, args['NUM_CLASSES'], trgt_len))
        batch.append(((audio, video), target, torch.tensor(inp_len), torch.tensor(trgt_len)))
    inputBatch, targetBatch, inputLenBatch, targetLenBatch = collate_fn(batch)
    print((inputBatch[0].shape, inputBatch[1].shape), targetBatch.shape, inputLenBatch.shape, targetLenBatch.shape)
    return
class MetricLogger(object):
    """Tracks a set of named running averages (one AverageMeter per metric)
    and formats them for log lines and end-of-run summaries."""

    def __init__(self, delimiter='\t'):
        # Meters are created lazily on first update of each metric name.
        self.meters = defaultdict(AverageMeter)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Update each named meter with a scalar value.

        Tensors are accepted: a 1-element tensor is unwrapped with .item();
        a multi-element tensor contributes its sum with a matching element
        count so the running average stays per-element.
        """
        for k, v in kwargs.items():
            count = 1
            if isinstance(v, torch.Tensor):
                if v.numel() == 1:
                    v = v.item()
                else:
                    count = v.numel()
                    v = v.sum().item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v, count)

    def __getattr__(self, attr):
        # Allow `logger.loss` style access to the underlying meters.
        if attr in self.meters:
            return self.meters[attr]
        # Bug fix: the original fell back to `object.__getattr__(self, attr)`,
        # but `object` has no `__getattr__`, so every miss raised a confusing
        # "type object 'object' has no attribute '__getattr__'" error.
        # Raise a conventional AttributeError naming the missing attribute.
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, attr)
        )

    def __str__(self):
        # Current-window average alongside the global average for each meter.
        metric_str = []
        for name, meter in self.meters.items():
            metric_str.append('{}: {:.4f} ({:.4f})'.format(name, meter.avg, meter.global_avg))
        return self.delimiter.join(metric_str)

    def summary_str(self):
        """Return only the global averages, for end-of-run summaries."""
        metric_str = []
        for name, meter in self.meters.items():
            metric_str.append('{}: {:.4f}'.format(name, meter.global_avg))
        return self.delimiter.join(metric_str)
def _get_nan(*data):
data = [np.asarray(item) for item in data]
try:
dtype = np.result_type(*data, np.half)
except DTypePromotionError:
return np.array(np.nan, dtype=np.float64)[()]
return np.array(np.nan, dtype=dtype)[()] |
class Metric(ABC):
    """Base class for evaluation metrics.

    Subclasses override `_compute_metrics`; those that operate on text set
    `requires_decoded = True` so predictions and labels are token-decoded
    before scoring.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__()
        self._kwargs = kwargs
        # Metric prefix = the defining module's filename without extension.
        source_path = inspect.getfile(self.__class__)
        self.prefix = os.path.splitext(os.path.basename(source_path))[0]
        self.requires_decoded = False

    def __call__(self, id_to_pred, id_to_labels, is_decoded=False):
        # Decode only when the metric needs text and the caller has not
        # already decoded (identity check on False preserved from original).
        needs_decoding = self.requires_decoded and (is_decoded is False)
        if needs_decoding:
            id_to_pred = self._decode(id_to_pred)
            id_to_labels = self._decode(id_to_labels)
        return self._compute_metrics(id_to_pred, id_to_labels)

    def _compute_metrics(self, id_to_pred, id_to_labels) -> Dict[str, float]:
        # Placeholder: subclasses return a mapping of metric name -> value.
        return

    def _decode(self, id_to_something):
        # Tokenizer and data args are supplied at construction time via kwargs.
        tokenizer = self._kwargs.get('tokenizer')
        data_args = self._kwargs.get('data_args')
        return decode(id_to_something, tokenizer, data_args)
class EquivarianceWidget():
    """Interactive panel for probing generator equivariance.

    Exposes translate/rotate controls (with optional per-frame animation)
    and publishes the resulting 3x3 affine input transform through
    ``viz.args.input_transform``.
    """

    def __init__(self, viz):
        self.viz = viz
        # Translation state; the *_def copies keep the defaults so the Reset
        # buttons can detect changes and restore them.
        self.xlate = dnnlib.EasyDict(x=0, y=0, anim=False, round=False, speed=0.01)
        self.xlate_def = dnnlib.EasyDict(self.xlate)
        # Rotation value is stored in full turns (angle = val * 2*pi below).
        self.rotate = dnnlib.EasyDict(val=0, anim=False, speed=0.005)
        self.rotate_def = dnnlib.EasyDict(self.rotate)
        self.opts = dnnlib.EasyDict(untransform=False)
        self.opts_def = dnnlib.EasyDict(self.opts)

    # Bug fix: the original contained the bare statement
    # `_utils.scoped_by_object_id` here (a stripped decorator), which would
    # raise NameError when the class body executes. Restored as the
    # `@imgui_utils.scoped_by_object_id` decorator on __call__.
    @imgui_utils.scoped_by_object_id
    def __call__(self, show=True):
        viz = self.viz
        if show:
            # --- Translate row ------------------------------------------------
            imgui.text('Translate')
            imgui.same_line(viz.label_w)
            with imgui_utils.item_width(viz.font_size * 8):
                _changed, (self.xlate.x, self.xlate.y) = imgui.input_float2('##xlate', self.xlate.x, self.xlate.y, format='%.4f')
            imgui.same_line(viz.label_w + (viz.font_size * 8) + viz.spacing)
            _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag fast##xlate', width=viz.button_w)
            if dragging:
                self.xlate.x += (dx / viz.font_size) * 0.02
                self.xlate.y += (dy / viz.font_size) * 0.02
            imgui.same_line()
            _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag slow##xlate', width=viz.button_w)
            if dragging:
                self.xlate.x += (dx / viz.font_size) * 0.0004
                self.xlate.y += (dy / viz.font_size) * 0.0004
            imgui.same_line()
            _clicked, self.xlate.anim = imgui.checkbox('Anim##xlate', self.xlate.anim)
            imgui.same_line()
            _clicked, self.xlate.round = imgui.checkbox('Round##xlate', self.xlate.round)
            imgui.same_line()
            # Speed slider is grayed out unless animation is enabled.
            with imgui_utils.item_width((-1) - viz.button_w - viz.spacing), imgui_utils.grayed_out(not self.xlate.anim):
                changed, speed = imgui.slider_float('##xlate_speed', self.xlate.speed, 0, 0.5, format='Speed %.5f', power=5)
                if changed:
                    self.xlate.speed = speed
            imgui.same_line()
            if imgui_utils.button('Reset##xlate', width=(-1), enabled=(self.xlate != self.xlate_def)):
                self.xlate = dnnlib.EasyDict(self.xlate_def)
        if show:
            # --- Rotate row ---------------------------------------------------
            imgui.text('Rotate')
            imgui.same_line(viz.label_w)
            with imgui_utils.item_width(viz.font_size * 8):
                _changed, self.rotate.val = imgui.input_float('##rotate', self.rotate.val, format='%.4f')
            imgui.same_line(viz.label_w + (viz.font_size * 8) + viz.spacing)
            _clicked, dragging, dx, _dy = imgui_utils.drag_button('Drag fast##rotate', width=viz.button_w)
            if dragging:
                self.rotate.val += (dx / viz.font_size) * 0.02
            imgui.same_line()
            _clicked, dragging, dx, _dy = imgui_utils.drag_button('Drag slow##rotate', width=viz.button_w)
            if dragging:
                self.rotate.val += (dx / viz.font_size) * 0.0004
            imgui.same_line()
            _clicked, self.rotate.anim = imgui.checkbox('Anim##rotate', self.rotate.anim)
            imgui.same_line()
            with imgui_utils.item_width((-1) - viz.button_w - viz.spacing), imgui_utils.grayed_out(not self.rotate.anim):
                changed, speed = imgui.slider_float('##rotate_speed', self.rotate.speed, (-1), 1, format='Speed %.4f', power=3)
                if changed:
                    self.rotate.speed = speed
            imgui.same_line()
            if imgui_utils.button('Reset##rotate', width=(-1), enabled=(self.rotate != self.rotate_def)):
                self.rotate = dnnlib.EasyDict(self.rotate_def)
        if show:
            # --- Options row --------------------------------------------------
            imgui.set_cursor_pos_x((imgui.get_content_region_max()[0] - 1) - (viz.button_w * 1) - (viz.font_size * 16))
            _clicked, self.opts.untransform = imgui.checkbox('Untransform', self.opts.untransform)
            imgui.same_line((imgui.get_content_region_max()[0] - 1) - viz.button_w)
            if imgui_utils.button('Reset##opts', width=(-1), enabled=(self.opts != self.opts_def)):
                self.opts = dnnlib.EasyDict(self.opts_def)
        if self.xlate.anim:
            # Animate translation: step from the current offset toward a
            # perpendicular point (c[::-1] * [1, -1] swaps and negates, i.e.
            # a 90-degree turn), normalized to `speed` units per frame so the
            # offset traces a curve rather than drifting in one direction.
            c = np.array([self.xlate.x, self.xlate.y], dtype=np.float64)
            t = c.copy()
            if np.max(np.abs(t)) < 0.0001:
                t += 1  # Avoid a zero vector (np.hypot would divide by 0).
            t *= 0.1 / np.hypot(*t)
            t += c[::-1] * [1, -1]
            d = t - c
            d *= (viz.frame_delta * self.xlate.speed) / np.hypot(*d)
            self.xlate.x += d[0]
            self.xlate.y += d[1]
        if self.rotate.anim:
            self.rotate.val += viz.frame_delta * self.rotate.speed
        pos = np.array([self.xlate.x, self.xlate.y], dtype=np.float64)
        if self.xlate.round and ('img_resolution' in viz.result):
            # Snap the translation to whole pixels of the output image.
            pos = np.rint(pos * viz.result.img_resolution) / viz.result.img_resolution
        angle = self.rotate.val * np.pi * 2
        # Row-major 3x3 affine: rotation by `angle` combined with translation `pos`.
        viz.args.input_transform = [
            [np.cos(angle), np.sin(angle), pos[0]],
            [-np.sin(angle), np.cos(angle), pos[1]],
            [0, 0, 1],
        ]
        viz.args.update(untransform=self.opts.untransform)
class GloVe(Vectors):
    """GloVe word vectors with on-disk caching.

    Loads from a previously saved ``<path>.pt`` cache when present;
    otherwise parses the raw vector file and writes the cache for next time.
    """

    def __init__(self, path: str, encoding=None, **kwargs):
        cache_file = f'{path}.pt'
        if os.path.exists(cache_file):
            itos, vectors = self.load_from_cache(path)
        else:
            itos, vectors = _load_from_file(path, encoding)
            self.save_to_cache(path, itos, vectors)
        super().__init__(itos, vectors, **kwargs)
def test_highlevel_datetime64_ArrayBuilder():
    """ArrayBuilder must accept datetime64/timedelta64 values — as numpy
    scalars with explicit units and as ISO strings — mixed with integers,
    and the snapshot must preserve every value including its unit."""
    builder = ak.highlevel.ArrayBuilder()
    ts_25us = np.datetime64('2020-03-27T10:41:12', '25us')
    ts_15s = np.datetime64('2020-03-27T10:41', '15s')
    ts_month = np.datetime64('2020-05')
    builder.datetime(ts_15s)
    builder.datetime('2020-03-27T10:41:11')
    builder.datetime(ts_25us)
    builder.datetime('2021-03-27')
    builder.datetime('2020-03-27T10:41:13')
    builder.timedelta(np.timedelta64(5, 's'))
    builder.datetime(ts_month)
    builder.datetime('2020-05-01T00:00:00.000000')
    builder.datetime('2020-07-27T10:41:11.200000')
    builder.integer(1)
    builder.timedelta(np.timedelta64(5, 's'))
    expected = [
        np.datetime64('2020-03-27T10:41', '15s'),
        np.datetime64('2020-03-27T10:41:11'),
        np.datetime64('2020-03-27T10:41:12', '25us'),
        np.datetime64('2021-03-27'),
        np.datetime64('2020-03-27T10:41:13'),
        np.timedelta64(5, 's'),
        np.datetime64('2020-05'),
        np.datetime64('2020-05-01T00:00:00.000000'),
        np.datetime64('2020-07-27T10:41:11.200000'),
        1,
        np.timedelta64(5, 's'),
    ]
    assert to_list(builder.snapshot()) == expected
def _calc_tgt(src: int, die) -> int:
    """Map a source index and die value to a target index.

    NOTE(review): appears to treat src >= 24 as a special (off-board)
    position whose target is simply die - 1, delegating on-board sources
    to ``_from_board`` — confirm against the board encoding. Both branches
    are computed and blended with boolean masks (JAX-friendly, no control
    flow).
    """
    special_tgt = jnp.int32(die) - 1
    board_tgt = jnp.int32(_from_board(src, die))
    is_special = src >= 24
    return is_special * special_tgt + (src < 24) * board_tgt
def convert_spans_into_sequence_of_tags(tag_matrix: List[str], max_span_width: int, sentence_length: int) -> List[str]:
    """Convert a flattened (end_index x span_width) span-tag matrix into a
    per-token tag sequence.

    Bug fix: the return annotation previously said ``List[int]``, but the
    function builds and returns a list of tag *strings* ('O' plus entries of
    ``tag_matrix``); corrected to ``List[str]``.

    Args:
        tag_matrix: Flattened matrix of length sentence_length * max_span_width.
            Entry ``end_idx * max_span_width + diff`` holds the tag of the span
            ending at token ``end_idx`` with width ``diff + 1``, or '*' when no
            span of that width ends there.
        max_span_width: Maximum span width encoded in ``tag_matrix``.
        sentence_length: Number of tokens in the sentence.

    Returns:
        A list of ``sentence_length`` tags; tokens covered by no span get 'O'.

    Raises:
        AssertionError: If two spans overlap (a token would be tagged twice).
    """
    tag_sequence = ['O' for _ in range(sentence_length)]
    for end_idx in range(sentence_length):
        for diff in range(max_span_width):
            if diff > end_idx:
                # Span would start before the sentence; wider ones only start earlier.
                break
            span_tag = tag_matrix[end_idx * max_span_width + diff]
            if span_tag == '*':
                # '*' marks "no span of this width ends here".
                continue
            start_idx = end_idx - diff
            for position in range(start_idx, end_idx + 1):
                # Spans must not overlap: every covered token must still be 'O'.
                assert tag_sequence[position] == 'O'
                tag_sequence[position] = span_tag
    return tag_sequence
def get_layer_dtype(layer):
    """Return the dtype name of a layer's first input tensor, falling back
    to its first output tensor; None when the layer has neither."""
    for tensors in (layer.in_tensors, layer.out_tensors):
        if tensors:
            return tensors[0].dtype.name
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.