code stringlengths 101 5.91M |
|---|
def parse_args():
    """Build and parse the command-line options for the Video Pose Network.

    Returns:
        argparse.Namespace holding every training/testing option below.
    """
    parser = argparse.ArgumentParser(description='Video Pose Network')
    parser.add_argument('--dataset', default='ntu60', type=str, choices=['ntu60', 'ntu120', 'smarthomes', 'nucla'], help='training dataset')
    # Fixed typo in help text ('mumber' -> 'number').
    parser.add_argument('--epochs', default=250, type=int, help='max number of epochs for training')
    parser.add_argument('--num_gpus', default=0, nargs='+', type=int, help='gpu ids for training')
    parser.add_argument('--model_name', default='vpn', type=str, choices=['vpn', 'i3d'], help='Model to use for training/validation')
    parser.add_argument('--part', default='full_body', type=str, choices=['full_body', 'left_part', 'right_part'], help='part of the body to use for training')
    # NOTE(review): these "boolean" options have no type= or action=, so any
    # value supplied on the command line arrives as a (truthy) string; only
    # the defaults behave as real booleans.  Left unchanged to preserve the
    # existing interface -- confirm whether store_true semantics were meant.
    parser.add_argument('--use_gpu', default=True, help='GPU to use for training/validation')
    parser.add_argument('--multi_gpu', default=True, help='Use multiple GPUs for training/validation')
    # Bug fix: without type=int a command-line --batch_size arrived as a str.
    parser.add_argument('--batch_size', default=4, type=int, help='set batch size')
    parser.add_argument('--nw', default=16, type=int, help='number of worker to load data')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum to use for training')
    parser.add_argument('--lr', default=0.01, type=float, help='lr to use for training')
    parser.add_argument('--weights_loc', default='./checkpoint', type=str, help='location to save the weights of the model')
    parser.add_argument('--optim', default='SGD', type=str, choices=['SGD', 'Adam'])
    parser.add_argument('--mode', default='train', type=str, choices=['train', 'test'])
    parser.add_argument('--n_dropout', default=0.3, type=float, help='dropout to use in GCNN')
    parser.add_argument('--multi_proc', default=True, help='Use multiprocessing for training')
    parser.add_argument('--num_classes', default=60, type=int, help='number of action classes')
    parser.add_argument('--protocol', default='cv', type=str, help='training/validation protocol for different datasets')
    parser.add_argument('--num_nodes', default=25, type=int, help='number of graph nodes to consider for a given pose data')
    parser.add_argument('--stack_size', default=16, type=int, help='clip width for training/testing')
    parser.add_argument('--num_neurons', default=64, type=int, help='number of nodes in GCNN')
    parser.add_argument('--timesteps', default=16, type=int, help='video clip size')
    parser.add_argument('--sym_norm', action='store_false', help='Symmetric Normalization flag for Graph Conv')
    parser.add_argument('--alpha', default=5, type=int, help='Edge weights for direct node connections')
    parser.add_argument('--beta', default=2, type=int, help='Edge weights for indirect node connections')
    parser.add_argument('--num_features', default=3, type=int, help='Initial feature width')
    parser.add_argument('--num_filters', default=2, type=int, help='Number of Filters for GCNN conv operation')
    parser.add_argument('--action_wt', default=99.9, type=float, help='weight for action recognition loss')
    parser.add_argument('--embed_wt', default=0.1, type=float, help='weight for feature embedding loss')
    parser.add_argument('--monitor', default='val_loss', type=str, help='Loss to monitor in the logger')
    parser.add_argument('--factor', default=0.1, type=float, help='logger factor')
    parser.add_argument('--patience', default=5, type=int, help='number of epochs to wait before reducing LR')
    args = parser.parse_args()
    return args
def get_open_fds():
    """Return the file-descriptor field lines ('f<num>') that ``lsof``
    reports for the current process."""
    import subprocess
    import os

    raw = subprocess.check_output(['lsof', '-w', '-Ff', '-p', str(os.getpid())])
    lines = raw.decode('utf-8').split('\n')
    # Keep only lines of the form 'f' followed by a pure decimal fd number.
    return [line for line in lines if line and line[0] == 'f' and line[1:].isdigit()]
def test_orthogonal_procrustes_ndim_too_large():
    """orthogonal_procrustes must reject inputs with more than 2 dimensions."""
    np.random.seed(1234)
    a = np.random.randn(3, 4, 5)
    b = np.random.randn(3, 4, 5)
    assert_raises(ValueError, orthogonal_procrustes, a, b)
def test_sanity_compute_3(simpledf: dd.DataFrame) -> None:
    """Smoke test: computing and rendering missing-value stats for column 'd'
    with custom hist/bar settings must complete without raising."""
    cfg = Config.from_dict(config={'hist.bins': 20, 'bar.bars': 15})
    intermediate = compute_missing(simpledf, col1='d', cfg=cfg)
    render_missing(intermediate, cfg)
class FfmpegFormat(Format):
    """imageio plugin format that reads/writes video through an external
    ffmpeg executable.

    Filenames of the form ``<video0>`` .. ``<video9>`` select a live camera
    device instead of a file.
    """

    def _can_read(self, request):
        """Return True when this format can read the request.

        Accepts only multi-image modes ('I' or '?'), camera pseudo-filenames
        ``<video0>``..``<video9>``, or an extension listed in
        ``self.extensions``.  Falls through (implicitly returning None, i.e.
        falsy) otherwise.
        """
        if (request.mode[1] not in 'I?'):
            return False
        if (request.filename in [('<video%i>' % i) for i in range(10)]):
            return True
        if (request.extension in self.extensions):
            return True

    def _can_write(self, request):
        """Return True when both mode and extension match; implicitly None
        (falsy) otherwise."""
        if (request.mode[1] in (self.modes + '?')):
            if (request.extension in self.extensions):
                return True

    class Reader(Format.Reader):
        # Background frame grabber, used only for live cameras (set in _open).
        _frame_catcher = None
        # Generator over raw frames yielded by the ffmpeg subprocess.
        _read_gen = None

        def _get_cam_inputname(self, index):
            """Translate a camera index into the platform-specific ffmpeg
            input name ('/dev/videoN' on Linux, 'video=<name>' on Windows,
            the bare index string on macOS)."""
            if sys.platform.startswith('linux'):
                # request._video is like '<video0>'; strip the angle brackets.
                return ('/dev/' + self.request._video[1:(- 1)])
            elif sys.platform.startswith('win'):
                # Ask ffmpeg to list capture devices; names arrive on stderr.
                ffmpeg_api = _get_ffmpeg_api()
                cmd = [ffmpeg_api.get_ffmpeg_exe(), '-list_devices', 'true', '-f', CAM_FORMAT, '-i', 'dummy']
                proc = sp.Popen(cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
                proc.stdout.readline()
                proc.terminate()
                infos = proc.stderr.read().decode('utf-8', errors='ignore')
                try:
                    name = parse_device_names(infos)[index]
                except IndexError:
                    raise IndexError(('No ffdshow camera at index %i.' % index))
                return ('video=%s' % name)
            elif sys.platform.startswith('darwin'):
                # avfoundation accepts the numeric device index directly.
                name = str(index)
                return name
            else:
                # Unknown platform; this placeholder is unlikely to work.
                return '??'

        def _open(self, loop=False, size=None, dtype=None, pixelformat=None, print_info=False, ffmpeg_params=None, input_params=None, output_params=None, fps=None):
            """Set up reading: normalize options, resolve the input name, and
            spawn the ffmpeg frame generator (plus a catcher thread for
            cameras)."""
            self._ffmpeg_api = _get_ffmpeg_api()
            self._arg_loop = bool(loop)
            # Normalize the requested frame size to ffmpeg's 'WxH' form.
            if (size is None):
                self._arg_size = None
            elif isinstance(size, tuple):
                self._arg_size = ('%ix%i' % size)
            elif (isinstance(size, str) and ('x' in size)):
                self._arg_size = size
            else:
                raise ValueError('FFMPEG size must be tuple of "NxM"')
            if (pixelformat is None):
                pass
            elif (not isinstance(pixelformat, str)):
                raise ValueError('FFMPEG pixelformat must be str')
            # Output dtype: uint8 -> rgb24, uint16 -> rgb48le (set below).
            if (dtype is None):
                self._dtype = np.dtype('uint8')
            else:
                self._dtype = np.dtype(dtype)
                allowed_dtypes = ['uint8', 'uint16']
                if (self._dtype.name not in allowed_dtypes):
                    raise ValueError('dtype must be one of: {}'.format(', '.join(allowed_dtypes)))
            self._arg_pixelformat = pixelformat
            self._arg_input_params = (input_params or [])
            self._arg_output_params = (output_params or [])
            # NOTE(review): ffmpeg_params are appended to the *input* side
            # here, while the Writer treats them as output params -- confirm
            # this asymmetry is intentional.
            self._arg_input_params += (ffmpeg_params or [])
            self.request._video = None
            if (self.request.filename in [('<video%i>' % i) for i in range(10)]):
                self.request._video = self.request.filename
            # Cameras need an explicit framerate; default to 15 fps.
            if self.request._video:
                if ('-framerate' not in str(self._arg_input_params)):
                    self._arg_input_params.extend(['-framerate', str(float((fps or 15)))])
            if self.request._video:
                # '<videoN>'[-2] is the digit N.
                index = int(self.request._video[(- 2)])
                self._filename = self._get_cam_inputname(index)
            else:
                self._filename = self.request.get_local_filename()
                # Escape carets, which are special on some shells.
                self._filename = self._filename.replace('^', '^^')
            # Frames are always decoded to 3-channel RGB.
            self._depth = 3
            if (self._dtype.name == 'uint8'):
                self._pix_fmt = 'rgb24'
                self._bytes_per_channel = 1
            else:
                self._pix_fmt = 'rgb48le'
                self._bytes_per_channel = 2
            self._pos = (- 1)
            self._meta = {'plugin': 'ffmpeg'}
            self._lastread = None
            # Frame count is unknown up front; computed only when looping a
            # file (counting is impossible for a live camera).
            self._nframes = float('inf')
            if (self._arg_loop and (not self.request._video)):
                self._nframes = self.count_frames()
            self._meta['nframes'] = self._nframes
            self._initialize()
            # For cameras, a background thread keeps grabbing the newest frame.
            if self.request._video:
                self._frame_catcher = FrameCatcher(self._read_gen)

        def _close(self):
            """Stop the catcher thread (if any) and close the frame generator."""
            if (self._frame_catcher is not None):
                self._frame_catcher.stop_me()
                self._frame_catcher = None
            if (self._read_gen is not None):
                self._read_gen.close()
                self._read_gen = None

        def count_frames(self):
            """Count frames by scanning the file with the ffmpeg helper."""
            cf = self._ffmpeg_api.count_frames_and_secs
            return cf(self._filename)[0]

        def _get_length(self):
            # May be float('inf') for streams/cameras.
            return self._nframes

        def _get_data(self, index):
            """Return (frame, meta_dict) for the given frame index.

            Re-initializes ffmpeg with a seek for backward jumps or forward
            jumps of more than 100 frames; otherwise decodes and discards the
            intermediate frames.
            """
            if (self._arg_loop and (self._nframes < float('inf'))):
                index %= self._nframes
            if (index == self._pos):
                # Same frame as last time; return the cached array.
                return (self._lastread, dict(new=False))
            elif (index < 0):
                raise IndexError('Frame index must be >= 0')
            elif (index >= self._nframes):
                raise IndexError('Reached end of video')
            else:
                if ((index < self._pos) or (index > (self._pos + 100))):
                    self._initialize(index)
                else:
                    self._skip_frames(((index - self._pos) - 1))
                (result, is_new) = self._read_frame()
                self._pos = index
                return (result, dict(new=is_new))

        def _get_meta_data(self, index):
            # Metadata is per-file, not per-frame; index is ignored.
            return self._meta

        def _initialize(self, index=0):
            """(Re)spawn the ffmpeg read generator, optionally seeking to
            frame ``index``."""
            if (self._read_gen is not None):
                self._read_gen.close()
            iargs = []
            oargs = []
            iargs += self._arg_input_params
            if self.request._video:
                iargs += ['-f', CAM_FORMAT]
                if self._arg_pixelformat:
                    iargs += ['-pix_fmt', self._arg_pixelformat]
                if self._arg_size:
                    iargs += ['-s', self._arg_size]
            elif (index > 0):
                # Split the seek: a fast input-side seek to ~10s before the
                # target, then an accurate output-side seek for the remainder.
                starttime = (index / self._meta['fps'])
                seek_slow = min(10, starttime)
                seek_fast = (starttime - seek_slow)
                iargs += ['-ss', ('%.06f' % seek_fast)]
                oargs += ['-ss', ('%.06f' % seek_slow)]
            if self._arg_size:
                oargs += ['-s', self._arg_size]
            if self.request.kwargs.get('fps', None):
                fps = float(self.request.kwargs['fps'])
                oargs += ['-r', ('%.02f' % fps)]
            oargs += self._arg_output_params
            pix_fmt = self._pix_fmt
            bpp = (self._depth * self._bytes_per_channel)
            rf = self._ffmpeg_api.read_frames
            self._read_gen = rf(self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs)
            # The generator's first yield is the metadata dict.
            if self.request._video:
                try:
                    meta = self._read_gen.__next__()
                except IOError as err:
                    err_text = str(err)
                    if ('darwin' in sys.platform):
                        if ("Unknown input format: 'avfoundation'" in err_text):
                            err_text += 'Try installing FFMPEG using home brew to get a version with support for cameras.'
                    raise IndexError('No camera at {}.\n\n{}'.format(self.request._video, err_text))
                else:
                    self._meta.update(meta)
            elif (index == 0):
                self._meta.update(self._read_gen.__next__())
            else:
                # Seeked re-init: consume the metadata yield without
                # overwriting the metadata captured at index 0.
                self._read_gen.__next__()

        def _skip_frames(self, n=1):
            """Decode and discard the next ``n`` frames."""
            for i in range(n):
                self._read_gen.__next__()
            self._pos += n

        def _read_frame(self):
            """Grab the next raw frame (from the catcher thread for cameras,
            otherwise from the generator) and convert it to an ndarray of
            shape (h, w, 3)."""
            (w, h) = self._meta['size']
            framesize = (((w * h) * self._depth) * self._bytes_per_channel)
            if self._frame_catcher:
                (s, is_new) = self._frame_catcher.get_frame()
            else:
                s = self._read_gen.__next__()
                is_new = True
            if (len(s) != framesize):
                raise RuntimeError(('Frame is %i bytes, but expected %i.' % (len(s), framesize)))
            # Copy so the result owns its memory after the buffer is reused.
            result = np.frombuffer(s, dtype=self._dtype).copy()
            result = result.reshape((h, w, self._depth))
            self._lastread = result
            return (result, is_new)

    class Writer(Format.Writer):
        # Coroutine that accepts frames and pipes them to ffmpeg.
        _write_gen = None

        def _open(self, fps=10, codec='libx264', bitrate=None, pixelformat='yuv420p', ffmpeg_params=None, input_params=None, output_params=None, ffmpeg_log_level='quiet', quality=5, macro_block_size=16):
            """Prepare the writer.

            The keyword arguments are not consumed here; _initialize re-reads
            them from ``self.request.kwargs``.  Actual initialization is
            deferred to the first _append_data call, because the frame size
            and depth are needed first.
            """
            self._ffmpeg_api = _get_ffmpeg_api()
            self._filename = self.request.get_local_filename()
            self._pix_fmt = None
            self._depth = None
            self._size = None

        def _close(self):
            """Close the write coroutine, flushing/terminating ffmpeg."""
            if (self._write_gen is not None):
                self._write_gen.close()
                self._write_gen = None

        def _append_data(self, im, meta):
            """Append one frame; the first frame fixes size and channel count
            for the whole movie."""
            (h, w) = im.shape[:2]
            size = (w, h)
            depth = (1 if (im.ndim == 2) else im.shape[2])
            # ffmpeg is fed 8-bit, C-contiguous data.
            im = image_as_uint(im, bitdepth=8)
            if (not im.flags.c_contiguous):
                im = np.ascontiguousarray(im)
            if (self._size is None):
                # First frame: derive the input pixel format from depth.
                map = {1: 'gray', 2: 'gray8a', 3: 'rgb24', 4: 'rgba'}
                self._pix_fmt = map.get(depth, None)
                if (self._pix_fmt is None):
                    raise ValueError('Image must have 1, 2, 3 or 4 channels')
                self._size = size
                self._depth = depth
                self._initialize()
            if (size != self._size):
                raise ValueError('All images in a movie should have same size')
            if (depth != self._depth):
                raise ValueError('All images in a movie should have same number of channels')
            assert (self._write_gen is not None)
            self._write_gen.send(im)

        def set_meta_data(self, meta):
            raise RuntimeError('The ffmpeg format does not support setting meta data.')

        def _initialize(self):
            """Spawn the ffmpeg write coroutine using options taken from the
            original request (not from _open's arguments)."""
            if (self._write_gen is not None):
                self._write_gen.close()
            fps = self.request.kwargs.get('fps', 10)
            codec = self.request.kwargs.get('codec', None)
            bitrate = self.request.kwargs.get('bitrate', None)
            quality = self.request.kwargs.get('quality', None)
            input_params = (self.request.kwargs.get('input_params') or [])
            output_params = (self.request.kwargs.get('output_params') or [])
            output_params += (self.request.kwargs.get('ffmpeg_params') or [])
            pixelformat = self.request.kwargs.get('pixelformat', None)
            macro_block_size = self.request.kwargs.get('macro_block_size', 16)
            ffmpeg_log_level = self.request.kwargs.get('ffmpeg_log_level', None)
            # 0/None disables macroblock alignment (1 == no resizing).
            macro_block_size = (macro_block_size or 1)
            self._write_gen = self._ffmpeg_api.write_frames(self._filename, self._size, pix_fmt_in=self._pix_fmt, pix_fmt_out=pixelformat, fps=fps, quality=quality, bitrate=bitrate, codec=codec, macro_block_size=macro_block_size, ffmpeg_log_level=ffmpeg_log_level, input_params=input_params, output_params=output_params)
            # Prime the coroutine so it is ready to receive frames.
            self._write_gen.send(None)
class CutoffTimeBasedStragglerHandling(StragglerHandlingFunction):
    """Straggler handling that cuts a round off after a wall-clock deadline,
    provided a minimum number of collaborators have already reported."""

    def __init__(self, round_start_time=None, straggler_cutoff_time=np.inf, minimum_reporting=1, **kwargs):
        # Extra keyword arguments are accepted for interface compatibility
        # and ignored, exactly as before.
        self.round_start_time = round_start_time
        self.straggler_cutoff_time = straggler_cutoff_time
        self.minimum_reporting = minimum_reporting

    def straggler_time_expired(self):
        """True once more than ``straggler_cutoff_time`` seconds have passed
        since ``round_start_time`` (always False with no start time set)."""
        if self.round_start_time is None:
            return False
        return (time.time() - self.round_start_time) > self.straggler_cutoff_time

    def minimum_collaborators_reported(self, num_collaborators_done):
        """True when at least ``minimum_reporting`` collaborators reported."""
        return num_collaborators_done >= self.minimum_reporting

    def straggler_cutoff_check(self, num_collaborators_done, all_collaborators=None):
        """Cut off only when the deadline expired AND the quorum is met."""
        expired = self.straggler_time_expired()
        quorum = self.minimum_collaborators_reported(num_collaborators_done)
        return expired and quorum
class TranslationUnitSaveError(Exception):
    """Raised when saving a TranslationUnit fails.

    ``save_error`` holds one of the ERROR_* constants defined below.
    """

    ERROR_UNKNOWN = 1
    ERROR_TRANSLATION_ERRORS = 2
    ERROR_INVALID_TU = 3

    def __init__(self, enumeration, message):
        assert isinstance(enumeration, int)
        # Reject values outside the known 1..3 range up front.
        if not (1 <= enumeration <= 3):
            raise Exception(('Encountered undefined TranslationUnit save error constant: %d. Please file a bug to have this value supported.' % enumeration))
        self.save_error = enumeration
        super().__init__('Error %d: %s' % (enumeration, message))
# NOTE(review): the original first line was the bare expression
# '_utils.test(require=...)' -- a no-op as written; restored as a decorator,
# the only reading under which it has an effect.  Confirm against upstream.
@_utils.test(require=ti.extension.sparse)
def test_pointer2():
    """Sparse (pointer) field sanity check: activate n*n cells with x[i] = i,
    then struct-for over the active cells and sum the indices."""
    x = ti.field(ti.f32)
    s = ti.field(ti.i32)
    n = 128
    ti.root.pointer(ti.i, n).dense(ti.i, n).place(x)
    ti.root.place(s)

    # NOTE(review): @ti.kernel restored on both helpers -- a struct-for over a
    # field ('for i in x') is only supported inside a Taichi kernel, so the
    # decorators were presumably lost in extraction; confirm.
    @ti.kernel
    def activate():
        # Writing x[i] implicitly activates the enclosing pointer cell.
        for i in range((n * n)):
            x[i] = i

    @ti.kernel
    def func():
        # Visits only active cells; accumulates their indices into s.
        for i in x:
            s[None] += i

    activate()
    func()
    N = (n * n)
    # Sum of 0..N-1.
    assert (s[None] == ((N * (N - 1)) / 2))
def test_default_parameters_BlockBootstrap() -> None:
    """A freshly constructed BlockBootstrap exposes the documented defaults."""
    cv = BlockBootstrap()
    observed = (cv.n_resamplings, cv.length, cv.n_blocks, bool(cv.overlapping), cv.random_state)
    assert observed == (30, None, None, False, None)
class InceptionV3(nn.Module):
    """Inception-v3 style classifier.

    NOTE(review): the stem uses kernel_size=3 with padding=1 (stride 1) and
    num_classes defaults to 10, which suggests an adaptation for small inputs
    such as CIFAR rather than 299x299 ImageNet crops -- confirm intended
    input size.  There is no auxiliary classifier head.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        # Stem: 3 -> 32 -> 32 -> 64 -> 80 -> 192 channels.
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, padding=1)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3, padding=1)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        # Inception blocks; input channel counts are fixed by the previous
        # stage's output (192 -> 256 -> 288 -> ... -> 2048).
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        # Head: global average pool -> dropout -> linear classifier.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout2d()
        self.linear = nn.Linear(2048, num_classes)

    def forward(self, x, with_latent=False, fake_relu=False, no_relu=False):
        """Run the network.

        Args:
            x: input image batch.
            with_latent: when True, also return the 2048-d pooled features.
            fake_relu / no_relu: accepted for API parity with sibling models
                but not supported here (asserted off).

        Returns:
            Class logits, or (logits, latent) when with_latent is True.
        """
        assert ((not fake_relu) and (not no_relu)), 'fake_relu and no_relu not yet supported for this architecture'
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        x = self.avgpool(x)
        x = self.dropout(x)
        # Flatten pooled features to (batch, 2048).
        latent = x.view(x.size(0), (- 1))
        out = self.linear(latent)
        if with_latent:
            return (out, latent)
        return out
class DatasetEvaluator:
    """No-op base class for dataset evaluators; subclasses override the hooks
    below to accumulate and report metrics."""

    def reset(self):
        """Clear any accumulated state before a new evaluation run."""

    def process(self, input, output):
        """Ingest one (input, output) pair; the base class ignores it."""

    def evaluate(self):
        """Produce final metrics; the base class returns None."""
def threeway_split(n, k_validate, k_test, exclude=None):
    """Split indices 0..n-1 (minus ``exclude``) into train/validate/test sets.

    Args:
        n: total number of indices to draw from.
        k_validate: size of the validation set.
        k_test: size of the test set.
        exclude: optional collection of indices to drop entirely.  The
            previous default of a shared mutable ``[]`` is replaced by the
            None sentinel to avoid the mutable-default pitfall; callers see
            identical behavior.

    Returns:
        Tuple (train, validate, test) as produced by generate_distinct_sets.
    """
    if exclude is None:
        exclude = []
    full = generate_indices(n, exclude)
    (model_building, test) = generate_distinct_sets(full, k_test)
    (rest, validate) = generate_distinct_sets(model_building, k_validate)
    return (rest, validate, test)
def train(model, device, train_loader, criterion, optimizer, scheduler, epoch, iter_meter, experiment):
    """Run one training epoch of the speech model, logging loss and LR to the
    (comet-style) ``experiment`` tracker.

    Args:
        model: network producing per-timestep class scores.
        device: torch device to move batches to.
        train_loader: DataLoader yielding
            (spectrograms, labels, input_lengths, label_lengths) tuples.
        criterion: loss taking (log_probs, labels, input_lengths,
            label_lengths) -- presumably CTC loss, given the transpose to
            (time, batch, classes) below; confirm.
        optimizer / scheduler: stepped once per batch.
        epoch: current epoch number (for the progress printout only).
        iter_meter: global step counter with .step() and .get().
        experiment: tracker exposing .train() context and .log_metric().
    """
    model.train()
    data_len = len(train_loader.dataset)
    with experiment.train():
        for (batch_idx, _data) in enumerate(train_loader):
            (spectrograms, labels, input_lengths, label_lengths) = _data
            (spectrograms, labels) = (spectrograms.to(device), labels.to(device))
            optimizer.zero_grad()
            output = model(spectrograms)
            output = F.log_softmax(output, dim=2)
            # (batch, time, classes) -> (time, batch, classes).
            output = output.transpose(0, 1)
            loss = criterion(output, labels, input_lengths, label_lengths)
            loss.backward()
            experiment.log_metric('loss', loss.item(), step=iter_meter.get())
            # NOTE(review): scheduler.get_lr() is deprecated in newer PyTorch
            # in favor of get_last_lr() -- confirm the torch version in use.
            experiment.log_metric('learning_rate', scheduler.get_lr(), step=iter_meter.get())
            optimizer.step()
            # Per-batch scheduler step (e.g. OneCycle-style schedules).
            scheduler.step()
            iter_meter.step()
            # NOTE(review): batch_idx is a batch index while data_len is the
            # dataset size, so 'batch_idx == data_len' is presumably never
            # true; the print effectively fires every 100 batches.  Confirm
            # whether the last batch was meant to be printed.
            if (((batch_idx % 100) == 0) or (batch_idx == data_len)):
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, (batch_idx * len(spectrograms)), data_len, ((100.0 * batch_idx) / len(train_loader)), loss.item()))
def test_entered_for_loop_full_loop_not_entered(simple_module, tracer_mock):
    """Instrumenting full_for_loop registers exactly one predicate, and calling
    it with 0 records the loop predicate as not taken (False)."""
    adapter = BranchCoverageInstrumentation(tracer_mock)
    instrumented_code = InstrumentationTransformer(tracer_mock, [adapter]).instrument_module(simple_module.full_for_loop.__code__)
    simple_module.full_for_loop.__code__ = instrumented_code
    tracer_mock.register_predicate.assert_called_once()
    simple_module.full_for_loop(0)
    tracer_mock.executed_bool_predicate.assert_called_with(False, 0)
class SimpleProgressBar(BaseProgressBar):
    """Progress bar that emits a plain log line every ``log_interval`` steps."""

    def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
        super().__init__(iterable, epoch, prefix)
        self.log_interval = log_interval
        # Current index and total size, populated while iterating.
        self.i = None
        self.size = None

    def __iter__(self):
        self.size = len(self.iterable)
        for index, item in enumerate(self.iterable, start=self.offset):
            self.i = index
            yield item

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats, but only on log_interval boundaries."""
        step = step or self.i or 0
        if step <= 0 or self.log_interval is None:
            return
        if step % self.log_interval != 0:
            return
        formatted = self._format_stats(stats)
        postfix = self._str_commas(formatted)
        with rename_logger(logger, tag):
            logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, (self.i + 1), self.size, postfix))

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats unconditionally."""
        with rename_logger(logger, tag):
            logger.info('{} | {}'.format(self.prefix, self._str_pipes(self._format_stats(stats))))
# NOTE(review): these decorators appeared with their '@pytest.mark' prefix
# stripped ('.parametrize(...)' is not valid statement syntax); restored here.
# Relies on pytest being imported at the top of the original test module.
@pytest.mark.parametrize('observation_shape', [(4,), ((4,), (8,))])
@pytest.mark.parametrize('action_size', [2])
@pytest.mark.parametrize('length', [100])
@pytest.mark.parametrize('partial_length', [10])
@pytest.mark.parametrize('batch_size', [32])
@pytest.mark.parametrize('picker', [None, BasicTransitionPicker()])
@pytest.mark.parametrize('slicer', [None, BasicTrajectorySlicer()])
def test_replay_buffer_sample(observation_shape: Shape, action_size: int, length: int, partial_length: int, batch_size: int, picker: Optional[BasicTransitionPicker], slicer: Optional[BasicTrajectorySlicer]) -> None:
    """Sampling transition and trajectory batches from a single-episode buffer
    returns exactly batch_size items each."""
    episode = create_episode(observation_shape, action_size, length)
    replay_buffer = ReplayBuffer(InfiniteBuffer(), episodes=[episode], transition_picker=picker, trajectory_slicer=slicer)
    batch = replay_buffer.sample_transition_batch(batch_size)
    assert (len(batch) == batch_size)
    traj_batch = replay_buffer.sample_trajectory_batch(batch_size, partial_length)
    assert (len(traj_batch) == batch_size)
def parse_match_from_known_labels(graph_parse, known_labels):
    """Match each OCR'd diagram label to the closest geometric instance in the
    graph parse and build the corresponding formula.

    Args:
        graph_parse: a GraphParse holding point/radius variables and offsets.
        known_labels: iterable of dicts with keys 'label', 'x', 'y', 'type'
            (type may be prefixed, e.g. 'length line' or 'angle angle').

    Returns:
        MatchParse mapping label text -> list of candidate formulas.
    """
    assert isinstance(graph_parse, GraphParse)
    match_dict = {}
    point_key_dict = {}
    offset = graph_parse.image_segment_parse.diagram_image_segment.offset
    for (idx, d) in enumerate(known_labels):
        label = d['label']
        # Label coordinates are given in full-image space; shift into the
        # diagram segment's local coordinates.
        x = (d['x'] - offset[0])
        y = (d['y'] - offset[1])
        label_point = instantiators['point'](x, y)
        type_ = d['type']
        # A prefixed type like 'length line' carries a modifier in arr[0].
        arr = type_.split(' ')
        if (len(arr) > 1):
            type_ = arr[(- 1)]
        instances = get_all_instances(graph_parse, type_)
        if (len(instances) == 0):
            logging.error(('no instance found of type %s' % type_))
            continue
        # Rank instances of the right type by distance to the label.
        # Bug fix: dict.iteritems() is Python-2-only; .items() iterates the
        # same pairs and works on both 2 and 3.
        if ((len(arr) > 1) and (type_ == 'line') and (arr[0] == 'length')):
            distances = [(key, label_distance_to_line(label_point, instance, True)) for (key, instance) in instances.items()]
        elif (type_ == 'line'):
            distances = [(key, label_distance_to_line(label_point, instance, False)) for (key, instance) in instances.items()]
        elif (type_ == 'point'):
            distances = [(key, label_distance_to_point(label_point, instance)) for (key, instance) in instances.items()]
        elif (type_ == 'arc'):
            distances = [(key, label_distance_to_arc(label_point, instance)) for (key, instance) in instances.items()]
        elif (type_ == 'angle'):
            distances = [(key, label_distance_to_angle(label_point, instance)) for (key, instance) in instances.items()]
        # NOTE(review): for any other type_, 'distances' (and below,
        # 'formula') is unbound and the next line raises NameError --
        # presumably the type set is closed upstream; confirm.
        argmin_key = min(distances, key=(lambda pair: pair[1]))[0]
        if (type_ == 'line'):
            (a_key, b_key) = argmin_key
            a_point = graph_parse.point_variables[a_key]
            b_point = graph_parse.point_variables[b_key]
            formula = FormulaNode(signatures['Line'], [a_point, b_point])
            if ((len(arr) > 1) and (arr[0] == 'length')):
                formula = FormulaNode(signatures['LengthOf'], [formula])
        elif (type_ == 'point'):
            formula = graph_parse.point_variables[argmin_key]
            point_key_dict[label] = argmin_key
        elif (type_ == 'angle'):
            (a_key, b_key, c_key) = argmin_key
            a_point = graph_parse.point_variables[a_key]
            b_point = graph_parse.point_variables[b_key]
            c_point = graph_parse.point_variables[c_key]
            formula = FormulaNode(signatures['Angle'], [a_point, b_point, c_point])
            if ((len(arr) > 1) and (arr[0] == 'angle')):
                # Numeric angle labels denote the measure in degrees.
                formula = FormulaNode(signatures['MeasureOf'], [formula])
                formula = FormulaNode(signatures['Div'], [formula, FormulaNode(signatures['Degree'], [])])
        elif (type_ == 'arc'):
            ((center_key, radius_key), a_key, b_key) = argmin_key
            center_point = graph_parse.point_variables[center_key]
            radius = graph_parse.radius_variables[center_key][radius_key]
            circle = FormulaNode(signatures['Circle'], [center_point, radius])
            a_point = graph_parse.point_variables[a_key]
            b_point = graph_parse.point_variables[b_key]
            formula = FormulaNode(signatures['Arc'], [circle, a_point, b_point])
            if ((len(arr) > 0) and (arr[0] == 'angle')):
                formula = FormulaNode(signatures['MeasureOf'], [formula])
                formula = FormulaNode(signatures['Div'], [formula, FormulaNode(signatures['Degree'], [])])
        if (label not in match_dict):
            match_dict[label] = []
        elif issubtype(formula.return_type, 'entity'):
            # A label naming an entity must be unique; a duplicate is an error.
            raise Exception()
        match_dict[label].append(formula)
    match_parse = MatchParse(graph_parse, match_dict, point_key_dict)
    return match_parse
def make_module(mod, _module_class, _compilation_unit):
    """Wrap ``mod`` for tracing: already-scripted modules pass through,
    modules with @torch.jit.export methods are script-compiled, and anything
    else is wrapped in ``_module_class`` (TopLevelTracedModule by default)."""
    if isinstance(mod, ScriptModule):
        return mod
    if torch._jit_internal.module_has_exports(mod):
        stubs_fn = torch.jit._recursive.make_stubs_from_exported_methods
        return torch.jit._recursive.create_script_module(mod, stubs_fn, share_types=False)
    wrapper_class = TopLevelTracedModule if _module_class is None else _module_class
    return wrapper_class(mod, _compilation_unit=_compilation_unit)
class HeavyTorsoHopper(RoboschoolXMLModifierMixin, ModifiableRoboschoolHopper):
    """Hopper variant whose body geoms are overridden to a fixed high density
    before the MuJoCo XML is loaded."""

    def __init__(self):
        # Density value written into every worldbody/body/geom of hopper.xml.
        self.density = 1500
        with self.modify_xml('hopper.xml') as tree:
            for elem in tree.iterfind('worldbody/body/geom'):
                elem.set('density', str(self.density))
        # NOTE(review): calls RoboschoolForwardWalkerMujocoXML.__init__
        # directly instead of super().__init__(), bypassing the immediate
        # parents -- presumably so the modified XML (self.model_xml) is the
        # one loaded; confirm against the base-class constructors.
        RoboschoolForwardWalkerMujocoXML.__init__(self, self.model_xml, 'torso', action_dim=3, obs_dim=15, power=0.75)

    def parameters(self):
        """Return the parent's parameter dict extended with 'density'."""
        # NOTE(review): super().parameters is accessed without calling, so the
        # parent presumably exposes it as a property returning a dict; this
        # method then shadows that property with a callable -- confirm callers
        # use parameters() consistently.
        parameters = super(HeavyTorsoHopper, self).parameters
        parameters.update({'density': self.density})
        return parameters
def view_policy(task, world_params, policy_fn, max_time_steps, number_of_resets, env_wrappers=np.array([]), env_wrappers_args=np.array([])):
    """Visualize a policy by rolling it out in an on-screen simulation.

    Args:
        task: task object providing get_task_name()/get_task_params().
        world_params: dict of world settings; 'skip_frame' controls how many
            low-level steps each policy action is repeated for.
        policy_fn: callable mapping an observation to an action.
        max_time_steps: total step budget, split evenly across resets.
        number_of_resets: how many episodes to show.
        env_wrappers / env_wrappers_args: wrapper specs forwarded to
            get_world.  NOTE(review): mutable np.array defaults are shared
            across calls -- harmless if get_world only reads them; confirm.
    """
    actual_skip_frame = world_params['skip_frame']
    env = get_world(task.get_task_name(), task.get_task_params(), world_params, enable_visualization=True, env_wrappers=env_wrappers, env_wrappers_args=env_wrappers_args)
    for reset_idx in range(number_of_resets):
        obs = env.reset()
        # NOTE(review): loop variable 'time' shadows the stdlib module name.
        for time in range(int((max_time_steps / number_of_resets))):
            desired_action = policy_fn(obs)
            # Repeat the same action for each skipped frame.
            for _ in range(actual_skip_frame):
                (obs, reward, done, info) = env.step(action=desired_action)
    env.close()
def find_entry(entries, time_point, start_time):
    """Return the first entry whose elapsed time since ``start_time`` exceeds
    ``time_point``.

    Args:
        entries: sequence of dicts with a numeric 'timestamp' key.
        time_point: time spec parsed by utils.time_to_seconds, or None to
            select the last entry.
        start_time: timestamp the elapsed time is measured from.

    Raises:
        AssertionError: when no entry lies beyond the requested time.
    """
    if time_point is None:
        return entries[-1]
    seconds = utils.time_to_seconds(time_point)
    for entry in entries:
        elapsed = entry['timestamp'] - start_time
        if elapsed > seconds:
            return entry
    # Bug fix: was `assert False`, which is stripped under `python -O` and
    # would silently return None; raise the same exception type explicitly.
    # (Also removed the `last` local, which was assigned but never read.)
    raise AssertionError('no entry found past the requested time point')
def test_execute_filter_method(app, schema_url):
    """Loading the schema filtered to POST and executing it must produce zero
    incoming requests against the app."""
    schema = oas_loaders.from_uri(schema_url, method='POST')
    execute(schema)
    assert_incoming_requests_num(app, 0)
class DmaNode:
    """Lightweight record of one DMA register entry."""

    def __init__(self, reg):
        # reg is a mapping (e.g. a CSV row) keyed by the column names below.
        self.datasize = int(reg['DMA data size(B)'])  # bytes transferred
        self.cycle = int(reg['Asic Cycle'])           # cycles consumed
        self.direction = reg['Direction']             # transfer direction string
class NoCost(CostFunction):
    """Cost-function stub representing the absence of a cost signal."""

    def get_parameters(self):
        """No trainable parameters."""
        return []

    def log_likelihood(self, states, costs):
        """Constant zero log-likelihood, shaped like ``costs``."""
        return T.zeros_like(costs)

    def evaluate(self, states):
        """Evaluation is undefined for NoCost."""
        raise Exception('Cannot evaluate NoCost function')

    def is_cost_function(self):
        """Tell callers this is not a real cost function."""
        return False
# NOTE(review): the original first line was the bare expression
# '_params.config' (a no-op); restored as a decorator, the only reading under
# which this sacred/ingredient-style config function gets registered.
# Confirm the decorator name against the original file.
@_params.config
def training_cfg():
    """Training hyper-parameters captured by the config decorator.

    The local assignments below are read by the config framework; the
    function body is never executed as ordinary code by callers.
    """
    optimizer = 'adam'
    learning_rate = 0.001
    # Clip gradients by global norm, bounded at 1.
    gradient_clipping = 'norm'
    gradient_clipping_bounds = 1
    use_memory_saving_gradients = False
class DenseModel(nn.Module):
    """DenseNet-161 encoder/decoder producing one single-channel map per image
    (sigmoid output, U-Net-style skip connections).

    The size asserts in forward pin the expected input to 3x256x256.
    """

    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        super(DenseModel, self).__init__()
        # NOTE(review): num_channels is accepted but never used -- the
        # densenet encoder always takes 3-channel input; confirm intent.
        self.dense = models.densenet161(pretrained=bool(load_weight)).features
        # Freeze (train_enc=False) or fine-tune the encoder.
        for param in self.dense.parameters():
            param.requires_grad = train_enc
        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)
        # Encoder split into five stages so intermediate activations can be
        # reused as skips; the splits cut each transition before its pooling
        # layer (transitionN[3]) so resolution drops at stage boundaries.
        # Stage output channels: 96, 192, 384, 1056, 2208 (see forward).
        self.conv_layer0 = nn.Sequential(*list(self.dense)[:3])
        self.conv_layer1 = nn.Sequential(self.dense.pool0, self.dense.denseblock1, *list(self.dense.transition1)[:3])
        self.conv_layer2 = nn.Sequential(self.dense.transition1[3], self.dense.denseblock2, *list(self.dense.transition2)[:3])
        self.conv_layer3 = nn.Sequential(self.dense.transition2[3], self.dense.denseblock3, *list(self.dense.transition3)[:3])
        self.conv_layer4 = nn.Sequential(self.dense.transition3[3], self.dense.denseblock4)
        # Decoder: each block is conv + ReLU + 2x bilinear upsample; input
        # channel counts include the concatenated skip from the encoder.
        self.deconv_layer0 = nn.Sequential(nn.Conv2d(in_channels=2208, out_channels=512, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), self.linear_upsampling)
        self.deconv_layer1 = nn.Sequential(nn.Conv2d(in_channels=(512 + 1056), out_channels=256, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), self.linear_upsampling)
        self.deconv_layer2 = nn.Sequential(nn.Conv2d(in_channels=(384 + 256), out_channels=192, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), self.linear_upsampling)
        self.deconv_layer3 = nn.Sequential(nn.Conv2d(in_channels=(192 + 192), out_channels=96, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), self.linear_upsampling)
        self.deconv_layer4 = nn.Sequential(nn.Conv2d(in_channels=(96 + 96), out_channels=128, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), self.linear_upsampling)
        # Final head: two convs, last one to a single channel + sigmoid.
        self.deconv_layer5 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(in_channels=128, out_channels=1, kernel_size=3, padding=1, bias=True), nn.Sigmoid())

    def forward(self, images):
        """Return a (batch, 256, 256) map in [0, 1] for 3x256x256 input."""
        batch_size = images.size(0)
        # Encoder pass; keep every stage output for the skip connections.
        out1 = self.conv_layer0(images)
        out2 = self.conv_layer1(out1)
        out3 = self.conv_layer2(out2)
        out4 = self.conv_layer3(out3)
        out5 = self.conv_layer4(out4)
        # Hard-coded shape checks document (and enforce) the stage contract.
        assert (out1.size() == (batch_size, 96, 128, 128))
        assert (out2.size() == (batch_size, 192, 64, 64))
        assert (out3.size() == (batch_size, 384, 32, 32))
        assert (out4.size() == (batch_size, 1056, 16, 16))
        assert (out5.size() == (batch_size, 2208, 8, 8))
        # Decoder: upsample, concatenate the matching skip, repeat.
        out5 = self.deconv_layer0(out5)
        x = torch.cat((out5, out4), 1)
        x = self.deconv_layer1(x)
        x = torch.cat((x, out3), 1)
        x = self.deconv_layer2(x)
        x = torch.cat((x, out2), 1)
        x = self.deconv_layer3(x)
        x = torch.cat((x, out1), 1)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        # Drop the singleton channel dimension.
        x = x.squeeze(1)
        return x
_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = GPTSanJapaneseTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
def setUp(self):
super().setUp()
vocab_tokens = ['', '', '', '', ',', '', '', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}
self.special_tokens_map = {'unk_token': '<unk>'}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
with open(self.emoji_file, 'w') as emoji_writer:
emoji_writer.write(json.dumps(emoji_tokens))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = ' \n'
output_text = ' \n'
return (input_text, output_text)
def get_clean_sequence(self, tokenizer):
(input_text, output_text) = self.get_input_output_texts(tokenizer)
ids = tokenizer.encode(output_text, add_special_tokens=False)
text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
return (text, ids)
def test_pretokenized_inputs(self):
pass
def test_maximum_encoding_length_pair_input(self):
pass
def test_maximum_encoding_length_single_input(self):
pass
def test_full_tokenizer(self):
tokenizer = self.get_tokenizer()
input_text = '\u3000'
expected_token = ['', '', '', '', '', '<SP>', '', '', '', '', '']
tokens = tokenizer.tokenize(input_text)
self.assertListEqual(tokens, expected_token)
expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
input_ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(input_ids, expected_ids)
input_tokens = (tokens + [tokenizer.unk_token])
expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
self.assertListEqual(input_ids, expected_ids)
def test_token_bagging(self):
tokenizer = self.get_tokenizer()
input_text = '<|bagoftoken|><|bagoftoken|>'
expected_text = ''
tokens = tokenizer.encode(input_text)
output_text = tokenizer.decode(tokens)
self.assertEqual(output_text, expected_text)
def test_prefix_input(self):
tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
prefix_text = ''
input_text = ''
expected_text = ''
tokens_1 = tokenizer.encode((prefix_text + input_text))
tokens_2 = tokenizer.encode('', prefix_text=(prefix_text + input_text))
tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
output_text_1 = tokenizer.decode(tokens_1)
output_text_2 = tokenizer.decode(tokens_2)
output_text_3 = tokenizer.decode(tokens_3)
self.assertEqual(output_text_1, expected_text)
self.assertEqual(output_text_2, expected_text)
self.assertEqual(output_text_3, expected_text)
def test_token_type_ids(self):
tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
prefix_text = ''
input_text = ''
len_prefix = (len(tokenizer.encode(prefix_text)) - 2)
len_text = (len(tokenizer.encode(input_text)) - 2)
expected_mask_1 = ([1] + ([0] * ((len_prefix + len_text) + 1)))
expected_mask_2 = (([1] * ((len_prefix + len_text) + 1)) + [0])
expected_mask_3 = (([1] + ([1] * len_prefix)) + ([0] * (len_text + 1)))
type_id_1 = tokenizer((prefix_text + input_text)).token_type_ids
type_id_2 = tokenizer('', prefix_text=(prefix_text + input_text)).token_type_ids
type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
self.assertListEqual(type_id_1, expected_mask_1)
self.assertListEqual(type_id_2, expected_mask_2)
self.assertListEqual(type_id_3, expected_mask_3)
def test_prefix_tokens(self):
    """Prefix placement changes the token ids (separator position) but not
    the decoded text."""
    tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
    ids_plain = tokenizer.encode('')
    ids_prefixed = tokenizer.encode('', prefix_text='')
    ids_both = tokenizer.encode('', prefix_text='')
    decoded_plain = tokenizer.decode(ids_plain)
    self.assertEqual(decoded_plain, tokenizer.decode(ids_prefixed))
    self.assertEqual(decoded_plain, tokenizer.decode(ids_both))
    self.assertNotEqual(ids_plain, ids_prefixed)
    self.assertNotEqual(ids_plain, ids_both)
    # The separator token moves depending on where the prefix ends.
    self.assertEqual(ids_plain[1], ids_prefixed[-1])
    self.assertEqual(ids_plain[1], ids_both[3])
def test_batch_encode(self):
    """__call__ and batch_encode_plus must produce identical padded batches."""
    tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
    input_pairs = [['', ''], ['', '']]
    expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
    expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
    expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
    encodings = (
        tokenizer(input_pairs, padding=True),
        tokenizer.batch_encode_plus(input_pairs, padding=True),
    )
    for encoding in encodings:
        self.assertListEqual(encoding.input_ids, expected_outputs)
        self.assertListEqual(encoding.token_type_ids, expected_typeids)
        self.assertListEqual(encoding.attention_mask, expected_attmask)
def test_conversion_reversible(self):
    # Deliberately disabled override of the shared tokenizer test.
    # NOTE(review): presumably the generic token<->id reversibility check
    # does not hold for this tokenizer — confirm against the common suite.
    pass
def test_padding_different_model_input_name(self):
    # Deliberately disabled override of the shared tokenizer test.
    # NOTE(review): presumably padding under an alternate model input name
    # is unsupported here — confirm against the common suite.
    pass
def make_setuptools_egg_info_args(setup_py_path, egg_info_dir, no_user_config):
    """Build the argv for running setuptools' ``egg_info`` command.

    ``egg_info_dir``, when truthy, becomes the ``--egg-base`` destination.
    """
    args = make_setuptools_shim_args(setup_py_path, no_user_config=no_user_config)
    args.append('egg_info')
    if egg_info_dir:
        args.extend(['--egg-base', egg_info_dir])
    return args
class Ngrams(object):
    """Candidate generator yielding every n-gram Span (n <= n_max) of a sentence.

    Parameters
    ----------
    n_max : maximum number of tokens per emitted n-gram.
    split_on : optional pattern; when set, the sentence is retokenized on it
        before n-grams are formed.
    """

    def __init__(self, n_max=5, split_on=None):
        self.max_ngrams = n_max
        self.split_on = split_on

    def apply(self, s):
        """Yield a Span for each n-gram of ``s`` whose end tokens are non-blank.

        Generator. Character offsets come from the (possibly retokenized)
        sentence; tokens that are pure whitespace never start or end an n-gram.
        """
        if self.split_on:
            (words, char_offsets) = retokenize(s, self.split_on)
        else:
            (words, char_offsets) = (s.words, s.char_offsets)
        # NOTE(review): removed dead locals from the original body
        # (`text = get_text(...)`, `matches = []`, `match = None`) — none were
        # read; get_text looked side-effect free, confirm before relying on it.
        for i in range(0, len(words)):
            start = char_offsets[i]
            if not words[i].strip():
                continue
            for j in range(i + 1, min(i + self.max_ngrams + 1, len(words) + 1)):
                if not words[j - 1].strip():
                    continue
                # Span is inclusive of its last character.
                end = char_offsets[j - 1] + len(words[j - 1])
                yield Span(start, end - 1, s)
def VGG16_rpn_frozen_features(model):
    """Build an RPN-only detection model on a VGG16 conv5 body whose
    convolutional weights are kept frozen."""
    body_builder = VGG16.add_VGG16_conv5_body
    return build_generic_detection_model(model, body_builder, freeze_conv_body=True)
class CIFAR100(CIFAR10):
    """CIFAR-100 dataset.

    Subclass of CIFAR10 that only overrides the download/verification
    metadata; all loading logic is inherited.
    """
    base_folder = 'cifar-100-python'
    # BUG FIX: the original line held a truncated, unterminated string
    # literal ("url = '"), a syntax error; restored to the canonical
    # CIFAR-100 archive URL.
    url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
    filename = 'cifar-100-python.tar.gz'
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # [archive member name, expected md5] pairs used for integrity checking.
    train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']]
    test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']]
    # Metadata file holding the fine-grained label names.
    meta = {'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
def solve_ineq_univar(ineq):
    """Solve a univariate symbolic inequality via Maxima's ``solve_rat_ineq``.

    Returns a list of solution inequalities; ``x < Infinity`` when every
    value satisfies the inequality. Raises NotImplementedError for
    multivariate input.
    """
    ineqvar = ineq.variables()
    if len(ineqvar) != 1:
        # BUG FIX: the original concatenated a str with the variables tuple,
        # which raises TypeError instead of the intended error; stringify it.
        raise NotImplementedError('The command solve_ineq_univar accepts univariate inequalities only. Your variables are ' + str(ineqvar))
    ineq0 = ineq._maxima_()
    # Lazily load the Maxima package the first time it is needed
    # ('#' is Maxima's not-equal operator).
    ineq0.parent().eval('if solve_rat_ineq_loaded#true then (solve_rat_ineq_loaded:true,load("solve_rat_ineq.mac")) ')
    sol = ineq0.solve_rat_ineq().sage()
    if repr(sol) == 'all':
        from sage.rings.infinity import Infinity
        sol = [(ineqvar[0] < Infinity)]
    return sol
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register constructors and methods of ``ns3::TypeIdValue`` on ``cls``.

    Generated pybindgen registration code; the order of ``add_constructor``
    calls is kept as generated.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return
class GCClearReferencesSlot(GCDependentSlot):
    """Slot that emits tp_clear only for scopes that actually need clearing."""

    def slot_code(self, scope):
        # Scopes with nothing to clear get a NULL ('0') slot entry.
        if not scope.needs_tp_clear():
            return '0'
        return GCDependentSlot.slot_code(self, scope)
class ASPPModule(nn.ModuleList):
    """Atrous Spatial Pyramid Pooling: parallel ConvModules with different dilations.

    Each configured dilation contributes one branch; dilation 1 uses a 1x1
    conv, every other dilation a padded 3x3 dilated conv.
    """

    def __init__(self, dilations, in_channels, channels, conv_cfg, norm_cfg, act_cfg):
        super(ASPPModule, self).__init__()
        self.dilations = dilations
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        for dilation in dilations:
            kernel_size = 1 if dilation == 1 else 3
            padding = 0 if dilation == 1 else dilation
            self.append(
                ConvModule(
                    self.in_channels,
                    self.channels,
                    kernel_size,
                    dilation=dilation,
                    padding=padding,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                )
            )

    def forward(self, x):
        """Apply every branch to ``x`` and return the list of branch outputs."""
        return [branch(x) for branch in self]
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Add one Inception-ResNet block ('block35', 'block17' or 'block8') to ``x``.

    The branch outputs are concatenated, projected back to ``x``'s channel
    count with a 1x1 conv, scaled by ``scale`` and added to ``x`` as a
    residual; ``activation`` (if not None) is applied last. Raises
    ValueError on an unknown ``block_type``.
    """
    if (block_type == 'block35'):
        branch_0 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(branch_1, 32, 3)
        branch_2 = conv2d_bn(x, 32, 1)
        branch_2 = conv2d_bn(branch_2, 48, 3)
        branch_2 = conv2d_bn(branch_2, 64, 3)
        branches = [branch_0, branch_1, branch_2]
    elif (block_type == 'block17'):
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 128, 1)
        # Factorized 7x7 as 1x7 followed by 7x1.
        branch_1 = conv2d_bn(branch_1, 160, [1, 7])
        branch_1 = conv2d_bn(branch_1, 192, [7, 1])
        branches = [branch_0, branch_1]
    elif (block_type == 'block8'):
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 192, 1)
        # Factorized 3x3 as 1x3 followed by 3x1.
        branch_1 = conv2d_bn(branch_1, 224, [1, 3])
        branch_1 = conv2d_bn(branch_1, 256, [3, 1])
        branches = [branch_0, branch_1]
    else:
        raise ValueError(('Unknown Inception-ResNet block type. Expects "block35", "block17" or "block8", but got: ' + str(block_type)))
    block_name = ((block_type + '_') + str(block_idx))
    channel_axis = (1 if (K.image_data_format() == 'channels_first') else 3)
    mixed = Concatenate(axis=channel_axis, name=(block_name + '_mixed'))(branches)
    # Project back to x's channel count; linear activation, bias enabled.
    up = conv2d_bn(mixed, K.int_shape(x)[channel_axis], 1, activation=None, use_bias=True, name=(block_name + '_conv'))
    # Residual add with a learned-free scaling factor (passed via `arguments`
    # so it is serialized with the model).
    x = Lambda((lambda inputs, scale: (inputs[0] + (inputs[1] * scale))), output_shape=K.int_shape(x)[1:], arguments={'scale': scale}, name=block_name)([x, up])
    if (activation is not None):
        x = Activation(activation, name=(block_name + '_ac'))(x)
    return x
def init_weights(net, init_type='normal'):
    """Initialise ``net``'s weights with the scheme named by ``init_type``.

    Supported schemes: 'normal', 'xavier', 'kaiming', 'orthogonal';
    anything else raises NotImplementedError.
    """
    if init_type == 'normal':
        initializer = weights_init_normal
    elif init_type == 'xavier':
        initializer = weights_init_xavier
    elif init_type == 'kaiming':
        initializer = weights_init_kaiming
    elif init_type == 'orthogonal':
        initializer = weights_init_orthogonal
    else:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
    net.apply(initializer)
class SymmetricFunctionAlgebra_generic(CombinatorialFreeModule):
    """Abstract base class for a basis of the ring of symmetric functions.

    Concrete bases (Schur, power-sum, monomial, ...) derive from this;
    elements are free-module elements indexed by partitions over the base
    ring of ``Sym``.
    """

    def __init__(self, Sym, basis_name=None, prefix=None, graded=True):
        """Initialize this basis of the symmetric-function ring ``Sym``.

        ``basis_name``/``prefix`` override class-level defaults when given;
        ``graded`` chooses between the graded and merely filtered bases
        categories.
        """
        R = Sym.base_ring()
        from sage.categories.commutative_rings import CommutativeRings
        if (R not in CommutativeRings()):
            raise TypeError('argument R must be a commutative ring')
        try:
            # The base ring must be able to coerce the unit element.
            R(Integer(1))
        except (TypeError, ValueError):
            raise ValueError('R must have a unit element')
        if (basis_name is not None):
            self._basis = basis_name
        if (prefix is not None):
            self._prefix = prefix
        self._sym = Sym
        if graded:
            cat = GradedSymmetricFunctionsBases(Sym)
        else:
            cat = FilteredSymmetricFunctionsBases(Sym)
        CombinatorialFreeModule.__init__(self, Sym.base_ring(), _Partitions, category=cat, bracket='', prefix=prefix)
    # Default element-printing order (see set_print_style).
    _print_style = 'lex'

    def __getitem__(self, c):
        """Return the basis element indexed by ``c`` (an integer or a partition)."""
        C = self.basis().keys()
        if (not isinstance(c, C.element_class)):
            if (c in ZZ):
                # A bare integer means the one-part partition [c].
                c = C([c])
            else:
                c = C(c)
        return self.monomial(c)

    def _change_by_proportionality(self, x, function):
        """Scale each coefficient of ``x`` by ``function`` of its partition."""
        BR = self.base_ring()
        z_elt = {}
        for (m, c) in x._monomial_coefficients.items():
            coeff = function(m)
            z_elt[m] = BR((c * coeff))
        return self._from_dict(z_elt)

    def _change_by_plethysm(self, x, expr, deg_one):
        """Plethysm of ``x`` with ``expr``, computed through the power-sum basis."""
        p = self.realization_of().power()
        p_x = p(x)
        # expr_k substitutes g -> g**k for every degree-one generator g.
        expr_k = (lambda k: expr.subs(**dict([(str(x), (x ** k)) for x in deg_one])))
        f = (lambda m, c: (m, (c * prod([expr_k(k) for k in m]))))
        return self(p_x.map_item(f))

    def _apply_multi_module_morphism(self, x, y, f, orthogonal=False):
        """Extend the bilinear map ``f`` over the supports of ``x`` and ``y``.

        With ``orthogonal=True`` only equal index pairs contribute (``f`` is
        assumed to vanish off the diagonal).
        """
        res = 0
        if orthogonal:
            # Iterate over the smaller support.
            if (len(x._monomial_coefficients) > len(y._monomial_coefficients)):
                (x, y) = (y, x)
            for (mx, cx) in x._monomial_coefficients.items():
                if (mx not in y._monomial_coefficients):
                    continue
                else:
                    cy = y._monomial_coefficients[mx]
                    res += ((cx * cy) * f(mx, mx))
            return res
        else:
            for (mx, cx) in x._monomial_coefficients.items():
                for (my, cy) in y._monomial_coefficients.items():
                    res += ((cx * cy) * f(mx, my))
            return res

    def _from_element(self, x):
        """Rebuild ``x`` in this basis from its monomial coefficients."""
        return self._from_dict(x.monomial_coefficients())

    def _from_cache(self, element, cache_function, cache_dict, **subs_dict):
        """Convert ``element`` using cached change-of-basis coefficients.

        ``cache_function(n)`` must populate ``cache_dict[n]`` on demand;
        ``subs_dict`` is substituted into symbolic cached coefficients.
        """
        BR = self.base_ring()
        zero = BR.zero()
        z_elt = {}
        for (part, c) in element.monomial_coefficients().items():
            if (sum(part) not in cache_dict):
                cache_function(sum(part))
            part = _Partitions(part)
            for (part2, c2) in cache_dict[sum(part)][part].items():
                if hasattr(c2, 'subs'):
                    c3 = (c * BR(c2.subs(**subs_dict)))
                else:
                    c3 = (c * BR(c2))
                z_elt[part2] = (z_elt.get(part2, zero) + BR(c3))
        return self._from_dict(z_elt)

    def _invert_morphism(self, n, base_ring, self_to_other_cache, other_to_self_cache, to_other_function=None, to_self_function=None, upper_triangular=False, lower_triangular=False, ones_on_diagonal=False):
        """Compute degree-``n`` transition coefficients in both directions.

        One direction is given by ``to_other_function`` or
        ``to_self_function``; the other is obtained by inverting the
        transition matrix (by back/forward substitution when it is declared
        triangular). Both caches are filled in place for degree ``n``.
        """
        # Decide which direction is known and which one must be inverted.
        if (to_other_function is not None):
            known_cache = self_to_other_cache
            unknown_cache = other_to_self_cache
            known_function = to_other_function
        else:
            unknown_cache = self_to_other_cache
            known_cache = other_to_self_cache
            known_function = to_self_function
        if ((n in known_cache) and (n in unknown_cache)):
            return
        one = base_ring.one()
        zero = base_ring.zero()
        pn = Partitions_n(n).list()
        len_pn = len(pn)
        known_cache_n = {}
        known_matrix_n = matrix(base_ring, len_pn, len_pn)
        unknown_cache_n = {}
        # Fill the known transition matrix (and its cache) row by row,
        # skipping entries forced to zero by triangularity.
        for i in range(len_pn):
            known_cache_part = {}
            f = known_function(pn[i])
            for j in range(len_pn):
                if (lower_triangular and (j > i)):
                    break
                if (upper_triangular and (i > j)):
                    continue
                value = f(pn[j])
                if (value != zero):
                    known_cache_part[pn[j]] = value
                    known_matrix_n[(i, j)] = value
            known_cache_n[pn[i]] = known_cache_part
            unknown_cache_n[pn[i]] = {}
        if ((upper_triangular is not False) and (lower_triangular is not False)):
            raise ValueError('only one of upper_triangular and lower_triangular can be specified')
        elif (upper_triangular is not False):
            # Invert an upper-triangular matrix column by column via
            # back substitution.
            inverse = copy(known_matrix_n.parent().zero_matrix())
            delta = (lambda i: (lambda j: (one if (i == j) else zero)))
            for column in range(len_pn):
                e = delta(column)
                x = ([0] * len_pn)
                for i in range((len_pn - 1), (- 1), (- 1)):
                    value = e(i)
                    if (not ones_on_diagonal):
                        value /= known_matrix_n[(i, i)]
                    for j in range((i + 1), len_pn):
                        if ones_on_diagonal:
                            value -= (known_matrix_n[(i, j)] * x[j])
                        else:
                            value -= ((known_matrix_n[(i, j)] * x[j]) / known_matrix_n[(i, i)])
                    x[i] = value
                for j in range((column + 1)):
                    if (x[j] != zero):
                        inverse[(j, column)] = x[j]
        elif (lower_triangular is not False):
            # Invert a lower-triangular matrix column by column via
            # forward substitution.
            inverse = copy(known_matrix_n.parent().zero_matrix())
            delta = (lambda i: (lambda j: (one if (i == j) else zero)))
            for column in range(len_pn):
                e = delta(column)
                x = []
                for i in range(len_pn):
                    value = e(i)
                    if (not ones_on_diagonal):
                        value /= known_matrix_n[(i, i)]
                    for j in range(len(x)):
                        if ones_on_diagonal:
                            value -= (known_matrix_n[(i, j)] * x[j])
                        else:
                            value -= ((known_matrix_n[(i, j)] * x[j]) / known_matrix_n[(i, i)])
                    x.append(value)
                for j in range(column, len(x)):
                    if (x[j] != zero):
                        inverse[(j, column)] = x[j]
        else:
            # No triangular structure: fall back to generic matrix inversion.
            inverse = (~ known_matrix_n)
        for i in range(len_pn):
            for j in range(len_pn):
                if (inverse[(i, j)] != zero):
                    if hasattr(self, '_normalize_coefficients'):
                        unknown_cache_n[pn[i]][pn[j]] = self._normalize_coefficients(inverse[(i, j)])
                    else:
                        unknown_cache_n[pn[i]][pn[j]] = inverse[(i, j)]
        known_cache[n] = known_cache_n
        unknown_cache[n] = unknown_cache_n

    def symmetric_function_ring(self):
        """Return the ring of symmetric functions this basis realizes."""
        return self.realization_of()

    def prefix(self):
        """Return the print prefix of this basis."""
        return self._prefix

    def transition_matrix(self, basis, n):
        """Return the degree-``n`` transition matrix from ``self`` to ``basis``."""
        P = Partitions_n(n)
        Plist = P.list()
        m = []
        for row_part in Plist:
            z = basis(self(row_part))
            m.append([z.coefficient(col_part) for col_part in Plist])
        return matrix(m)

    def _gram_schmidt(self, n, source, scalar, cache, leading_coeff=None, upper_triangular=True):
        """Gram-Schmidt orthogonalize degree-``n`` elements of ``source``.

        The scalar product is given by ``scalar`` (evaluated in the
        power-sum basis, where it is diagonal); resulting coefficients are
        stored in ``cache``.
        """
        BR = self.base_ring()
        one = BR.one()
        p = self.realization_of().p()
        pscalar = (lambda x, y: p._apply_multi_module_morphism(p(x), p(y), (lambda a, b: scalar(a)), orthogonal=True))
        if (leading_coeff is None):
            leading_coeff = (lambda x: one)
        l = Partitions_n(n).list()
        if upper_triangular:
            l.reverse()
        precomputed_elements = []
        cache[l[0]] = {l[0]: leading_coeff(l[0])}
        precomputed_elements.append((leading_coeff(l[0]) * source(l[0])))
        for i in range(1, len(l)):
            start = (leading_coeff(l[i]) * source(l[i]))
            sub = 0
            # Subtract the projections on all previously orthogonalized elements.
            for j in range(i):
                sub += ((pscalar(start, precomputed_elements[j]) / pscalar(precomputed_elements[j], precomputed_elements[j])) * precomputed_elements[j])
            res = (start - sub)
            if hasattr(self, '_normalize_coefficients'):
                res = res.map_coefficients(self._normalize_coefficients)
            precomputed_elements.append(res)
            cache[l[i]] = {}
            for j in range((i + 1)):
                cache[l[i]][l[j]] = res.coefficient(l[j])

    def _inner_plethysm_pk_g(self, k, g, cache):
        """Inner plethysm of the power-sum ``p_k`` with ``g``, memoized in ``cache``."""
        try:
            return cache[(k, g)]
        except KeyError:
            pass
        p = self.realization_of().p()
        res = 0
        degrees = sorted(set((sum(m) for m in g.support())))
        for d in degrees:
            for mu in Partitions_n(d):
                mu_k = mu.power(k)
                if (mu_k in g.support()):
                    res += (((g.coefficient(mu_k) * mu_k.centralizer_size()) / mu.centralizer_size()) * p(mu))
        cache[(k, g)] = res
        return res

    def _inner_plethysm_pnu_g(self, p_x, cache, nu):
        """Inner plethysm of ``p_nu`` with ``p_x`` via iterated inner tensor products."""
        if (not nu._list):
            # Empty partition: sum of trivial characters, one per degree
            # occurring in p_x.
            s = self.realization_of().s()
            degrees = [part.size() for part in p_x.support()]
            degrees = sorted(set(degrees))
            if (0 in degrees):
                ext = self([])
            else:
                ext = 0
            return (ext + self(sum([s([n]) for n in degrees if (n != 0)])))
        res = [self._inner_plethysm_pk_g(k, p_x, cache) for k in nu]
        return self(reduce((lambda x, y: (0 if (x == 0) else x.itensor(y))), res))

    def _dual_basis_default(self):
        """Return the dual basis with respect to the Hall scalar product."""
        return self.dual_basis(scalar=zee, scalar_name='Hall scalar product')

    def dual_basis(self, scalar=None, scalar_name='', basis_name=None, prefix=None):
        """Return the basis dual to ``self`` for the given scalar product.

        Defaults to the Hall scalar product when ``scalar`` is None.
        """
        from . import dual
        if (scalar is None):
            if ((basis_name is None) and (prefix is None)):
                return self._dual_basis_default()
            scalar = zee
            scalar_name = 'Hall scalar product'
        return dual.SymmetricFunctionAlgebra_dual(self, scalar, scalar_name, basis_name=basis_name, prefix=prefix)

    def basis_name(self):
        """Return the name of this basis."""
        return self._basis

    def get_print_style(self):
        """Return the current print style ('lex', 'length' or 'maximal_part')."""
        return self._print_style

    def set_print_style(self, ps):
        """Set the sorting key used when printing elements."""
        if (ps == 'lex'):
            self.print_options(sorting_key=(lambda x: x))
        elif (ps == 'length'):
            self.print_options(sorting_key=len)
        elif (ps == 'maximal_part'):
            self.print_options(sorting_key=_lmax)
        else:
            raise ValueError('the print style must be one of lex, length, or maximal_part ')
        self._print_style = ps

    def _latex_term(self, m):
        """LaTeX for the basis element indexed by partition ``m`` (comma-joined parts)."""
        return super()._latex_term(','.join((str(i) for i in m)))

    def from_polynomial(self, poly, check=True):
        """Build a symmetric function from a symmetric polynomial (via the monomial basis)."""
        m = self.realization_of().m()
        return self(m.from_polynomial(poly, check=check))

    def product_by_coercion(self, left, right):
        """Multiply by round-tripping through the Schur basis."""
        s = self.realization_of().schur()
        return self(s.product(s(left), s(right)))

    def coproduct_by_coercion(self, elt):
        """Coproduct computed in the Schur basis and coerced back."""
        from sage.categories.tensor import tensor
        s = self.realization_of().schur()
        return self.tensor_square().sum(((coeff * tensor([self(s[x]), self(s[y])])) for ((x, y), coeff) in s(elt).coproduct()))
# NOTE(review): presumably decorated with @ti.kernel in the original source
# (uses ti.static / ti.exp and Taichi fields) — decorator not visible here.
def advance():
    """One symplectic-Euler step of the mass-spring cloth simulation.

    Reads forces from ``pos.grad``, integrates velocities with gravity and
    damping, resolves collision with the ball, clamps at the unit box, and
    advances positions.
    """
    for i in range(NV):
        # Force is -grad of the potential; rho*(dx**2) acts as vertex mass.
        acc = ((- pos.grad[i]) / (rho * (dx ** 2)))
        vel[i] += (dt * (acc + gravity))
        # Exponential velocity damping.
        vel[i] *= ti.exp(((- dt) * damping))
    for i in range(NV):
        # Ball collision: cancel the inward velocity component for
        # vertices inside the ball.
        disp = (pos[i] - ball_pos)
        disp2 = disp.norm_sqr()
        if (disp2 <= (ball_radius ** 2)):
            NoV = vel[i].dot(disp)
            if (NoV < 0):
                vel[i] -= ((NoV * disp) / disp2)
        # Zero each velocity component that would push the vertex out of
        # the [0, 1] box.
        cond = (((pos[i] < 0) & (vel[i] < 0)) | ((pos[i] > 1) & (vel[i] > 0)))
        for j in ti.static(range(pos.n)):
            if cond[j]:
                vel[i][j] = 0
        pos[i] += (dt * vel[i])
class DeltaNetBase(torch.nn.Module):
    """Backbone stacking DeltaConv layers over scalar and vector features.

    Builds a kNN graph over the input points, obtains per-point normals and
    tangent bases (provided or estimated), constructs discrete gradient and
    divergence operators, and runs the DeltaConv stack, collecting every
    layer's scalar output.
    """

    def __init__(self, in_channels, conv_channels, mlp_depth, num_neighbors, grad_regularizer, grad_kernel_width, centralize_first=True):
        super().__init__()
        self.k = num_neighbors
        self.grad_regularizer = grad_regularizer
        self.grad_kernel_width = grad_kernel_width
        conv_channels = [in_channels] + conv_channels
        self.convs = torch.nn.ModuleList()
        for i in range(len(conv_channels) - 1):
            # The final layer produces scalar features only (vector=False).
            last_layer = i == (len(conv_channels) - 2)
            self.convs.append(DeltaConv(conv_channels[i], conv_channels[i + 1], depth=mlp_depth, centralized=(centralize_first and i == 0), vector=(not last_layer)))

    def forward(self, data):
        """Run the DeltaConv stack on ``data`` (expects .pos and .batch;
        optionally .norm and .x). Returns the list of per-layer scalar outputs."""
        pos = data.pos
        batch = data.batch
        edge_index = knn_graph(pos, self.k, batch, loop=True, flow='target_to_source')
        if hasattr(data, 'norm') and data.norm is not None:
            # Normals supplied: derive the tangent basis from them.
            normal = data.norm
            x_basis, y_basis = build_tangent_basis(normal)
        else:
            # No normals: estimate normal + tangent basis from a small kNN graph.
            edge_index_normal = knn_graph(pos, 10, batch, loop=True, flow='target_to_source')
            normal, x_basis, y_basis = estimate_basis(pos, edge_index_normal, orientation=pos)
        grad, div = build_grad_div(pos, normal, x_basis, y_basis, edge_index, batch, kernel_width=self.grad_kernel_width, regularizer=self.grad_regularizer)
        # Scalar features default to the point coordinates.
        x = data.x if (hasattr(data, 'x') and data.x is not None) else pos
        # BUG FIX: the original line read `v = (grad x)` — a syntax error.
        # The intent is to apply the gradient operator to the features to
        # obtain the initial vector features.
        v = grad @ x
        out = []
        for conv in self.convs:
            x, v = conv(x, v, grad, div, edge_index)
            out.append(x)
        return out
def test_BitMaskedArray_NumpyArray():
    """Exercise BitMaskedArray over a NumpyArray for every combination of
    valid_when and lsb_order; all four describe the same 13-element mask."""
    expected = [0.0, 1.0, 2.0, 3.0, None, None, None, None, 1.1, None, 3.3, None, 5.5]
    # (bit pattern, valid_when, lsb_order) — each pattern encodes the same
    # validity mask under its convention.
    cases = [
        ([1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1], True, False),
        ([0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0], False, False),
        ([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1], True, True),
        ([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], False, True),
    ]
    for bits, valid_when, lsb_order in cases:
        mask = ak.index.Index(np.packbits(np.array(bits, dtype=np.uint8)))
        content = ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))
        layout = ak.contents.bitmaskedarray.BitMaskedArray(mask, content, valid_when=valid_when, length=13, lsb_order=lsb_order)
        assert layout.to_typetracer().form == layout.form
        assert layout.to_typetracer().form.type == layout.form.type
        assert len(layout) == 13
        with pytest.raises(IndexError):
            layout[13]
        with pytest.raises(IndexError):
            layout[-14]
        # Positive and negative indexing must agree element by element.
        for i, value in enumerate(expected):
            if value is None:
                assert layout[i] is None
                assert layout[i - 13] is None
            else:
                assert layout[i] == value
                assert layout[i - 13] == value
        # Slicing converts the bit mask to a byte mask.
        assert isinstance(layout[5:], ak.contents.bytemaskedarray.ByteMaskedArray)
        assert layout.to_typetracer()[5:].form == layout[5:].form
        assert len(layout[5:]) == 8
        assert len(layout[-8:]) == 8
        assert len(layout[5:100]) == 8
        assert len(layout[-8:100]) == 8
        assert layout[5:][2] is None
        assert layout[5:][3] == 1.1
        assert layout[-8:][2] is None
        assert layout[-8:][3] == 1.1
        with pytest.raises(IndexError):
            layout['bad']
def make_roi_box_predictor(cfg, in_channels):
    """Instantiate the ROI box predictor named in the config via the registry."""
    predictor_name = cfg.MODEL.ROI_BOX_HEAD.PREDICTOR
    predictor_cls = registry.ROI_BOX_PREDICTOR[predictor_name]
    return predictor_cls(cfg, in_channels)
class PoolFormerBlock(nn.Module):
    """PoolFormer block: pooling token mixer and MLP, each wrapped with a
    residual connection, per-channel LayerScale, and DropPath."""

    def __init__(self, dim, pool_size=3, dpr=0.0, layer_scale_init_value=1e-05):
        super().__init__()
        self.norm1 = nn.GroupNorm(1, dim)
        self.token_mixer = Pooling(pool_size)
        self.norm2 = nn.GroupNorm(1, dim)
        # Stochastic depth degenerates to identity when the rate is zero.
        self.drop_path = DropPath(dpr) if dpr > 0.0 else nn.Identity()
        self.mlp = MLP(dim, int(dim * 4))
        init_scale = layer_scale_init_value * torch.ones(dim)
        self.layer_scale_1 = nn.Parameter(init_scale, requires_grad=True)
        self.layer_scale_2 = nn.Parameter(init_scale.clone(), requires_grad=True)

    def forward(self, x: Tensor) -> Tensor:
        """Apply mixer and MLP sub-blocks with scaled residuals."""
        scale_1 = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1)
        x = x + self.drop_path(scale_1 * self.token_mixer(self.norm1(x)))
        scale_2 = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1)
        x = x + self.drop_path(scale_2 * self.mlp(self.norm2(x)))
        return x
def _is_clashed(chunk1: tuple, chunk2: tuple, allow_level: int=NESTED):
    """Return True when the two chunks conflict at the given allow level.

    FLAT forbids any overlap; NESTED forbids overlap unless one chunk is
    nested in the other; any other level permits everything.
    """
    if allow_level == FLAT:
        return _is_overlapping(chunk1, chunk2)
    if allow_level == NESTED:
        overlapping = _is_overlapping(chunk1, chunk2)
        return overlapping and not _is_nested(chunk1, chunk2)
    return False
def Phenotyping_dataset(args=None):
    """Build the MIMIC phenotyping (multi-label diagnosis) dataset."""
    dataset = Dataset(name='Phenotyping', path='preprocess/MIMIC_Datasets/Diagnosis/vec_diagnosis.p', max_length=20000, args=args)
    labels = np.array(dataset.train_data.y)
    # Per-class positive weight: (#examples / #positives) - 1.
    dataset.pos_weight = list(len(labels) / labels.sum(0) - 1)
    dataset.trainer_type = 'Multi_Label'
    dataset.save_on_metric = 'macro_roc_auc'
    dataset.output_size = len(dataset.pos_weight)
    dataset.test_data = dataset.test_data.mock(n=5000)
    dataset.keys_to_use = {'macro_roc_auc': 'roc_auc', 'macro_pr_auc': 'pr_auc'}
    return dataset
def is_backend_raw_tensor_dim_tag_independent() -> bool:
    """Return the active backend's flag of the same name (delegates to the
    global backend object)."""
    return _backend.global_backend.is_backend_raw_tensor_dim_tag_independent
class Measure():
    """Abstract callable interface for scoring transitions under a model.

    Subclasses implement ``__call__`` over batches of (state, action,
    next-state) together with the model's predicted next-state means and
    variances.
    """

    def __call__(self, states, actions, next_states, next_state_means, next_state_vars, model):
        # Interface only; concrete measures must override.
        raise NotImplementedError
class SawyerCoffeePullEnv(SawyerXYZEnv):
    """Sawyer MuJoCo task: pull the coffee mug from the machine to a goal.

    NOTE(review): decorators were evidently lost in extraction —
    ``self.model_name`` is read as an attribute in ``__init__`` and
    ``_target_site_config`` is a property in sibling environments, while a
    bare ``_assert_task_is_set`` statement sat above ``step``. The
    ``@property`` / ``@_assert_task_is_set`` decorators are restored here.
    """

    def __init__(self):
        hand_low = (-0.5, 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.05, 0.75, 0.0)
        obj_high = (0.05, 0.8, 0.0)
        goal_low = (-0.1, 0.6, -0.001)
        goal_high = (0.1, 0.7, 0.0)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.75, 0.0]), 'obj_init_angle': 0.3, 'hand_init_pos': np.array([0.0, 0.6, 0.2])}
        self.goal = np.array([0.0, 0.6, 0])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        # Object position and goal are sampled jointly.
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        """Path of the MuJoCo XML describing this task."""
        return full_v1_path_for('sawyer_xyz/sawyer_coffee.xml')

    @_assert_task_is_set
    def step(self, action):
        """Advance one control step and return (obs, reward, done, info)."""
        ob = super().step(action)
        reward, reachDist, pullDist = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float(pullDist <= 0.07)}
        return (ob, reward, False, info)

    @property
    def _target_site_config(self):
        return [('mug_goal', self._target_pos)]

    def _get_pos_objects(self):
        return self.data.get_geom_xpos('objGeom')

    def adjust_initObjPos(self, orig_init_pos):
        """Shift the configured start position so the object *geom* lands on it."""
        # XY offset between the body frame and the geom frame.
        diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2]
        adjustedPos = orig_init_pos[:2] + diff
        return [adjustedPos[0], adjustedPos[1], self.get_body_com('obj')[-1]]

    def reset_model(self):
        """Reset the hand, object, goal and (when random_init) machine placement."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.objHeight = self.data.get_geom_xpos('objGeom')[2]
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self._target_pos = goal_pos[3:]
            # Re-sample until object start and goal are at least 15 cm apart.
            while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
                goal_pos = self._get_state_rand_vec()
                self._target_pos = goal_pos[3:]
            self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
            self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
            # Keep the coffee machine and its button rigidly placed relative
            # to the sampled mug start.
            machine_pos = goal_pos[:3] - np.array([0, -0.15, -0.27])
            button_pos = machine_pos + np.array([0.0, -0.12, 0.05])
            self.sim.model.body_pos[self.model.body_name2id('coffee_machine')] = machine_pos
            self.sim.model.body_pos[self.model.body_name2id('button')] = button_pos
        self._set_obj_xyz(self.obj_init_pos)
        self.maxPullDist = np.linalg.norm(self.obj_init_pos[:2] - np.array(self._target_pos)[:2])
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
        self.init_fingerCOM = (rightFinger + leftFinger) / 2
        self.reachCompleted = False

    def compute_reward(self, actions, obs):
        """Return [reward, reachDist, pullDist] for the given action/observation."""
        objPos = obs[3:6]
        rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
        fingerCOM = (rightFinger + leftFinger) / 2
        goal = self._target_pos
        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        assert np.all(goal == self._get_site_pos('mug_goal'))
        reachDist = np.linalg.norm(fingerCOM - objPos)
        pullDist = np.linalg.norm(objPos[:2] - goal[:2])
        reachRew = -reachDist
        # Approach in XY first; reward gripper closing only once close.
        reachDistxy = np.linalg.norm(np.concatenate((objPos[:-1], [self.init_fingerCOM[-1]])) - fingerCOM)
        if reachDistxy < 0.05:
            reachRew = -reachDist + 0.1
            if reachDist < 0.05:
                reachRew += max(actions[-1], 0) / 50
        else:
            reachRew = -reachDistxy
        if reachDist < 0.05:
            # Dense pull reward with two exponential bonus terms near the goal.
            pullRew = 1000 * (self.maxPullDist - pullDist) + c1 * (np.exp(-(pullDist ** 2) / c2) + np.exp(-(pullDist ** 2) / c3))
            pullRew = max(pullRew, 0)
        else:
            pullRew = 0
        reward = reachRew + pullRew
        return [reward, reachDist, pullDist]
def cyclic_graph():
    """Return a HierarchicalClassifier whose hierarchy contains the cycle
    a -> b -> c -> a (used to exercise cycle detection)."""
    classifier = HierarchicalClassifier()
    cycle_edges = [('a', 'b'), ('b', 'c'), ('c', 'a')]
    classifier.hierarchy_ = nx.DiGraph(cycle_edges)
    classifier.logger_ = logging.getLogger('HC')
    return classifier
class L2Norm(Component):
    """Component that rescales each MBTR row to unit L2 norm times ``scale``."""
    kind = 'l2'

    def __init__(self, scale=1.0, context=None):
        # BUG FIX: `context` previously defaulted to a mutable `{}`, shared
        # across every default-constructed instance; use None as the sentinel
        # and create a fresh dict per call instead.
        super().__init__(context=({} if context is None else context))
        self.scale = scale

    def __call__(self, mbtr, data=None):
        """Normalize ``mbtr`` rows in place and return it.

        Rows with zero norm yield non-finite values; the divide-by-zero
        warning is deliberately suppressed.
        """
        with np.errstate(divide='ignore'):
            mbtr /= np.linalg.norm(mbtr, axis=1, keepdims=True, ord=2)
        mbtr *= self.scale
        return mbtr

    def _get_config(self):
        """Serializable configuration of this component."""
        return {'scale': self.scale}
class SYESRX4NetS(nn.Module):
    """Lightweight x4 image super-resolution network (small SYESR variant)."""

    def __init__(self, channels):
        super(SYESRX4NetS, self).__init__()
        # Inputs are shifted by rgb_mean and scaled to [0, 255] before
        # processing (rgb_mean is presumably the DIV2K mean — confirm).
        img_range = 255.0
        rgb_mean = (0.4488, 0.4371, 0.404)
        self.img_range = img_range
        self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        # Two-level pyramid preprocessing fused by addition, then a residual block.
        self.headpre = AdditionFusionS(PrePyramidL1S(3), PrePyramidL2S(3), 3)
        self.resblock = ResBlockS(num_feat=3)
        self.head = QuadraticConnectionUnitS(nn.Sequential(nn.Conv2d(3, channels, 5, 1, 2), nn.PReLU(channels), nn.Conv2d(channels, channels, 3, 1, 1)), nn.Conv2d(3, channels, 5, 1, 2), channels)
        self.body = QuadraticConnectionUnitS(nn.Conv2d(channels, channels, 3, 1, 1), nn.Conv2d(channels, channels, 1), channels)
        # Channel attention: global average pool + 1x1 conv excitation + sigmoid.
        self.att = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Conv2d(channels, channels, 1), nn.PReLU(channels), nn.Conv2d(channels, channels, 1), nn.Sigmoid())
        # 48 -> 12 -> 3 channels across two PixelShuffle(2) stages: overall x4.
        self.tail = nn.Sequential(nn.Conv2d(channels, 48, 3, 1, 1), nn.PixelShuffle(2), nn.PixelShuffle(2), nn.Conv2d(3, 3, 3, 1, 1))

    def forward(self, x):
        """Super-resolve a 3-channel image batch by a factor of 4."""
        self.mean = self.mean.type_as(x)
        # Normalise: subtract the RGB mean, scale to pixel range.
        x = ((x - self.mean) * self.img_range)
        inp = x
        x = self.headpre(x)
        x = self.resblock(x)
        x = self.head(x)
        x = self.body(x)
        x = (self.att(x) * x)
        # Global residual: add a bilinearly x4-upscaled copy of the input.
        base = F.interpolate(inp, scale_factor=4, mode='bilinear', align_corners=False)
        x = (self.tail(x) + base)
        return ((x / self.img_range) + self.mean)
# NOTE(review): the bare call below looks like a decorator whose '@' and
# leading name characters were lost in extraction (likely
# '@high_level_function()'); confirm against the original source.
_level_function()
def from_rdataframe(rdf, columns, *, keep_order=False, offsets_type='int64', with_name=None, highlevel=True, behavior=None, attrs=None):
    """Convert the named columns of a ROOT RDataFrame into an Awkward Array
    (thin wrapper delegating to ``_impl``)."""
    return _impl(rdf, columns, highlevel, behavior, with_name, offsets_type, keep_order)
def adjust_max(start, stop, start_value, stop_value, name=None):
    """Build a piecewise schedule tensor driven by the TF1 global step.

    Returns `start_value` while global_step <= `start`, decays linearly
    (polynomial power=1) from `start_value` to `stop_value` between `start`
    and `stop`, and `stop_value` afterwards (the tf.case default branch).
    Returns None when no global step has been created.
    """
    with ops.name_scope(name, 'AdjustMax', [start, stop, name]) as name:
        global_step = tf.train.get_global_step()
        if (global_step is not None):
            # Cast bounds/values once so the comparisons below are well-typed.
            start = tf.convert_to_tensor(start, dtype=tf.int64)
            stop = tf.convert_to_tensor(stop, dtype=tf.int64)
            start_value = tf.convert_to_tensor(start_value, dtype=tf.float32)
            stop_value = tf.convert_to_tensor(stop_value, dtype=tf.float32)
            # Predicate -> branch-callable pairs for tf.case; exclusive=True
            # asserts at most one predicate is true at runtime.
            pred_fn_pairs = {}
            pred_fn_pairs[(global_step <= start)] = (lambda : start_value)
            pred_fn_pairs[((global_step > start) & (global_step <= stop))] = (lambda : tf.train.polynomial_decay(start_value, (global_step - start), (stop - start), end_learning_rate=stop_value, power=1.0, cycle=False))
            default = (lambda : stop_value)
            return tf.case(pred_fn_pairs, default, exclusive=True)
        else:
            return None
# NOTE(review): the four leading `.skipif`/`.parametrize` lines appear to be
# pytest mark decorators whose `@pytest.mark` prefix was lost in extraction —
# confirm against the original source.
.skipif((get_model_url_base_from_env() is None), reason='models are tested only when NNABLA_MODELS_URL_BASE is specified as an envvar')
.parametrize('model_class, up_to_list', [('ResNet18', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('ResNet34', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('ResNet50', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('ResNet101', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('ResNet152', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('SqueezeNetV10', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('SqueezeNetV11', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('MobileNet', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('MobileNetV2', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('NIN', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('SENet', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('DenseNet', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('InceptionV3', ['classifier', 'pool', 'prepool']), ('Xception', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('GoogLeNet', ['classifier', 'pool', 'prepool']), ('ResNeXt50', ['classifier', 'pool', 'lastconv', 'lastconv+relu']), ('ResNeXt101', ['classifier', 'pool', 'lastconv', 'lastconv+relu'])])
.parametrize('image_size_factor', [1, 2])
.parametrize('batch_size', [1, 5])
.parametrize('training', [False, True])
.parametrize('seed', [1223])
def test_nnabla_models_imagenet_etc(model_class, up_to_list, image_size_factor, batch_size, training, seed):
    """Smoke-test nnabla ImageNet models at several truncation points.

    For every `use_up_to` variant it builds the model on random uint8-range
    input, checks global-pooling size validation raises for scaled inputs,
    and verifies the returned NnpNetwork has one input, one output, and the
    expected trainable/frozen parameter state.
    """
    model_module = importlib.import_module('nnabla.models.imagenet')
    # Fresh parameter scope per parametrization to avoid cross-test leakage.
    nn.clear_parameters()
    rng = np.random.RandomState(seed)
    model = getattr(model_module, model_class)()
    # Optionally scale the spatial dims (H, W) of the model's expected input.
    input_shape = list(model.input_shape)
    input_shape[1] *= image_size_factor
    input_shape[2] *= image_size_factor
    input_shape = tuple(input_shape)
    x = nn.Variable.from_numpy_array(rng.randint(0, 256, size=((batch_size,) + input_shape)))
    for use_up_to in up_to_list:
        check_global_pooling = True
        force_global_pooling = False
        returns_net = False
        def _execute():
            # Build the graph up to `use_up_to` and run a forward pass.
            y = model(x, training=training, use_up_to=use_up_to, force_global_pooling=force_global_pooling, check_global_pooling=check_global_pooling)
            y.forward()
        # Non-native input sizes must be rejected when global pooling is checked
        # (SENet always validates, regardless of truncation point).
        if ((image_size_factor != 1) and ((model_class == 'SENet') or (use_up_to in ('classifier', 'pool')))):
            with pytest.raises(ValueError):
                _execute()
            if ((use_up_to == 'pool') and (model_class != 'SENet')):
                # Disabling the check, or forcing pooling, must both succeed.
                check_global_pooling = False
                _execute()
                force_global_pooling = True
                _execute()
        net = model(x, training=training, use_up_to=use_up_to, force_global_pooling=force_global_pooling, check_global_pooling=check_global_pooling, returns_net=True)
        assert isinstance(net, NnpNetwork)
        assert (len(net.inputs.values()) == 1)
        assert (len(net.outputs.values()) == 1)
        y = list(net.outputs.values())[0]
        # Parameters must need gradients iff the model was built for training.
        if training:
            assert _check_trainable_parameters(y)
        else:
            assert (not _check_trainable_parameters(y))
def _not_email(val: Any, split: bool, errtype: str, processtype: str) -> Any:
if (processtype == 'coerce'):
if split:
return ((np.nan, np.nan, 0) if (errtype == 'null') else (np.nan, np.nan, 1))
return ((np.nan, 0) if (errtype == 'null') else (np.nan, 1))
elif (processtype == 'ignore'):
if split:
return ((val, np.nan, 0) if (errtype == 'null') else (val, np.nan, 1))
return ((val, 0) if (errtype == 'null') else (val, 1))
elif (processtype == 'raise'):
raise ValueError(f'unable to parse value {val}')
else:
raise ValueError('invalid error processing type') |
def gumbel_softmax(logits, temperature, device):
    """Draw a Gumbel-softmax (Concrete) sample from ``logits``.

    Perturbs the logits with noise from the module-level ``gumbel``
    distribution (moved to ``device``, extra dim squeezed away) and applies
    a temperature-scaled softmax over the last dimension.
    """
    noise = gumbel.sample(logits.shape).to(device).squeeze(2)
    perturbed = logits + noise
    return F.softmax(perturbed / temperature, dim=-1)
class BratReader(object):
    """Iterates entity mentions (with KB candidates) from brat .ann files in a directory."""
    def __init__(self, dir, ext=EXT, score=SCORE):
        # Directory containing brat annotation files with extension `ext`.
        self.dir = dir
        self.ext = ext
        # Number of trailing chars to strip from a filename ('.{ext}') to get the doc id.
        self.len = (len(ext) + 1)
        # Fixed confidence score attached to every candidate.
        self.score = score
    def __iter__(self):
        """Yield (annot_id, doc_id, start, end, name, candidates) per mention."""
        for (doc_id, fh) in self.files():
            (mentions, norms) = self.read(fh)
            for (annot_id, start, end, name, ne_type) in mentions:
                candidates = list(self.candidates(annot_id, ne_type, norms))
                (yield (annot_id, doc_id, start, end, name, candidates))
    def files(self):
        """Yield (doc_id, open file handle) for every annotation file in `dir`."""
        for f in glob(os.path.join(self.dir, '*.{}'.format(self.ext))):
            doc_id = os.path.basename(f)[:(- self.len)]
            (yield (doc_id, utf8_open(f)))
    def read(self, fh):
        """Parse one brat file into (mentions, normalizations).

        'T' lines are text-bound mentions ('id<TAB>type start end<TAB>text');
        'N' lines are normalizations linking a mention id to a KB id.
        Other line types are ignored.
        """
        mentions = []
        normalizations = defaultdict(list)
        for l in fh:
            l = l.strip()
            if l.startswith('T'):
                (annot_id, mention, name) = l.split('\t', 2)
                (ne_type, start, end) = mention.split(' ', 2)
                mentions.append((annot_id, start, end, name, ne_type))
            elif l.startswith('N'):
                (norm_id, reference) = l.split('\t', 1)
                (_, annot_id, kb_id) = reference.split(' ', 2)
                normalizations[annot_id].append(self.normalise(kb_id))
        return (mentions, normalizations)
    def normalise(self, kb_id):
        """Strip the wiki namespace prefix and URL-unquote a KB id."""
        return self.unquote(self.rm_namespace(kb_id))
    def unquote(self, kb_id):
        # Python 3 exposes urllib.parse; fall back to the Python 2 API otherwise.
        if hasattr(urllib, 'parse'):
            return urllib.parse.unquote(kb_id)
        return urllib.unquote(kb_id.encode('utf8')).decode('utf8')
    def rm_namespace(self, kb_id):
        """Remove the WP prefix from a KB id, if present."""
        if kb_id.startswith(WP):
            return kb_id[len(WP):]
        else:
            return kb_id
    def candidates(self, annot_id, ne_type, norms):
        """Yield a Candidate (fixed score) for each KB id normalized to this mention."""
        for kb_id in norms.get(annot_id, []):
            (yield Candidate(kb_id, self.score, ne_type))
class MultiHeadAttention(nn.Module):
    """Multi-head attention wrapper around a pluggable single-head `attention` callable.

    Projects queries/keys/values into `num_heads` subspaces, runs the given
    attention over all heads in one batched call (heads folded into the
    batch dimension), and projects the concatenated head outputs back.
    """
    def __init__(self, attention, num_heads, hidden_size, key_size='default', value_size='default', out_size='default'):
        # 'default' sizes follow the standard transformer split: hidden / heads.
        key_size = ((hidden_size // num_heads) if (key_size == 'default') else key_size)
        value_size = ((hidden_size // num_heads) if (value_size == 'default') else value_size)
        out_size = (hidden_size if (out_size == 'default') else out_size)
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.key_size = key_size
        self.value_size = value_size
        self.query_projection = nn.Linear(hidden_size, (num_heads * key_size))
        self.key_projection = nn.Linear(hidden_size, (num_heads * key_size))
        self.value_projection = nn.Linear(hidden_size, (num_heads * value_size))
        # Glorot-style std based on fan-in + fan-out of each projection.
        init.normal_(self.query_projection.weight, mean=0, std=math.sqrt((2.0 / (hidden_size + key_size))))
        init.normal_(self.key_projection.weight, mean=0, std=math.sqrt((2.0 / (hidden_size + key_size))))
        init.normal_(self.value_projection.weight, mean=0, std=math.sqrt((2.0 / (hidden_size + value_size))))
        self.output_projection = nn.Linear((num_heads * value_size), out_size)
        init.xavier_normal_(self.output_projection.weight)
        self.attention = attention
    def forward(self, query, key, value, mask=None):
        """Attend `query` over (`key`, `value`); optional `mask` broadcasts over heads.

        A 2-D query (batch, hidden) is treated as a single-query sequence and
        the output is squeezed back to 2-D at the end.
        """
        single_query = False
        if (len(query.size()) == 2):
            query = query.unsqueeze(1)
            single_query = True
        if (mask is not None):
            if (len(mask.size()) == 2):
                mask = mask.unsqueeze(1)
            else:
                # A 3-D mask must provide one row per query position.
                assert (mask.size(1) == query.size(1))
        (num_heads, key_size, value_size) = (self.num_heads, self.key_size, self.value_size)
        (batch_size, num_queries, time_step) = (query.size(0), query.size(1), key.size(1))
        # Project and split the last dim into (heads, per-head size).
        query = self.query_projection(query).view(batch_size, num_queries, num_heads, key_size)
        key = self.key_projection(key).view(batch_size, time_step, num_heads, key_size)
        value = self.value_projection(value).view(batch_size, time_step, num_heads, value_size)
        if (mask is not None):
            # Replicate the mask per head so it matches the folded batch below.
            if (len(mask.size()) == 2):
                mask = mask.unsqueeze(0).repeat(num_heads, 1, 1).view((- 1), time_step)
            else:
                mask = mask.unsqueeze(0).repeat(num_heads, 1, 1, 1).view((- 1), num_queries, time_step)
        # Fold heads into the batch dim: (heads*batch, seq, size).
        query = query.permute(2, 0, 1, 3).contiguous().view((- 1), num_queries, key_size)
        key = key.permute(2, 0, 1, 3).contiguous().view((- 1), time_step, key_size)
        value = value.permute(2, 0, 1, 3).contiguous().view((- 1), time_step, value_size)
        output = self.attention(query, key, value, mask)
        # Unfold heads and concatenate them along the feature dim.
        output = output.view(num_heads, batch_size, num_queries, value_size)
        output = output.permute(1, 2, 0, 3).contiguous().view(batch_size, num_queries, (- 1))
        output = self.output_projection(output)
        if single_query:
            output = output.squeeze(1)
        return output
def get_concat_2level_model():
    """Assemble a PaSST audio model with an augmented mel frontend.

    The wrapper is configured for two-level concatenated timestamp
    embeddings (hence the doubled embedding size).
    """
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=frontend, net=backbone, timestamp_embedding_size=(1295 * 2))
class BigBirdPegasusOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BigBird-Pegasus (dynamic axes + dummy inputs).

    NOTE(review): `inputs` and `outputs` take no arguments and look like they
    were `@property` methods whose decorator was lost in extraction — confirm
    against the original source.
    """
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Return the ordered input names mapped to their dynamic-axis labels, per task."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                # With a cache, the decoder only receives the newest token.
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif (self.task == 'causal-lm'):
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'})])
            if self.use_past:
                # Decoder-only: one (key, value) pair of past states per layer.
                (num_encoder_layers, _) = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            # Sequence-classification / QA style: plain encoder-decoder inputs.
            common_inputs = OrderedDict([('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'})])
        return common_inputs
    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Return output names and dynamic axes, adding `present.*` cache outputs when needed."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_outputs = super().outputs
        else:
            # Skip the seq2seq-specific output handling one level up the MRO.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                (num_encoder_layers, _) = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build encoder+decoder dummy tensors, plus zeroed past_key_values when use_past."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
        # With a cache the decoder sees only one new token.
        decoder_seq_length = (seq_length if (not self.use_past) else 1)
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, encoder_seq_length) = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, (self._config.hidden_size // num_encoder_attention_heads))
            # Arbitrary non-trivial past length (+3) so past and current differ.
            decoder_past_length = (decoder_seq_length + 3)
            decoder_shape = (batch, num_decoder_attention_heads, decoder_past_length, (self._config.hidden_size // num_decoder_attention_heads))
            common_inputs['decoder_attention_mask'] = torch.cat([common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            # Encoder/decoder may have different depths; pair them up to the
            # shallower one, then pad the remaining layers of the deeper side.
            (num_encoder_layers, num_decoder_layers) = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
            remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
            shape = (encoder_shape if (remaining_side_name == 'encoder') else decoder_shape)
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build decoder-only dummy tensors, extending the mask over the fake past."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            (batch, seqlen) = common_inputs['input_ids'].shape
            # Arbitrary non-trivial past length (+2).
            past_key_values_length = (seqlen + 2)
            (num_encoder_layers, _) = self.num_layers
            (num_encoder_attention_heads, _) = self.num_attention_heads
            past_shape = (batch, num_encoder_attention_heads, past_key_values_length, (self._config.hidden_size // num_encoder_attention_heads))
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Tokenize a batch of unk-token strings sized to the effective batch/sequence dims."""
        # -1 means "dynamic": substitute a fixed export-time dimension.
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        dummy_input = ([(' '.join([tokenizer.unk_token]) * seq_length)] * batch_size)
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Dispatch dummy-input generation to the task-specific helper."""
        if (self.task in ['default', 'seq2seq-lm']):
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif (self.task == 'causal-lm'):
            common_inputs = self._generate_dummy_inputs_for_causal_lm(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one past_key_values entry using the task-appropriate base implementation.

        Mutates `flattened_output` via the chosen super() implementation;
        no explicit return value is needed.
        """
        if (self.task in ['default', 'seq2seq-lm']):
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(flattened_output, name, idx, t)
def test_flatten_leading_dims() -> None:
    """flatten_leading_dims collapses all leading axes and its inverse restores them."""
    original = tf.random.uniform([2, 3, 4, 5])
    (flattened, restore) = flatten_leading_dims(original)
    # 2 * 3 * 4 = 24 rows; the trailing axis is preserved.
    npt.assert_array_equal(tf.shape(flattened), [24, 5])
    restored = restore(flattened)
    npt.assert_array_equal(original, restored)
class SYNTHIADataSetDepth(BaseDataset):
    """SYNTHIA segmentation dataset with optional depth maps.

    Maps raw SYNTHIA label ids to 16- or 7-class training ids and, when
    `use_depth` is enabled, attaches a depth file path to every sample and
    returns the depth map from __getitem__.
    """
    def __init__(self, root, list_path, set='all', num_classes=16, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), use_depth=False, depth_processing='GASDA', cfg=None, joint_transform=None):
        super().__init__(root, list_path, set, max_iters, crop_size, None, mean, joint_transform, cfg)
        # Raw SYNTHIA id -> train id mapping for the selected class count.
        if (num_classes == 16):
            self.id_to_trainid = {3: 0, 4: 1, 2: 2, 21: 3, 5: 4, 7: 5, 15: 6, 9: 7, 6: 8, 1: 9, 10: 10, 17: 11, 8: 12, 19: 13, 12: 14, 11: 15}
        elif (num_classes == 7):
            self.id_to_trainid = {1: 4, 2: 1, 3: 0, 4: 0, 5: 1, 6: 3, 7: 2, 8: 6, 9: 2, 10: 5, 11: 6, 15: 2, 22: 0}
        else:
            raise NotImplementedError(f'Not yet supported {num_classes} classes')
        self.cfg = cfg
        self.joint_transform = joint_transform
        self.use_depth = use_depth
        self.depth_processing = depth_processing
        if self.use_depth:
            # Extend each (img, label, name) record with its depth file path.
            for (i, file) in enumerate(self.files):
                (img_file, label_file, name) = file
                depth_file = ((self.root / 'Depth') / name)
                self.files[i] = (img_file, label_file, depth_file, name)
            # Restrict BLAS threading when depth loading is active.
            os.environ['MKL_NUM_THREADS'] = '1'
            os.environ['OMP_NUM_THREADS'] = '1'
        print('ctrl/dataset/synthia.py --> __init__()')
    def get_metadata(self, name, mode=None):
        """Resolve (image, label) file paths for `name` per the data-loading mode."""
        label_file = ((self.root / 'parsed_LABELS') / name)
        if (mode == 'original_only'):
            img_file = ((self.root / 'RGB') / name)
            return (img_file, label_file)
        elif (mode == 'original_and_translated'):
            # Both the original SYNTHIA image and its Cityscapes-style translation.
            img_file1 = ((self.root / 'RGB') / name)
            img_file2 = ((self.root / 'SynthiaToCityscapesRGBs/Rui/images') / name)
            return (img_file1, img_file2, label_file)
        elif (mode == 'translated_only'):
            img_file = ((self.root / 'SynthiaToCityscapesRGBs/Rui/images') / name)
            return (img_file, label_file)
        else:
            print('ctrl/dataset/synthia.py --> set proper value for cfg.SYNTHIA_DATALOADING_MODE')
            raise NotImplementedError
    def __getitem__(self, index):
        """Return (image, label, [depth,] shape, name) for the sample at `index`."""
        depth_file = None
        if self.use_depth:
            (img_file, label_file, depth_file, name) = self.files[index]
        else:
            (img_file, label_file, name) = self.files[index]
        image = self.get_image(img_file)
        label = self.get_labels(label_file)
        depth = None
        if self.use_depth:
            depth = self.get_depth(depth_file)
        # Map raw ids to train ids; unmapped pixels get 255 (ignore index).
        label_copy = (255 * np.ones(label.shape, dtype=np.float32))
        for (k, v) in self.id_to_trainid.items():
            label_copy[(label == k)] = v
        image = self.preprocess(image)
        image = image.copy()
        label_copy = label_copy.copy()
        shape = np.array(image.shape)
        if self.use_depth:
            # BUG FIX: the original called depth.copy() unconditionally before
            # this branch, which raised AttributeError (depth is None) for
            # every sample whenever use_depth was False.
            depth = depth.copy()
            return (image, label_copy, depth, shape, name)
        else:
            return (image, label_copy, shape, name)
    def get_depth(self, file):
        """Load a depth map using the configured processing pipeline (GASDA or DADA)."""
        if (self.depth_processing == 'GASDA'):
            return get_depth_gasda(self, file, phase='train')
        elif (self.depth_processing == 'DADA'):
            return get_depth_dada(self, file, phase='train')
class TestFFTFreq():
    """Checks fft.fftfreq against hand-written expected bins for odd/even lengths.

    NOTE(review): the three bare lines below look like class-level pytest
    decorators/marks whose '@' prefix was lost in extraction — confirm
    against the original source.
    """
    _if_array_api_backend('numpy.array_api')
    _if_array_api_backend('cupy')
    _api_compatible
    def test_definition(self, xp):
        device = SCIPY_DEVICE
        # Some backends reject the `device` kwarg; fall back without it.
        try:
            x = xp.asarray([0, 1, 2, 3, 4, (- 4), (- 3), (- 2), (- 1)], dtype=xp.float64, device=device)
            x2 = xp.asarray([0, 1, 2, 3, 4, (- 5), (- 4), (- 3), (- 2), (- 1)], dtype=xp.float64)
        except TypeError:
            x = xp.asarray([0, 1, 2, 3, 4, (- 4), (- 3), (- 2), (- 1)], dtype=xp.float64)
            x2 = xp.asarray([0, 1, 2, 3, 4, (- 5), (- 4), (- 3), (- 2), (- 1)], dtype=xp.float64)
        # Multiplying fftfreq(n, d) by n*d recovers the integer bin indices.
        y = xp.asarray((9 * fft.fftfreq(9, xp=xp)), dtype=xp.float64)
        xp_assert_close(y, x)
        y = xp.asarray(((9 * xp.pi) * fft.fftfreq(9, xp.pi, xp=xp)), dtype=xp.float64)
        xp_assert_close(y, x)
        y = xp.asarray((10 * fft.fftfreq(10, xp=xp)), dtype=xp.float64)
        xp_assert_close(y, x2)
        y = xp.asarray(((10 * xp.pi) * fft.fftfreq(10, xp.pi, xp=xp)), dtype=xp.float64)
        xp_assert_close(y, x2)
def ordered_yaml_load(yaml_path, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """Load a YAML file with mappings materialized via `object_pairs_hook`.

    A throwaway Loader subclass is given a mapping constructor that preserves
    key order (OrderedDict by default) instead of using a plain dict.
    """
    class _OrderPreservingLoader(Loader):
        pass

    def _construct_ordered_mapping(loader, node):
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    _OrderPreservingLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_ordered_mapping)
    with open(yaml_path) as stream:
        return yaml.load(stream, _OrderPreservingLoader)
class ModulatedDeformConvFunction(Function):
    """Autograd Function wrapping the modulated deformable conv CUDA kernels.

    NOTE(review): `forward`/`backward` take `ctx` first but show no
    @staticmethod decorator, and `_differentiable` below looks like a
    stripped `@once_differentiable` — likely lost in extraction; confirm.
    """
    def forward(ctx, input, offset, mask, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1):
        # Stash hyper-parameters on ctx for backward.
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = (bias is not None)
        if (not ctx.with_bias):
            # The CUDA kernel requires a bias tensor even when unused.
            bias = input.new_empty(1)
        if (not input.is_cuda):
            raise NotImplementedError
        if (weight.requires_grad or mask.requires_grad or offset.requires_grad or input.requires_grad):
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
        # Scratch buffers used by the CUDA implementation.
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        deform_conv_cuda.modulated_deform_conv_cuda_forward(input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias)
        return output
    _differentiable
    def backward(ctx, grad_output):
        """Compute gradients for all tensor inputs via the CUDA backward kernel."""
        if (not grad_output.is_cuda):
            raise NotImplementedError
        (input, offset, mask, weight, bias) = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        deform_conv_cuda.modulated_deform_conv_cuda_backward(input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias)
        if (not ctx.with_bias):
            grad_bias = None
        # One gradient per forward argument; non-tensor args get None.
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None)
    def _infer_shape(ctx, input, weight):
        """Standard conv output-shape arithmetic (NCHW, square stride/padding/dilation)."""
        n = input.size(0)
        channels_out = weight.size(0)
        (height, width) = input.shape[2:4]
        (kernel_h, kernel_w) = weight.shape[2:4]
        height_out = ((((height + (2 * ctx.padding)) - ((ctx.dilation * (kernel_h - 1)) + 1)) // ctx.stride) + 1)
        width_out = ((((width + (2 * ctx.padding)) - ((ctx.dilation * (kernel_w - 1)) + 1)) // ctx.stride) + 1)
        return (n, channels_out, height_out, width_out)
def main(argv):
    """Generate ns-3 Python bindings C++ source from a module's API definitions.

    argv: [prog, module_src_path, target, extension_name, output_cc_file].
    Imports the per-module `modulegen__<target>` API definitions (and optional
    customizations) from the module's bindings/ dir, applies ns-3 core
    customizations, and writes the generated code via MyMultiSectionFactory.
    """
    logging.basicConfig()
    logging.getLogger('pybindgen.typehandlers').setLevel(logging.DEBUG)
    (module_abs_src_path, target, extension_name, output_cc_file_name) = argv[1:]
    module_name = os.path.basename(module_abs_src_path)
    out = MyMultiSectionFactory(output_cc_file_name)
    # Temporarily put the module's bindings/ dir on sys.path for the imports below.
    sys.path.insert(0, os.path.join(module_abs_src_path, 'bindings'))
    try:
        module_apidefs = __import__(('modulegen__%s' % target))
        # Drop the cache entry so another module's file of the same name can load later.
        del sys.modules[('modulegen__%s' % target)]
        try:
            module_customization = __import__('modulegen_customizations')
            del sys.modules['modulegen_customizations']
        except ImportError:
            # No customizations: use an empty object so hasattr() checks fail cleanly.
            module_customization = object()
        try:
            from callbacks_list import callback_classes
        except ImportError as ex:
            print('', repr(ex), file=sys.stderr)
            callback_classes = []
        else:
            print('', repr(callback_classes), file=sys.stderr)
    finally:
        sys.path.pop(0)
    (apidefs_file, dummy) = os.path.splitext(module_apidefs.__file__)
    # Report errors against the .py source even if a .pyc was imported.
    apidefs_file += '.py'
    pybindgen.settings.error_handler = ErrorHandler(apidefs_file)
    root_module = module_apidefs.module_init()
    root_module.set_name(extension_name)
    root_module.add_include(('"ns3/%s-module.h"' % module_name))
    ns3modulegen_core_customizations.add_std_ios_openmode(root_module)
    # Registration phases: types first, then methods, then functions, with
    # optional per-module post hooks after each phase.
    module_apidefs.register_types(root_module)
    if hasattr(module_customization, 'post_register_types'):
        module_customization.post_register_types(root_module)
    ns3modulegen_core_customizations.register_callback_classes(root_module.after_forward_declarations, callback_classes)
    module_apidefs.register_methods(root_module)
    if hasattr(module_customization, 'post_register_methods'):
        module_customization.post_register_methods(root_module)
    ns3modulegen_core_customizations.Object_customizations(root_module)
    ns3modulegen_core_customizations.Attribute_customizations(root_module)
    ns3modulegen_core_customizations.generate_callback_classes(root_module, callback_classes)
    module_apidefs.register_functions(root_module)
    if hasattr(module_customization, 'post_register_functions'):
        module_customization.post_register_functions(root_module)
    root_module.generate(out)
# NOTE(review): the bare call below looks like a decorator whose '@' prefix
# was lost during extraction — confirm against the original source.
_decorator(0)
def get_friends(html):
    """Extract the friends count from a profile page.

    Takes the left-hand panel via `public.get_left`, parses it with
    BeautifulSoup, and returns the text of the first <strong> tag as an int.
    """
    cont = public.get_left(html)
    soup = BeautifulSoup(cont, 'lxml')
    return int(soup.find_all('strong')[0].get_text())
class ImageCaptioningPyTorchModel():
    """Loads a pretrained image-captioning model and captions image folders on call."""
    def __init__(self, model_path, infos_path, cnn_model='resnet101', device='cuda'):
        # `infos` holds the training-time options and vocabulary pickle.
        with open(infos_path, 'rb') as f:
            infos = utils.pickle_load(f)
        opt = infos['opt']
        opt.model = model_path
        opt.cnn_model = cnn_model
        opt.device = device
        opt.vocab = infos['vocab']
        model = models.setup(opt)
        # Free the pickle and the (large) vocab; it is re-loaded per __call__.
        del infos
        del opt.vocab
        model.load_state_dict(torch.load(opt.model, map_location='cpu'))
        model.to(opt.device)
        model.eval()
        crit = losses.LanguageModelCriterion()
        self.opt = opt
        self.model = model
        self.crit = crit
        self.infos_path = infos_path
        torch.cuda.empty_cache()
        gc.collect()
    def __call__(self, image_folder, batch_size):
        """Caption all images in `image_folder`; returns a list, or a string for a single image."""
        opt = self.opt
        opt.batch_size = batch_size
        opt.image_folder = image_folder
        opt.coco_json = ''
        opt.dataset = opt.input_json
        # Disable eval-time logging/dumping; caption every image (-1 = no limit).
        opt.verbose_loss = 0
        opt.verbose = False
        opt.dump_path = 0
        opt.dump_images = 0
        opt.num_images = (- 1)
        opt.language_eval = 0
        # Reload the vocab (dropped in __init__ to save memory).
        with open(self.infos_path, 'rb') as f:
            infos = utils.pickle_load(f)
        opt.vocab = infos['vocab']
        if (len(opt.image_folder) == 0):
            # Empty folder string means: use the preprocessed dataset loader.
            loader = DataLoader(opt)
        else:
            loader = DataLoaderRaw({'folder_path': opt.image_folder, 'coco_json': opt.coco_json, 'batch_size': opt.batch_size, 'cnn_model': opt.cnn_model})
        loader.dataset.ix_to_word = opt.vocab
        del infos
        del opt.vocab
        (_, split_predictions, _) = eval_utils.eval_split(self.model, self.crit, loader, vars(opt))
        captions = []
        for line in split_predictions:
            captions.append(line['caption'])
        # Release loader/GPU memory before returning.
        del loader
        torch.cuda.empty_cache()
        gc.collect()
        return (captions if (len(captions) > 1) else captions[0])
class distill():
    """Feature-map knowledge distillation between a teacher and a student network."""
    def __init__(self, args, model, teacher):
        self.args = args
        self.student = model
        self.teacher = teacher
        # Intermediate layers whose features will be matched.
        self.student_layers = self.sampled_layer(args.arch, self.student)
        self.teacher_layers = self.sampled_layer(args.teacher_arch, self.teacher)
        def kwargs(**kwargs):
            return kwargs
        # Globally preset constructor defaults on the custom conv/BN layers so
        # the aux adapters below are built with this configuration.
        setattr(tcl.Conv2d, 'pre_defined', kwargs(kernel_initializer=tf.keras.initializers.he_normal(), use_biases=False, activation_fn=None, trainable=True))
        setattr(tcl.BatchNorm, 'pre_defined', kwargs(trainable=True))
        # 1x1 conv + BN adapters mapping student features to teacher channel counts.
        self.student.aux_layers = [tf.keras.Sequential([tcl.Conv2d([1, 1], tl.gamma.shape[(- 1)]), tcl.BatchNorm()]) for (sl, tl) in zip(self.student_layers, self.teacher_layers)]
        # Weight of the distillation term in the total loss.
        self.beta = 1000.0
    def sampled_layer(self, arch, model):
        """Select the pre-activation BN layers used as distillation points.

        Also flags them with keep_feat='pre_act' so their features are retained
        during the forward pass. Only WResNet architectures are supported.
        """
        if ('WResNet' in arch):
            for i in range(1, 3):
                model.Layers[('BasicBlock%d.0/bn' % i)].keep_feat = 'pre_act'
            model.Layers['bn_last'].keep_feat = 'pre_act'
            return ([model.Layers[('BasicBlock%d.0/bn' % i)] for i in range(1, 3)] + [model.Layers['bn_last']])
    def loss(self, sl, tl, aux):
        """MSE between L2-normalized (over H, W) adapted student and teacher features."""
        s = aux(sl.feat, training=True)
        t = tf.stop_gradient(tl.feat)
        return tf.reduce_mean(tf.square((tf.nn.l2_normalize(s, [1, 2]) - tf.nn.l2_normalize(t, [1, 2]))))
    def forward(self, input, labels, target_loss):
        """Add the depth-weighted distillation loss (deeper layers weighted more) to `target_loss`."""
        self.teacher(input, training=False)
        return (target_loss + (tf.add_n([(self.loss(*data) / (2 ** ((len(self.student_layers) - i) - 1))) for (i, data) in enumerate(zip(self.student_layers, self.teacher_layers, self.student.aux_layers))]) * self.beta))
class TestNormalizeCell():
class ConcreteMetric(AbstractMetric):
def evaluate_single_no_special_case(self, target, prediction):
return 42.0
def abstract_metric_instance(self):
return self.ConcreteMetric()
def test_evaluate_single_special_case_empty_lists(self, abstract_metric_instance):
target = []
prediction = []
result = abstract_metric_instance.evaluate_single_special_case(target, prediction)
assert (result == 1.0)
def test_evaluate_single_special_case_empty_prediction(self, abstract_metric_instance):
target = [[1, 2], [3, 4]]
prediction = []
result = abstract_metric_instance.evaluate_single_special_case(target, prediction)
assert (result == 0.0)
def test_evaluate_single_special_case_empty_target(self, abstract_metric_instance):
target = []
prediction = [[1, 2], [3, 4]]
result = abstract_metric_instance.evaluate_single_special_case(target, prediction)
assert (result == 0.0)
def test_evaluate_single_test_metric(self, abstract_metric_instance):
target = [[1, 2], [3, 4]]
prediction = [[5, 6], [7, 8]]
result = abstract_metric_instance.evaluate_single_test_metric(target, prediction)
assert (result == 42.0)
def test_evaluate_tests(self, abstract_metric_instance):
targets = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
predictions = [[[9, 10], [11, 12]], [[13, 14], [15, 16]]]
results = abstract_metric_instance.evaluate_tests(targets, predictions)
assert (results == [42.0, 42.0])
.parametrize('value, expected_value', [(42, '42'), ('42', '42'), (3.14159, '3.14'), ('HELLO\n ', 'hello'), (None, 'None'), (math.nan, 'None'), ('abc123', 'abc123'), (np.float_(10), '10.0'), (np.int_(10), '10'), (True, True)])
def test_normalize_cell(self, abstract_metric_instance, value, expected_value):
assert (abstract_metric_instance.normalize_cell(value) == expected_value)
def test_normalize_time(self, abstract_metric_instance):
target = np.random.rand(20, 1000)
prediction = np.random.rand(20, 1000)
start_time = time.time()
abstract_metric_instance.evaluate_single_test_metric(list(target), list(prediction))
assert ((start_time - time.time()) < 0.5) |
# NOTE(review): the line below looks like a `@pytest.mark.parametrize`
# decorator whose prefix was lost in extraction — confirm.
.parametrize('input_dim, output_dim, hidden_sizes', plain_settings)
def test_softplus_std_network_output_values(input_dim, output_dim, hidden_sizes):
    """GaussianMLPModule with softplus std parameterization yields the expected moments.

    With all weights set to ones and no nonlinearity, the mean is the product
    of layer fan-ins; the variance is softplus(init_std) squared.
    """
    init_std = 2.0
    module = GaussianMLPModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, init_std=init_std, hidden_nonlinearity=None, std_parameterization='softplus', hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_)
    dist = module(torch.ones(input_dim))
    # All-ones weights propagate a sum of inputs through each layer.
    exp_mean = (input_dim * torch.Tensor(hidden_sizes).prod().item())
    # softplus(x) = log(1 + exp(x)); std is softplus(init_std).
    exp_variance = (torch.Tensor([init_std]).exp().add(1.0).log() ** 2)
    assert dist.mean.equal(torch.full((output_dim,), exp_mean, dtype=torch.float))
    assert dist.variance.equal(torch.full((output_dim,), exp_variance[0], dtype=torch.float))
    assert (dist.rsample().shape == (output_dim,))
# NOTE(review): the bare call below looks like a labeling-function decorator
# (e.g. Snorkel's @labeling_function()) stripped of its '@' prefix — confirm.
_function()
def body_contains_fortune(x):
    """Label POSITIVE when the example's body mentions 'fortune', else ABSTAIN."""
    return (POSITIVE if ('fortune' in x.body) else ABSTAIN)
def build_pixel_sampler(cfg, **default_args):
    """Build a pixel sampler from a config dict via the PIXEL_SAMPLERS registry."""
    return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
def _move_date_to_end(d: datetime.datetime) -> datetime.datetime:
if (d.time() == datetime.time.min):
return ((d + datetime.timedelta(days=1)) - datetime.timedelta(minutes=1))
else:
return d |
class PolyLrUpdaterHook(LrUpdaterHook):
    """Polynomial-decay learning-rate schedule.

    lr = (base_lr - min_lr) * (1 - progress/max_progress)^power + min_lr,
    where progress is measured in epochs or iterations depending on the
    base hook's `by_epoch` setting.
    """

    def __init__(self, power=1.0, min_lr=0.0, **kwargs):
        self.power = power
        self.min_lr = min_lr
        super(PolyLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, trainer, base_lr):
        """Return the decayed learning rate for the trainer's current progress."""
        if self.by_epoch:
            progress, max_progress = trainer.epoch, trainer.max_epochs
        else:
            progress, max_progress = trainer.iter, trainer.max_iters
        factor = (1 - progress / max_progress) ** self.power
        return (base_lr - self.min_lr) * factor + self.min_lr
def _train_vae(vae_trainer, replay_buffer, epoch, batches=50, oracle_data=False):
batch_sampler = replay_buffer.random_vae_training_data
if oracle_data:
batch_sampler = None
vae_trainer.train_epoch(epoch, sample_batch=batch_sampler, batches=batches, from_rl=True) |
class UpBlock3D(nn.Module):
    """Upsampling block of a 3D UNet: ResNet blocks interleaved with temporal
    convolutions, consuming skip connections, with an optional upsampler."""

    def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_upsample=True):
        super().__init__()
        resnets = []
        temp_convs = []
        for i in range(num_layers):
            # Last layer's skip carries the down-path input channels; earlier
            # skips carry out_channels. The first resnet consumes the previous
            # up block's output.
            res_skip_channels = (in_channels if (i == (num_layers - 1)) else out_channels)
            resnet_in_channels = (prev_output_channel if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=(resnet_in_channels + res_skip_channels), out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1))
        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)
        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None
        self.gradient_checkpointing = False

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):
        """Apply resnet + temporal conv pairs (each fed the matching skip
        connection, consumed from the end of the tuple), then upsample."""
        for (resnet, temp_conv) in zip(self.resnets, self.temp_convs):
            # Pop the most recent skip state and concatenate along channels.
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            if (self.training and self.gradient_checkpointing):
                # Wrap modules so torch.utils.checkpoint can re-run them in
                # the backward pass instead of storing activations.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = temp_conv(hidden_states, num_frames=num_frames)
        if (self.upsamplers is not None):
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)
        return hidden_states
def PoincareHomologyThreeSphere():
    """Return a 16-vertex triangulation of the Poincare homology 3-sphere.

    The complex is given by an explicit list of its tetrahedral facets.
    """
    return UniqueSimplicialComplex([[1, 2, 4, 9], [1, 2, 4, 15], [1, 2, 6, 14], [1, 2, 6, 15], [1, 2, 9, 14], [1, 3, 4, 12], [1, 3, 4, 15], [1, 3, 7, 10], [1, 3, 7, 12], [1, 3, 10, 15], [1, 4, 9, 12], [1, 5, 6, 13], [1, 5, 6, 14], [1, 5, 8, 11], [1, 5, 8, 13], [1, 5, 11, 14], [1, 6, 13, 15], [1, 7, 8, 10], [1, 7, 8, 11], [1, 7, 11, 12], [1, 8, 10, 13], [1, 9, 11, 12], [1, 9, 11, 14], [1, 10, 13, 15], [2, 3, 5, 10], [2, 3, 5, 11], [2, 3, 7, 10], [2, 3, 7, 13], [2, 3, 11, 13], [2, 4, 9, 13], [2, 4, 11, 13], [2, 4, 11, 15], [2, 5, 8, 11], [2, 5, 8, 12], [2, 5, 10, 12], [2, 6, 10, 12], [2, 6, 10, 14], [2, 6, 12, 15], [2, 7, 9, 13], [2, 7, 9, 14], [2, 7, 10, 14], [2, 8, 11, 15], [2, 8, 12, 15], [3, 4, 5, 14], [3, 4, 5, 15], [3, 4, 12, 14], [3, 5, 10, 15], [3, 5, 11, 14], [3, 7, 12, 13], [3, 11, 13, 14], [3, 12, 13, 14], [4, 5, 6, 7], [4, 5, 6, 14], [4, 5, 7, 15], [4, 6, 7, 11], [4, 6, 10, 11], [4, 6, 10, 14], [4, 7, 11, 15], [4, 8, 9, 12], [4, 8, 9, 13], [4, 8, 10, 13], [4, 8, 10, 14], [4, 8, 12, 14], [4, 10, 11, 13], [5, 6, 7, 13], [5, 7, 9, 13], [5, 7, 9, 15], [5, 8, 9, 12], [5, 8, 9, 13], [5, 9, 10, 12], [5, 9, 10, 15], [6, 7, 11, 12], [6, 7, 12, 13], [6, 10, 11, 12], [6, 12, 13, 15], [7, 8, 10, 14], [7, 8, 11, 15], [7, 8, 14, 15], [7, 9, 14, 15], [8, 12, 14, 15], [9, 10, 11, 12], [9, 10, 11, 16], [9, 10, 15, 16], [9, 11, 14, 16], [9, 14, 15, 16], [10, 11, 13, 16], [10, 13, 15, 16], [11, 13, 14, 16], [12, 13, 14, 15], [13, 14, 15, 16]], name='Triangulation of the Poincare homology 3-sphere')
class Connector(object):
    """Generated gRPC client stubs for the grpc.Connector service.

    Each method issues a single RPC through the grpc.experimental API with
    the matching protobuf serializer/deserializer pair.
    NOTE(review): grpcio-generated service classes normally mark these as
    @staticmethod — the decorators appear to have been stripped in this copy.
    """

    def AllianceStatusStream(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Server-streaming RPC: yields Status messages for an available client.
        return grpc.experimental.unary_stream(request, target, '/grpc.Connector/AllianceStatusStream', fedn__pb2.ClientAvailableMessage.SerializeToString, fedn__pb2.Status.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def SendStatus(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Unary RPC: report a Status, receive a Response.
        return grpc.experimental.unary_unary(request, target, '/grpc.Connector/SendStatus', fedn__pb2.Status.SerializeToString, fedn__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def ListActiveClients(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Unary RPC: query the list of currently active clients.
        return grpc.experimental.unary_unary(request, target, '/grpc.Connector/ListActiveClients', fedn__pb2.ListClientsRequest.SerializeToString, fedn__pb2.ClientList.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def AcceptingClients(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Unary RPC: ask whether the server is accepting new clients.
        return grpc.experimental.unary_unary(request, target, '/grpc.Connector/AcceptingClients', fedn__pb2.ConnectionRequest.SerializeToString, fedn__pb2.ConnectionResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def SendHeartbeat(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Unary RPC: send a keep-alive Heartbeat.
        return grpc.experimental.unary_unary(request, target, '/grpc.Connector/SendHeartbeat', fedn__pb2.Heartbeat.SerializeToString, fedn__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def ReassignClient(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Unary RPC: request that a client be reassigned.
        return grpc.experimental.unary_unary(request, target, '/grpc.Connector/ReassignClient', fedn__pb2.ReassignRequest.SerializeToString, fedn__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    def ReconnectClient(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        # Unary RPC: request that a client reconnect.
        return grpc.experimental.unary_unary(request, target, '/grpc.Connector/ReconnectClient', fedn__pb2.ReconnectRequest.SerializeToString, fedn__pb2.Response.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class V1LayerParameter(message.Message):
    """Generated protobuf message class for V1LayerParameter.

    NOTE(review): uses the legacy Python-2-style protobuf API where the
    GeneratedProtocolMessageType metaclass populates the class from
    DESCRIPTOR at class-creation time.
    """
    __metaclass__ = reflection.GeneratedProtocolMessageType
    # Descriptor object generated from the .proto definition.
    DESCRIPTOR = _V1LAYERPARAMETER
def prepare_attention(attention_states, kd_states, attention_option, num_units, reuse=False):
    """Build the keys, values and score/construct functions used by an
    attention decoder.

    Returns a (attention_keys, attention_values, attention_construct_fn)
    tuple; when knowledge states are supplied the values are the pair
    (attention_states, kd_states).
    """
    with variable_scope.variable_scope('attn_keys', reuse=reuse) as scope:
        attention_keys = layers.linear(attention_states, num_units, biases_initializer=None, scope=scope)
    attention_values = attention_states if kd_states is None else (attention_states, kd_states)
    score_fn = _create_attention_score_fn('attn_score', num_units, attention_option, reuse)
    construct_fn = _create_attention_construct_fn('attn_construct', num_units, score_fn, reuse)
    return (attention_keys, attention_values, construct_fn)
def test_consensus_score():
    """Identical bicluster sets score 1; fully mismatched sets score 0."""
    rows = [[True, True, False, False], [False, False, True, True]]
    flipped = rows[::-1]
    perfect_pairs = [
        ((rows, rows), (rows, rows)),
        ((rows, rows), (flipped, flipped)),
        ((rows, flipped), (rows, flipped)),
        ((rows, flipped), (flipped, rows)),
    ]
    for first, second in perfect_pairs:
        assert consensus_score(first, second) == 1
    disjoint_pairs = [
        ((rows, rows), (flipped, rows)),
        ((rows, rows), (rows, flipped)),
        ((flipped, flipped), (rows, flipped)),
        ((flipped, flipped), (flipped, rows)),
    ]
    for first, second in disjoint_pairs:
        assert consensus_score(first, second) == 0
def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity', return_trans_inv=False):
    """Align a face image to reference landmarks and crop it.

    Args:
        src_img: source image array (H x W x C).
        facial_pts: detected landmarks, shape (K, 2) or (2, K) with K > 2.
        reference_pts: target landmarks; defaults to the canonical layout
            for a 96x112 crop (derived for other crop sizes).
        crop_size: (width, height) of the output crop.
        align_type: 'cv2_affine', 'affine', or anything else for a
            similarity transform (the default keeps its historical spelling).
        return_trans_inv: when True also return the inverse transform.

    Returns:
        face_img, or (face_img, tfm_inv) when return_trans_inv is True.

    Raises:
        FaceWarpException: if either point set is not (K,2)/(2,K) with K > 2,
            or the two point sets differ in shape.
    """
    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size
            reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding, default_square)
    ref_pts = np.float32(reference_pts)
    # Shrink the reference layout toward the 112-crop centre, then rescale
    # to the requested crop width.
    ref_pts = ((ref_pts - (112 / 2)) * 0.85) + (112 / 2)
    ref_pts *= (crop_size[0] / 112.0)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')
    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T
    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')
    if src_pts_shp[0] == 2:
        src_pts = src_pts.T
    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException('facial_pts and reference_pts must have the same shape')
    # BUG FIX: the original compared strings with `is`, which tests object
    # identity, not equality — equal strings are not guaranteed to be the
    # same object, so those branches could silently never run.
    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        (tfm, tfm_inv) = get_similarity_transform_for_cv2(src_pts, ref_pts)
    # NOTE(review): tfm_inv is only bound on the similarity branch; calling
    # with return_trans_inv=True and an affine align_type would raise
    # UnboundLocalError — confirm intended usage before tightening.
    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))
    if return_trans_inv:
        return (face_img, tfm_inv)
    return face_img
_array_function
class TestVerifyMatchingSignatures(object):
    """Tests for the signature-verification helper used by the array
    function dispatch machinery."""

    def test_verify_matching_signatures(self):
        # Matching parameter names pass, regardless of default values...
        verify_matching_signatures((lambda x: 0), (lambda x: 0))
        verify_matching_signatures((lambda x=None: 0), (lambda x=None: 0))
        verify_matching_signatures((lambda x=1: 0), (lambda x=None: 0))
        # ...while mismatched names or presence of defaults raise RuntimeError.
        with assert_raises(RuntimeError):
            verify_matching_signatures((lambda a: 0), (lambda b: 0))
        with assert_raises(RuntimeError):
            verify_matching_signatures((lambda x: 0), (lambda x=None: 0))
        with assert_raises(RuntimeError):
            verify_matching_signatures((lambda x=None: 0), (lambda y=None: 0))
        with assert_raises(RuntimeError):
            verify_matching_signatures((lambda x=1: 0), (lambda y=1: 0))

    def test_array_function_dispatch(self):
        # NOTE(review): `_function_dispatch(...)` followed by a bare `def f`
        # looks like a garbled `@array_function_dispatch(...)` decorator
        # application from the original source — confirm before relying on
        # this test's structure.
        with assert_raises(RuntimeError):
            _function_dispatch((lambda x: (x,)))
            def f(y):
                pass
        # With verify=False the mismatch check is skipped and no error raised.
        _function_dispatch((lambda x: (x,)), verify=False)
        def f(y):
            pass
def clean_by_unp(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean and standardize a column of UNP codes in a DataFrame.

    Args:
        df: pandas or Dask DataFrame holding the data.
        column: name of the column to clean.
        output_format: 'compact' or 'standard' output representation.
        inplace: when True the cleaned values replace the original column
            (which is then renamed to '<column>_clean' — NOTE(review):
            confirm this renaming is the intended inplace semantics).
        errors: error-handling mode forwarded to the per-value formatter.
        progress: show a Dask progress bar during computation.

    Returns:
        The cleaned DataFrame, computed back to pandas.

    Raises:
        ValueError: if output_format is not 'compact' or 'standard'.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Work lazily in Dask regardless of the input frame type.
    df = to_dask(df)
    # _format returns a tuple per value; only element 0 is kept below.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Move cleaned values into the original column, then rename it.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
class EncoderInterface(nn.Module):
    """Base interface for encoder modules.

    Subclasses implement forward(); this base provides parameter counting
    and dropout-rate updates.
    """

    def __init__(self):
        super(EncoderInterface, self).__init__()

    def count_parameters(self) -> int:
        """Return the total number of elements across all parameters.

        BUG FIX: the original summed the bound-method objects (`p.numel`)
        instead of calling them, which raises TypeError for any module with
        parameters.
        """
        return sum(p.numel() for p in self.parameters())

    def update_dropout(self, dropout_p: float) -> None:
        """Set the probability of every direct nn.Dropout child to dropout_p."""
        for (name, child) in self.named_children():
            if isinstance(child, nn.Dropout):
                child.p = dropout_p

    def forward(self, inputs: Tensor, input_lengths: Tensor):
        """Subclasses must implement the forward pass."""
        raise NotImplementedError
def video2img(video_path):
    """Extract every frame of a video, resize to 1280x720 and save as JPEGs.

    Frames are written to data/videos/<video_id>/images/rgb_XXXXX.jpg.
    Exits the process if the video cannot be opened.
    """
    start = time.time()
    video_id = os.path.splitext(os.path.basename(video_path))[0]
    target_dir = os.path.join('data/videos', video_id, 'images')
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(target_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print('Broken or invalid video. Quit...')
        exit(1)
    frame_cnt = 0
    while cap.isOpened():
        (ret, frame) = cap.read()
        # FIX: test the flag's truthiness instead of identity with False;
        # also drops the second, unused frame counter from the original.
        if not ret:
            break
        frame = cv2.resize(frame, (1280, 720))
        cv2.imwrite(os.path.join(target_dir, 'rgb_{0:05d}.jpg'.format(frame_cnt)), frame)
        frame_cnt += 1
    cap.release()
    print('Outdir: {}, number of frame: {}'.format(target_dir, frame_cnt))
    print('Elapsed time: {} s'.format(time.time() - start))
class ECAPA_TDNN(nn.Module):
    """ECAPA-TDNN trunk: an initial Conv1d front-end followed by three
    dilated Res2Net-style bottleneck layers whose outputs are concatenated
    and projected to output_size channels."""

    def __init__(self, input_size: int=80, output_size: int=1536, C: int=1024, **kwargs):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.conv1 = nn.Conv1d(input_size, C, kernel_size=5, stride=1, padding=2)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(C)
        # Increasing dilations widen the temporal receptive field per layer.
        self.layer1 = _Bottle2neck(C, C, kernel_size=3, dilation=2, scale=8)
        self.layer2 = _Bottle2neck(C, C, kernel_size=3, dilation=3, scale=8)
        self.layer3 = _Bottle2neck(C, C, kernel_size=3, dilation=4, scale=8)
        # 1x1 conv fuses the concatenated outputs of the three layers.
        self.layer4 = nn.Conv1d((3 * C), output_size, kernel_size=1)

    def input_size(self):
        """Return the expected number of input features per frame."""
        return self._indim

    def output_size(self):
        """Return the number of output channels per frame."""
        return self._outdim

    def forward(self, x: torch.FloatTensor):
        """Encode x of shape (batch, time, input_size) to (batch, time, output_size)."""
        # Conv1d expects channels-first, so move features to dim 1.
        x = self.conv1(x.transpose(1, 2).contiguous())
        x = self.relu(x)
        x = self.bn1(x)
        # Each layer sees the sum of the front-end output and all earlier
        # layer outputs (dense residual connections).
        x1 = self.layer1(x)
        x2 = self.layer2((x + x1))
        x3 = self.layer3(((x + x1) + x2))
        x = self.layer4(torch.cat((x1, x2, x3), dim=1))
        x = self.relu(x)
        # Back to (batch, time, channels) for downstream consumers.
        x = x.transpose(1, 2).contiguous()
        return x
def main(n, dim, lamb, norm):
    """Plot epsilon-versus-alpha curves for three private learning methods."""
    two_norm = norm / math.sqrt(dim)
    delta = 1.0 / n
    alphas = np.linspace(0.001, 0.5, 1000)
    # (legend label, epsilon function of alpha, line color) per mechanism.
    curves = [
        ('stochastic gradient descent', lambda a: sgd_get_epsilon_expected(a, n, dim, delta, 2.0 * two_norm, 1.0), 'black'),
        ('covariance perturbation', lambda a: covar_get_epsilon(a, n, dim, two_norm), 'red'),
        ('output perturbation', lambda a: output_pert_linreg_get_epsilon(a, n, dim, lamb, two_norm), 'blue'),
    ]
    plt.figure()
    for (_, epsilon_fn, color) in curves:
        plt.plot(alphas, [epsilon_fn(a) for a in alphas], color=color, linewidth=2.5)
    plt.legend([label for (label, _, _) in curves])
    plt.show()
.parametrize('shapes', [((4, 84, 84), 3136)])
.parametrize('filters', [[(32, 8, 4), (64, 4, 2), (64, 3, 1)]])
.parametrize('feature_size', [512])
.parametrize('batch_size', [32])
.parametrize('use_batch_norm', [False, True])
.parametrize('dropout_rate', [None, 0.2])
.parametrize('activation', [torch.nn.ReLU()])
def test_pixel_encoder(shapes: Tuple[(Sequence[int], int)], filters: List[List[int]], feature_size: int, batch_size: int, use_batch_norm: bool, dropout_rate: Optional[float], activation: torch.nn.Module) -> None:
    """PixelEncoder should map image batches to fixed-size features, be
    deterministic in eval mode iff it has no stochastic layers, and update
    all of its parameters when trained."""
    (observation_shape, _) = shapes
    encoder = PixelEncoder(observation_shape=observation_shape, filters=filters, feature_size=feature_size, use_batch_norm=use_batch_norm, dropout_rate=dropout_rate, activation=activation)
    x = torch.rand((batch_size, *observation_shape))
    y = encoder(x)
    assert (y.shape == (batch_size, feature_size))
    # Eval-mode output only differs from train-mode output when batch norm
    # or dropout introduces train-time stochasticity.
    encoder.eval()
    eval_y = encoder(x)
    if (use_batch_norm or dropout_rate):
        assert (not torch.allclose(y, eval_y))
    else:
        assert torch.allclose(y, eval_y)
    check_parameter_updates(encoder, (x,))
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register the constructors and methods of ns3::ObjectFactoryValue on
    its pybindgen class wrapper (generated API-scan registration code)."""
    # Default, value and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    # AttributeValue virtual interface plus typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return
def set_value(dic, keys_chain, value):
    """Set *value* at the nested location addressed by *keys_chain*,
    creating intermediate dictionaries as needed.

    Args:
        dic: dictionary to mutate in place.
        keys_chain: non-empty sequence of keys; all but the last address
            nested dicts, the last is the key assigned to.
        value: the value to store.
    """
    node = dic
    # setdefault either descends into the existing child or creates an empty
    # dict — one lookup instead of the original membership-test-then-index.
    for key in keys_chain[:-1]:
        node = node.setdefault(key, {})
    node[keys_chain[-1]] = value
def split_testing_frames():
    """Copy resized (256x256) test frames and their pixel-level ground-truth
    masks into the flat BIN_ROOT test/image and test/groundtruth layout,
    grouped by scene class (the prefix of each scene folder name)."""
    testing_frames_root = os.path.join(DATA_ROOT, 'testing', 'frames')
    testing_pixel_mask_root = os.path.join(DATA_ROOT, 'testing', 'test_pixel_mask')
    img_root = os.path.join(BIN_ROOT, 'test', 'image')
    gt_root = os.path.join(BIN_ROOT, 'test', 'groundtruth')
    scene_folders = sorted(os.listdir(testing_frames_root))
    for sf in scene_folders:
        scene_class = sf.split('_')[0]
        img_path = os.path.join(img_root, scene_class)
        os.makedirs(img_path, exist_ok=True)
        gt_path = os.path.join(gt_root, scene_class)
        os.makedirs(gt_path, exist_ok=True)
        frames_path = os.path.join(testing_frames_root, sf)
        frames_list = sorted(glob.glob(frames_path + '/*.*'))
        frames_pixel_masks = np.load(os.path.join(testing_pixel_mask_root, sf + '.npy'))
        for (cnt, f) in enumerate(frames_list):
            # The original guarded on `cnt % 1 == 0` (always true) —
            # presumably a subsampling stride left at 1; removed as dead code.
            frame = cv2.imread(f)
            frame = cv2.resize(frame, (256, 256))
            frame_name = os.path.basename(f).split('.')[0] + '.png'
            cv2.imwrite(os.path.join(img_path, frame_name), frame)
            logger.info(os.path.join(img_path, frame_name))
            gt = frames_pixel_masks[cnt] * 255
            # BUG FIX: cv2.resize's third positional parameter is `dst`, not
            # the interpolation mode; pass it by keyword so nearest-neighbour
            # is actually used for the label mask (avoids interpolated,
            # non-binary label values).
            gt = cv2.resize(gt, (256, 256), interpolation=cv2.INTER_NEAREST)
            cv2.imwrite(os.path.join(gt_path, frame_name), gt)
def su3dabc(v: Tensor) -> Tensor:
    """Contract the symmetric su(3) structure constants with v: build the
    symmetric 8x8 matrix with entries a_ij = d_ijc * v_c, using only the
    nonzero d### constants defined at module level.

    NOTE(review): assumes the last axis of v (first after transpose) has 8
    adjoint components — confirm with callers.
    """
    vT = tf.transpose(v)
    # Each a_ij below is one nonzero entry d_ijc * v_c of the matrix.
    a00 = (d007 * vT[7])
    a03 = (d035 * vT[5])
    a04 = (d046 * vT[6])
    a05 = (d035 * vT[3])
    a06 = (d046 * vT[4])
    a07 = (d007 * vT[0])
    a11 = (d117 * vT[7])
    a13 = (d136 * vT[6])
    a14 = (d145 * vT[5])
    a15 = (d145 * vT[4])
    a16 = (d136 * vT[3])
    a17 = (d117 * vT[1])
    a22 = (d227 * vT[7])
    a23 = (d233 * vT[3])
    a24 = (d244 * vT[4])
    a25 = (d255 * vT[5])
    a26 = (d266 * vT[6])
    a27 = (d227 * vT[2])
    a33 = ((d337 * vT[7]) + (d233 * vT[2]))
    a35 = (d035 * vT[0])
    a36 = (d136 * vT[1])
    a37 = (d337 * vT[3])
    a44 = ((d447 * vT[7]) + (d244 * vT[2]))
    a45 = (d145 * vT[1])
    a46 = (d046 * vT[0])
    a47 = (d447 * vT[4])
    a55 = ((d557 * vT[7]) + (d255 * vT[2]))
    a57 = (d557 * vT[5])
    a66 = ((d667 * vT[7]) + (d266 * vT[2]))
    a67 = (d667 * vT[6])
    a77 = (d777 * vT[7])
    # Structurally-zero entries are filled with a matching-shape zero tensor.
    zii = tf.zeros(vT[0].shape, dtype=vT[0].dtype)
    # Assemble the symmetric matrix row by row (rows stacked on the last axis).
    return tf.stack([tf.stack([a00, zii, zii, a03, a04, a05, a06, a07], (- 1)), tf.stack([zii, a11, zii, a13, a14, a15, a16, a17], (- 1)), tf.stack([zii, zii, a22, a23, a24, a25, a26, a27], (- 1)), tf.stack([a03, a13, a23, a33, zii, a35, a36, a37], (- 1)), tf.stack([a04, a14, a24, zii, a44, a45, a46, a47], (- 1)), tf.stack([a05, a15, a25, a35, a45, a55, zii, a57], (- 1)), tf.stack([a06, a16, a26, a36, a46, zii, a66, a67], (- 1)), tf.stack([a07, a17, a27, a37, a47, a57, a67, a77], (- 1))], axis=(- 1))
def __getattr__(name):
    """Module-level attribute hook: forward access to deprecated `isolve`
    names to the private `_isolve` module, emitting a deprecation warning."""
    return _sub_module_deprecation(sub_package='sparse.linalg', module='isolve', private_modules=['_isolve'], all=__all__, attribute=name)
class _LifeSpan():
def __init__(self):
self.begin_func_idx = (- 1)
self.end_func_idx = (- 1)
def needed_at(self, func_idx):
needed = (self.begin_func_idx <= func_idx)
needed &= (self.end_func_idx >= func_idx)
return needed |
class Partition5(nn.Module):
    """Auto-generated pipeline-parallel partition holding ViT blocks 15-17.

    NOTE(review): this class is machine-generated partitioning code; layer
    modules are looked up by traced scope name and re-registered as l_0..l_38.
    """
    # Traced scope names of the layers owned by this partition, in execution order.
    LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[15]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[15]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[16]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[16]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm1]', 
    'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]']
    # Free (non-layer) tensors owned by this partition — none here.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:5'):
        super().__init__()
        # Re-register each traced layer under a short sequential name l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register free tensors as parameters (p_<n>) or buffers (b_<n>).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        self.input_structure = [1]
        # Maps the short local names back to original model state-dict keys.
        self.lookup = {'l_0': 'blocks.15.norm1', 'l_1': 'blocks.15.attn.qkv', 'l_2': 'blocks.15.attn.attn_drop', 'l_3': 'blocks.15.attn.proj', 'l_4': 'blocks.15.attn.proj_drop', 'l_5': 'blocks.15.drop_path', 'l_6': 'blocks.15.norm2', 'l_7': 'blocks.15.mlp.fc1', 'l_8': 'blocks.15.mlp.act', 'l_9': 'blocks.15.mlp.drop', 'l_10': 'blocks.15.mlp.fc2', 'l_11': 'blocks.15.mlp.drop', 'l_12': 'blocks.15.drop_path', 'l_13': 'blocks.16.norm1', 'l_14': 'blocks.16.attn.qkv', 'l_15': 'blocks.16.attn.attn_drop', 'l_16': 'blocks.16.attn.proj', 'l_17': 'blocks.16.attn.proj_drop', 'l_18': 'blocks.16.drop_path', 'l_19': 'blocks.16.norm2', 'l_20': 'blocks.16.mlp.fc1', 'l_21': 'blocks.16.mlp.act', 'l_22': 'blocks.16.mlp.drop', 'l_23': 'blocks.16.mlp.fc2', 'l_24': 'blocks.16.mlp.drop', 'l_25': 'blocks.16.drop_path', 'l_26': 'blocks.17.norm1', 'l_27': 'blocks.17.attn.qkv', 'l_28': 'blocks.17.attn.attn_drop', 'l_29': 'blocks.17.attn.proj', 'l_30': 'blocks.17.attn.proj_drop', 'l_31': 'blocks.17.drop_path', 'l_32': 'blocks.17.norm2', 'l_33': 'blocks.17.mlp.fc1', 'l_34': 'blocks.17.mlp.act', 'l_35': 'blocks.17.mlp.drop', 'l_36': 'blocks.17.mlp.fc2', 'l_37': 'blocks.17.mlp.drop', 'l_38': 'blocks.17.drop_path'}
        self.to(self.device)
def forward(self, *args):
x0 = unflatten(args, self.input_structure)[0]
t_0 = self.l_0(x0)
t_1 = t_0.shape
t_2 = t_1[0]
t_3 = t_1[1]
t_1 = t_1[2]
t_0 = self.l_1(t_0)
t_4 = (t_1 // 16)
t_4 = t_0.reshape(t_2, t_3, 3, 16, t_4)
t_4 = t_4.permute(2, 0, 3, 1, 4)
t_0 = t_4[0]
t_5 = t_4[1]
t_4 = t_4[2]
t_5 = t_5.transpose((- 2), (- 1))
t_5 = (t_0 t_5)
t_5 = (t_5 * 0.125)
t_5 = t_5.softmax(dim=(- 1))
t_5 = self.l_2(t_5)
t_4 = (t_5 t_4)
t_4 = t_4.transpose(1, 2)
t_1 = t_4.reshape(t_2, t_3, t_1)
t_1 = self.l_3(t_1)
t_1 = self.l_4(t_1)
t_1 = self.l_5(t_1)
t_1 = (x0 + t_1)
t_3 = self.l_6(t_1)
t_3 = self.l_7(t_3)
t_3 = self.l_8(t_3)
t_3 = self.l_9(t_3)
t_3 = self.l_10(t_3)
t_3 = self.l_11(t_3)
t_3 = self.l_12(t_3)
t_3 = (t_1 + t_3)
t_1 = self.l_13(t_3)
t_2 = t_1.shape
t_4 = t_2[0]
t_5 = t_2[1]
t_2 = t_2[2]
t_1 = self.l_14(t_1)
t_0 = (t_2 // 16)
t_0 = t_1.reshape(t_4, t_5, 3, 16, t_0)
t_0 = t_0.permute(2, 0, 3, 1, 4)
t_1 = t_0[0]
t_6 = t_0[1]
t_0 = t_0[2]
t_6 = t_6.transpose((- 2), (- 1))
t_6 = (t_1 t_6)
t_6 = (t_6 * 0.125)
t_6 = t_6.softmax(dim=(- 1))
t_6 = self.l_15(t_6)
t_0 = (t_6 t_0)
t_0 = t_0.transpose(1, 2)
t_2 = t_0.reshape(t_4, t_5, t_2)
t_2 = self.l_16(t_2)
t_2 = self.l_17(t_2)
t_2 = self.l_18(t_2)
t_2 = (t_3 + t_2)
t_3 = self.l_19(t_2)
t_3 = self.l_20(t_3)
t_3 = self.l_21(t_3)
t_3 = self.l_22(t_3)
t_3 = self.l_23(t_3)
t_3 = self.l_24(t_3)
t_3 = self.l_25(t_3)
t_3 = (t_2 + t_3)
t_2 = self.l_26(t_3)
t_5 = t_2.shape
t_4 = t_5[0]
t_0 = t_5[1]
t_5 = t_5[2]
t_2 = self.l_27(t_2)
t_6 = (t_5 // 16)
t_6 = t_2.reshape(t_4, t_0, 3, 16, t_6)
t_6 = t_6.permute(2, 0, 3, 1, 4)
t_2 = t_6[0]
t_1 = t_6[1]
t_6 = t_6[2]
t_1 = t_1.transpose((- 2), (- 1))
t_1 = (t_2 t_1)
t_1 = (t_1 * 0.125)
t_1 = t_1.softmax(dim=(- 1))
t_1 = self.l_28(t_1)
t_6 = (t_1 t_6)
t_6 = t_6.transpose(1, 2)
t_5 = t_6.reshape(t_4, t_0, t_5)
t_5 = self.l_29(t_5)
t_5 = self.l_30(t_5)
t_5 = self.l_31(t_5)
t_5 = (t_3 + t_5)
t_3 = self.l_32(t_5)
t_3 = self.l_33(t_3)
t_3 = self.l_34(t_3)
t_3 = self.l_35(t_3)
t_3 = self.l_36(t_3)
t_3 = self.l_37(t_3)
t_3 = self.l_38(t_3)
t_3 = (t_5 + t_3)
return (t_3,)
    # The following methods delegate to module-level pipeline helpers (of the
    # same names) instead of the inherited nn.Module implementations, so that
    # keys are translated via self.lookup and device moves stay partition-aware.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def main():
    """Build the configured GAN variant, optionally restore weights, and train."""
    model_name = CONFIG['exp']['model']
    if model_name not in ('binarygan', 'gan'):
        raise ValueError('Unrecognizable model name')
    print('Start experiment: {}'.format(CONFIG['exp']['exp_name']))
    x_train = load_data()
    with tf.Session(config=CONFIG['tensorflow']) as sess:
        # Pick the model class by name; both share the same constructor shape.
        model_cls = GAN if model_name == 'gan' else BinaryGAN
        gan = model_cls(sess, CONFIG['model'])
        gan.init_all()
        pretrained_dir = CONFIG['exp']['pretrained_dir']
        if pretrained_dir is not None:
            gan.load_latest(pretrained_dir)
        gan.train(x_train, CONFIG['train'])
.parametrize('precision_level', ['32b', '64b'])
def test_set_precision_by_string_wins(precision_level):
    """The explicit precision keyword must override the backend's own setting."""
    # Deliberately construct the backend with the opposite precision.
    conflicting = '32b' if precision_level == '64b' else '64b'
    backend = pyhf.tensor.numpy_backend(precision=conflicting)
    pyhf.set_backend(backend, precision=precision_level)
    assert pyhf.tensorlib.precision == precision_level.lower()
def inception_v3_parameters(weight_decay=4e-05, stddev=0.1, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
    """Yield an arg_scope carrying the default hyperparameters for
    Inception v3 layers.

    Args:
        weight_decay: L2 regularization applied to conv and fc weights.
        stddev: weight-initialization standard deviation for conv layers.
        batch_norm_decay: moving-average decay used by batch norm.
        batch_norm_epsilon: numerical-stability epsilon for batch norm.
    """
    # Nested scopes: weight decay for conv+fc, then conv-specific init,
    # activation and batch-norm settings.
    with scopes.arg_scope([ops.conv2d, ops.fc], weight_decay=weight_decay):
        with scopes.arg_scope([ops.conv2d], stddev=stddev, activation=tf.nn.relu, batch_norm_params={'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon}) as arg_scope:
            (yield arg_scope)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.