code stringlengths 101 5.91M |
|---|
def test_UnmaskedArray_NumpyArray():
    """Indexing, slicing, and typetracer behaviour of an UnmaskedArray over a NumpyArray."""
    layout = ak.contents.unmaskedarray.UnmaskedArray(
        ak.contents.numpyarray.NumpyArray(
            np.array([0.0, 1.1, 2.2, 3.3], dtype=np.float64)
        )
    )
    # The typetracer round-trip must preserve both form and type.
    assert layout.to_typetracer().form == layout.form
    assert layout.to_typetracer().form.type == layout.form.type
    assert len(layout) == 4
    # Scalar indexing, including negative indices, yields numpy scalars.
    assert layout[2] == 2.2
    assert layout[-2] == 2.2
    assert type(layout[2]) is np.float64
    # Out-of-range indices raise IndexError in both directions.
    with pytest.raises(IndexError):
        layout[4]
    with pytest.raises(IndexError):
        layout[-5]
    # Slicing keeps the UnmaskedArray wrapper.
    tail = layout[2:]
    assert isinstance(tail, ak.contents.unmaskedarray.UnmaskedArray)
    assert tail[0] == 2.2
    assert len(tail) == 2
    # String keys are invalid for a numeric array.
    with pytest.raises(IndexError):
        layout['bad']
def get_name_list(args, sample_io_handler: SampleIoHandler):
    """Return the list of sample names for the dataset selected in *args*.

    Raises Exception for datasets other than 'cifar10' and 'imagenet'.
    """
    if args.dataset == 'cifar10':
        # cifar10 samples are (label, index) pairs -> "<label>_<zero-padded index>".
        return [item[0] + ('_%05d' % item[1]) for item in sample_io_handler.load()]
    if args.dataset == 'imagenet':
        # imagenet samples carry a file path -> basename without its extension.
        return [item[0].split('/')[-1].split('.')[0] for item in sample_io_handler.load()]
    raise Exception('Dataset not implemented')
def register_Ns3TcpRxBuffer_methods(root_module, cls):
    """Register constructors and methods of ns3::TcpRxBuffer on *cls*.

    Auto-generated pybindgen registration code: each call declares one
    constructor or method signature of the wrapped C++ class.  Do not edit
    by hand; regenerate from the ns-3 API scan instead.
    """
    # Copy constructor and the size constructor (default size 0).
    cls.add_constructor([param('ns3::TcpRxBuffer const &', 'arg0')])
    cls.add_constructor([param('uint32_t', 'n', default_value='0')])
    # Data-path methods: insert a packet, query/extract buffered data.
    cls.add_method('Add', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::TcpHeader const &', 'tcph')])
    cls.add_method('Available', 'uint32_t', [], is_const=True)
    cls.add_method('Extract', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize')])
    cls.add_method('Finished', 'bool', [])
    # SACK bookkeeping accessors.
    cls.add_method('GetSackList', 'ns3::TcpOptionSack::SackList', [], is_const=True)
    cls.add_method('GetSackListSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Sequence-number management.
    cls.add_method('IncNextRxSequence', 'void', [])
    cls.add_method('MaxBufferSize', 'uint32_t', [], is_const=True)
    cls.add_method('MaxRxSequence', 'ns3::SequenceNumber32', [], is_const=True)
    cls.add_method('NextRxSequence', 'ns3::SequenceNumber32', [], is_const=True)
    cls.add_method('SetFinSequence', 'void', [param('ns3::SequenceNumber32 const &', 's')])
    cls.add_method('SetMaxBufferSize', 'void', [param('uint32_t', 's')])
    cls.add_method('SetNextRxSequence', 'void', [param('ns3::SequenceNumber32 const &', 's')])
    cls.add_method('Size', 'uint32_t', [], is_const=True)
    return
class Transform(Image.ImageTransformHandler):
    """Handler that applies a geometric transform via ``Image.transform``.

    NOTE(review): ``self.method`` is read by :meth:`getdata` but never
    assigned here — subclasses presumably provide it (e.g. as a class
    attribute naming the transform kind); confirm against the subclasses.
    """
    def __init__(self, data):
        # Transform coefficients; their interpretation depends on self.method.
        self.data = data
    def getdata(self):
        # (method, data) pair in the form Image.transform expects.
        return (self.method, self.data)
    def transform(self, size, image, **options):
        """Apply this transform to *image*, producing an image of *size*."""
        (method, data) = self.getdata()
        return image.transform(size, method, data, **options)
class EmptyTransferHook(TransferHook):
    """A do-nothing TransferHook: every lifecycle callback is a no-op.

    Useful as a default when the caller does not want any transfer
    instrumentation.
    """

    def __init__(self):
        pass

    def on_dispatch_start(self):
        pass

    def on_chunk_dispatched(self, chunks: List[Chunk]):
        pass

    def on_dispatch_end(self):
        pass

    def on_chunk_completed(self, chunks: List[Chunk], region_tag: Optional[str]=None):
        pass

    def on_transfer_end(self, transfer_stats):
        pass

    def on_transfer_error(self, error):
        pass
def convert_to_cosent_train_dataset(train_samples):
    """Flatten (text_a, text_b, label) triples into (text, label) pairs.

    Each valid 3-element sample contributes two entries, one per text,
    both carrying the shared label.  Samples of any other length are
    silently skipped.
    """
    pairs = []
    for sample in train_samples:
        if len(sample) != 3:
            continue
        label = sample[2]
        pairs.append((sample[0], label))
        pairs.append((sample[1], label))
    return pairs
def normal_model_entry_point_handler(model_name):
    """Look up the entry-point handler registered for *model_name*.

    Raises KeyError when no handler is registered under that name.
    """
    handlers = NORMAL_MODEL_ENTRY_POINTS_HANDLERS
    return handlers[model_name]
# NOTE(review): the "@pytest.mark" prefix of this decorator was stripped in
# the source (it read ".parametrize(...)", a syntax error); restored here.
@pytest.mark.parametrize('csc_container', CSC_CONTAINERS)
def test_sparse_and_verbose(csc_container):
    """BernoulliRBM.fit on sparse input with verbose=True prints the
    expected per-iteration pseudo-likelihood line."""
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    X = csc_container([[0.0], [1.0]])
    rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1, random_state=42, verbose=True)
    try:
        rbm.fit(X)
        s = sys.stdout.getvalue()
        # One iteration -> exactly one progress line with a numeric
        # pseudo-likelihood and a timing suffix.
        assert re.match('\\[BernoulliRBM\\] Iteration 1, pseudo-likelihood = -?(\\d)+(\\.\\d+)?, time = (\\d|\\.)+s', s)
    finally:
        # Always restore stdout, even if the assertions fail.
        sys.stdout = old_stdout
class Attention(nn.Module):
    """Multi-head self-attention with optional additive mask.

    Fix: the source had lost the ``@`` matrix-multiply operator in two
    places (``q k.transpose(...)`` and ``attn v``), which is a syntax
    error; both matmuls are restored below.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        assert dim % num_heads == 0
        # Default scale is 1/sqrt(head_dim) unless explicitly overridden.
        self.scale = qk_scale or head_dim ** -0.5
        # Single projection producing q, k and v concatenated.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, mask=None):
        """x: (B, N, C).  mask, if given, marks positions to exclude (True
        entries are filled with -inf before the softmax).  Returns (B, N, C)."""
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        if mask is not None:
            if attn.dim() == 4:
                # Broadcast a (N, N) mask over batch and heads.
                mask = mask.unsqueeze(0).unsqueeze(0).expand_as(attn)
            attn.masked_fill_(mask, -float('inf'))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class BeamlinesTest(unittest.TestCase):
    """Tests for beamline construction and wavefront propagation.

    Fix: ``setUpClass``/``tearDownClass`` were missing their
    ``@classmethod`` decorators, so unittest would invoke them as plain
    instance methods (passing the wrong first argument / failing to run
    class-level setup once).
    """

    @classmethod
    def setUpClass(cls):
        # Create the shared source wavefront once for the whole class.
        setupTestWavefront()
        cls.__fel_source = 'source.h5'

    @classmethod
    def tearDownClass(cls):
        # Remove the shared source wavefront created in setUpClass.
        os.remove('source.h5')
        del cls.__fel_source

    def setUp(self):
        # Per-test bookkeeping of artifacts to clean up in tearDown.
        self.__files_to_remove = []
        self.__dirs_to_remove = []

    def tearDown(self):
        for f in self.__files_to_remove:
            if os.path.isfile(f):
                os.remove(f)
        for d in self.__dirs_to_remove:
            if os.path.isdir(d):
                shutil.rmtree(d)

    def testSimpleBeamline(self):
        """The simple beamline factory returns a Beamline instance."""
        bl = simple_beamline.get_beamline()
        self.assertIsInstance(bl, Beamline)

    def testSimpleBeamlinePropagation(self):
        """Propagation through the simple beamline writes the output file."""
        output_file = 'prop_out.h5'
        self.__files_to_remove.append(output_file)
        propagate_s2e.propagate(self.__fel_source, output_file, simple_beamline.get_beamline)
        self.assertIn(output_file, os.listdir('.'))

    def testEXFELSPBDay1Beamline(self):
        """The SPB day-1 beamline factory returns a Beamline instance."""
        bl = exfel_spb_day1_beamline.get_beamline()
        self.assertIsInstance(bl, Beamline)

    def testEXFELSPBDay1BeamlinePropagation(self):
        """Propagation through the SPB day-1 beamline writes the output file."""
        output_file = 'prop_out.h5'
        self.__files_to_remove.append(output_file)
        propagate_s2e.propagate(self.__fel_source, output_file, exfel_spb_day1_beamline.get_beamline)
        self.assertIn(output_file, os.listdir('.'))

    def testEXFELSPBKBBeamline(self):
        """The SPB KB beamline factory returns a Beamline instance."""
        bl = exfel_spb_kb_beamline.get_beamline()
        self.assertIsInstance(bl, Beamline)

    def testEXFELSPBKBBeamlinePropagation(self):
        """Propagation through the SPB KB beamline writes the output file."""
        output_file = 'prop_out.h5'
        self.__files_to_remove.append(output_file)
        propagate_s2e.propagate(self.__fel_source, output_file, exfel_spb_kb_beamline.get_beamline)
        self.assertIn(output_file, os.listdir('.'))

    def testStepwise(self):
        """Stepwise propagation writes one numbered HDF5 file per step."""
        propagate_s2e.stepwise(self.__fel_source, simple_beamline.get_beamline)
        for f in range(3):
            filename = ('%04d.h5' % f)
            self.__files_to_remove.append(filename)
            self.assertIn(filename, os.listdir(os.path.dirname(os.path.abspath(__file__))))
class UpstreamPretrainExpert(MockingjayPretrainExpert):
    """Pre-training expert reusing the Mockingjay expert, but choosing the
    acoustic dataset from the upstream configuration."""
    def __init__(self, datarc, upstream_config, device='cuda', multi_gpu=False, **kwargs):
        # All state/setup is delegated to the Mockingjay expert.
        super(UpstreamPretrainExpert, self).__init__(datarc, upstream_config, device, multi_gpu, **kwargs)
    def _get_train_dataloader(self, extracter):
        """Build the training DataLoader and store it on ``self.dataloader``.

        Returns None; callers read ``self.dataloader`` afterwards.  Uses the
        on-the-fly (online) dataset when raw LibriSpeech audio is available
        and the audio config is not kaldi-based; otherwise falls back to the
        kaldi-feature dataset.  Batching is done inside the dataset
        (batch_size=1 with a custom collate_fn).
        """
        if (('libri_root' in self.datarc) and ('kaldi' not in self.upstream_config['audio'])):
            dataset = OnlineAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], target_level=self.upstream_config['audio']['target_level'], **self.datarc)
        else:
            dataset = KaldiAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], **self.datarc)
        self.dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)
def PathGraph(n, pos=None):
    """Return the path graph on *n* vertices.

    *pos* selects the embedding: 'circle' places vertices on a circle,
    'line' snakes them across rows of ten; when *pos* is anything else the
    circle layout is used automatically for 10 < n < 41, the line layout
    otherwise.
    """
    G = Graph(n, name='Path graph')
    # Decide on the embedding.
    if pos == 'circle':
        on_circle = True
    elif pos == 'line':
        on_circle = False
    else:
        on_circle = 10 < n < 41
    if on_circle:
        if n == 1:
            G.set_pos({0: (0, 0)})
        else:
            G._circle_embedding(list(range(n)), angle=(pi / 2))
    else:
        # Snake layout: rows of ten, alternating left-to-right and
        # right-to-left; the final (partial) row continues the zigzag.
        coords = {}
        node = 0
        full_rows = n // 10
        remainder = n % 10
        for row in range(full_rows + 1):
            width = 10 if row < full_rows else remainder
            for col in range(width):
                x = col if row % 2 == 0 else 9 - col
                coords[node] = (x, -row)
                node += 1
        G.set_pos(coords)
    G.add_edges((i, i + 1) for i in range(n - 1))
    return G
def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta):
    """Select the best step for a bound-constrained trust-region iteration.

    Three candidates are compared by their quadratic-model value: the
    (scaled-back) Newton step ``p``, its bound-reflected variant ``r``, and
    a step ``ag`` along the anti-gradient.  ``*_h`` quantities live in the
    scaled ("hat") space related to the original space by the diagonal
    scaling ``d``; ``theta`` keeps steps strictly interior to the bounds.
    Returns ``(step, step_h, -model_value)`` for the winning candidate.
    """
    # If the full step stays inside the bounds, take it unchanged.
    if in_bounds((x + p), lb, ub):
        p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
        return (p, p_h, (- p_value))
    # Otherwise walk along p only until the first bound is hit.
    (p_stride, hits) = step_size_to_bound(x, p, lb, ub)
    # Build the reflected direction: flip the components that hit a bound.
    r_h = np.copy(p_h)
    r_h[hits.astype(bool)] *= (- 1)
    r = (d * r_h)
    # Shrink p to the bound intersection point.
    p *= p_stride
    p_h *= p_stride
    x_on_bound = (x + p)
    # The reflected step may travel until the trust-region boundary or the
    # next bound, whichever is closer.
    (_, to_tr) = intersect_trust_region(p_h, r_h, Delta)
    (to_bound, _) = step_size_to_bound(x_on_bound, r, lb, ub)
    r_stride = min(to_bound, to_tr)
    if (r_stride > 0):
        # Lower limit keeps the combined step strictly interior; upper
        # limit backs off from the bound by theta (but not from the TR).
        r_stride_l = (((1 - theta) * p_stride) / r_stride)
        if (r_stride == to_bound):
            r_stride_u = (theta * to_bound)
        else:
            r_stride_u = to_tr
    else:
        # Degenerate: no room to reflect; make the interval empty.
        r_stride_l = 0
        r_stride_u = (- 1)
    if (r_stride_l <= r_stride_u):
        # Minimize the 1-D quadratic along the reflected direction,
        # starting from the point on the bound (s0=p_h).
        (a, b, c) = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h)
        (r_stride, r_value) = minimize_quadratic_1d(a, b, r_stride_l, r_stride_u, c=c)
        r_h *= r_stride
        r_h += p_h
        r = (r_h * d)
    else:
        r_value = np.inf
    # Back the truncated Newton step off the bound by theta.
    p *= theta
    p_h *= theta
    p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h)
    # Third candidate: anti-gradient, limited by TR radius and bounds.
    ag_h = (- g_h)
    ag = (d * ag_h)
    to_tr = (Delta / norm(ag_h))
    (to_bound, _) = step_size_to_bound(x, ag, lb, ub)
    if (to_bound < to_tr):
        ag_stride = (theta * to_bound)
    else:
        ag_stride = to_tr
    (a, b) = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h)
    (ag_stride, ag_value) = minimize_quadratic_1d(a, b, 0, ag_stride)
    ag_h *= ag_stride
    ag *= ag_stride
    # Pick the candidate with the lowest model value.
    if ((p_value < r_value) and (p_value < ag_value)):
        return (p, p_h, (- p_value))
    elif ((r_value < p_value) and (r_value < ag_value)):
        return (r, r_h, (- r_value))
    else:
        return (ag, ag_h, (- ag_value))
def test_RandomRequestApp_start():
    """Starting the app repeatedly picks responders roughly uniformly
    between the two candidate nodes."""
    tl = Timeline()
    node = FakeNode('n1', tl)
    app = RandomRequestApp(node, ['n2', 'n3'], 0, .0, .0, 10, 25, 0.8, 1.0)
    for _ in range(1000):
        app.start()
        # The chosen responder is always the last one reserved on the node.
        assert app.responder == node.reserve_log[-1]
    # With two choices, the n2/n3 ratio should be close to 1.
    n2_count = sum(1 for responder in node.reserve_log if responder == 'n2')
    assert abs(n2_count / (1000 - n2_count) - 1) < 0.1
def obj_from_dict(info, parent=None, default_args=None):
    """Instantiate an object described by a config dict.

    *info* must contain a 'type' key naming the class (a string resolved
    against *parent* or ``sys.modules``, or a type object); the remaining
    keys become constructor kwargs, with *default_args* filling any gaps.
    """
    assert isinstance(info, dict) and 'type' in info
    assert isinstance(default_args, dict) or default_args is None
    kwargs = info.copy()
    obj_type = kwargs.pop('type')
    if mmcv.is_str(obj_type):
        # Resolve the name either on the given parent object or as a module.
        obj_type = getattr(parent, obj_type) if parent is not None else sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError(f'type must be a str or valid type, but got {type(obj_type)}')
    if default_args is not None:
        for name, value in default_args.items():
            kwargs.setdefault(name, value)
    return obj_type(**kwargs)
def download_s3_file_with_caching(key, local_filename, *, bucket, cache_on_local_disk=True, cache_root_path=None, verbose=False, special_verbose=True, num_tries=5, initial_delay=1.0, delay_factor=math.sqrt(2.0), num_replicas=1, skip_modification_time_check=False):
    """Download an S3 object to *local_filename*, optionally through an
    on-disk cache rooted at *cache_root_path*.

    With caching enabled, the cached copy is reused when it exists and is
    not older than the remote object (the S3 LastModified check can be
    skipped via *skip_modification_time_check*).  Downloads are retried
    with exponential backoff (num_tries / initial_delay / delay_factor)
    and may be spread over *num_replicas* replicated keys.
    """
    if cache_on_local_disk:
        assert (cache_root_path is not None)
        cache_root_path = pathlib.Path(cache_root_path).resolve()
        currently_cached = False
        # Cache layout mirrors the S3 key structure under the cache root.
        cache_filepath = (cache_root_path / key)
        if (not cache_filepath.is_file()):
            cache_filepath.parent.mkdir(parents=True, exist_ok=True)
        elif skip_modification_time_check:
            if verbose:
                print(f'Skipping the file modification time check the local copy in the cache.')
            currently_cached = True
        else:
            if verbose:
                print(f'Getting metadata to check the modification time compared to the local copy ... ', end='')
            metadata_start = timer()
            metadata = get_s3_object_metadata_with_backoff(key, bucket=bucket, num_tries=num_tries, initial_delay=initial_delay, delay_factor=delay_factor)
            metadata_end = timer()
            if verbose:
                print(f'took {(metadata_end - metadata_start):.3f} seconds')
            # Compare mtimes in UTC; the local copy counts as fresh only if
            # strictly newer than the remote object.
            local_time = datetime.datetime.fromtimestamp(cache_filepath.stat().st_mtime, datetime.timezone.utc)
            remote_time = metadata['LastModified']
            if (local_time <= remote_time):
                if verbose:
                    print(f'Local copy of key "{key}" is outdated')
            else:
                currently_cached = True
        if (not currently_cached):
            if (verbose or special_verbose):
                print('{} not available locally or outdated, downloading from S3 ... '.format(key))
            download_start = timer()
            download_s3_file_with_backoff(key, str(cache_filepath), bucket=bucket, initial_delay=initial_delay, delay_factor=delay_factor, num_replicas=num_replicas)
            download_end = timer()
            if verbose:
                print('Downloading took {:.3f} seconds'.format((download_end - download_start)))
        assert cache_filepath.is_file()
        if verbose:
            print(f'Copying to the target from the cache file {cache_filepath} ...')
        # The caller always receives a copy at local_filename; the cache
        # keeps its own copy.
        shutil.copy(cache_filepath, local_filename)
    else:
        # No cache: download straight to the target path.
        if verbose:
            print('Loading {} from S3 ... '.format(key))
        download_start = timer()
        download_s3_file_with_backoff(key, local_filename, bucket=bucket, initial_delay=initial_delay, delay_factor=delay_factor, num_replicas=num_replicas)
        download_end = timer()
        if verbose:
            print('Downloading took {:.3f} seconds'.format((download_end - download_start)))
def mask_rcnn_fcn_head_v1upXconvs(model, blob_in, dim_in, spatial_scale, num_convs):
    """Mask R-CNN FCN mask head: *num_convs* 3x3 convs followed by a 2x
    transposed-conv upsample ("v1upXconvs" design).

    Returns ``(blob_mask, dim_out)`` where *blob_mask* is the ReLU'd
    upsampled feature blob and *dim_out* is cfg.MRCNN.DIM_REDUCED.
    All layer hyperparameters come from the global Detectron cfg.
    """
    # RoI-align/pool the input features onto the mask resolution.
    current = model.RoIFeatureTransform(blob_in, blob_out='_[mask]_roi_feat', blob_rois='mask_rois', method=cfg.MRCNN.ROI_XFORM_METHOD, resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION, sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale)
    dilation = cfg.MRCNN.DILATION
    dim_inner = cfg.MRCNN.DIM_REDUCED
    # Stack of dilated 3x3 conv + ReLU layers; padding tracks the dilation
    # so the spatial size is preserved.
    for i in range(num_convs):
        current = model.Conv(current, ('_[mask]_fcn' + str((i + 1))), dim_in, dim_inner, kernel=3, dilation=dilation, pad=(1 * dilation), stride=1, weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}), bias_init=('ConstantFill', {'value': 0.0}))
        current = model.Relu(current, current)
        dim_in = dim_inner
    # 2x upsample via transposed convolution (kernel 2, stride 2).
    model.ConvTranspose(current, 'conv5_mask', dim_inner, dim_inner, kernel=2, pad=0, stride=2, weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}), bias_init=const_fill(0.0))
    blob_mask = model.Relu('conv5_mask', 'conv5_mask')
    return (blob_mask, dim_inner)
def get_multi_process_env(model_settings, model_path, num_of_envs, ckpt_step):
    """Build a SubprocVecEnv of *num_of_envs* monitored CausalWorld envs.

    Each worker gets a rank-dependent seed and its own Monitor file named
    "<rank>_<ckpt_step or 0>" under *model_path*.
    """
    def _make_env(rank):
        def _init():
            task = generate_task(model_settings['benchmarks']['task_generator_id'], **model_settings['task_configs'])
            env = CausalWorld(task=task, **model_settings['world_params'], seed=model_settings['world_seed'] + rank)
            env = CurriculumWrapper(env, intervention_actors=model_settings['intervention_actors'], actives=model_settings['actives'])
            # Checkpoint step distinguishes monitor files across restarts.
            prefix = 0 if ckpt_step is None else ckpt_step
            monitor_file = os.path.join(model_path, str(rank) + '_' + str(prefix))
            return Monitor(env, filename=monitor_file, info_keywords=('fractional_success',))
        return _init
    # rank is bound per factory call, so each subprocess sees its own value.
    return SubprocVecEnv([_make_env(rank=i) for i in range(num_of_envs)])
def register_Ns3PacketCounterCalculator_methods(root_module, cls):
    """Register constructors and methods of ns3::PacketCounterCalculator.

    Auto-generated pybindgen registration code; regenerate rather than
    editing by hand.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::PacketCounterCalculator const &', 'arg0')])
    cls.add_constructor([])
    # Trace-sink callbacks for frame/packet updates.
    cls.add_method('FrameUpdate', 'void', [param('std::string', 'path'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Mac48Address', 'realto')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('PacketUpdate', 'void', [param('std::string', 'path'), param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
# NOTE(review): the decorator line was mangled in the source to
# "_footprint"; restored as skimage's @default_footprint, which supplies a
# default footprint when None is passed — confirm against the module.
@default_footprint
def white_tophat(image, footprint=None, out=None, *, mode='reflect', cval=0.0):
    """Return the white top-hat of *image*: image minus its morphological
    opening, highlighting bright spots smaller than the footprint.

    Writes into *out* when given (including fully in-place when
    ``out is image``) and returns it.  Boolean images use logical XOR
    instead of subtraction.
    """
    if out is image:
        # Fully in-place: the opening must be computed into a separate
        # array before subtracting it from out.
        opened = opening(image, footprint, mode=mode, cval=cval)
        if np.issubdtype(opened.dtype, bool):
            np.logical_xor(out, opened, out=out)
        else:
            out -= opened
        return out
    # Otherwise the opening can be written directly into out.
    out = opening(image, footprint, out=out, mode=mode, cval=cval)
    if np.issubdtype(out.dtype, bool):
        np.logical_xor(image, out, out=out)
    else:
        np.subtract(image, out, out=out)
    return out
def load_sharded_checkpoint(model, folder, strict=True):
    """Load a sharded (index + shard files) checkpoint into *model*.

    ``mm_projector`` weights are split out of each shard and loaded into
    ``model.get_model().mm_projector`` separately; everything else goes
    through ``model.load_state_dict(strict=False)``.

    Fixes: the unexpected-keys message wrongly said "Missing key(s)", and
    the strict-mode error message was built but never raised.
    Returns an ``_IncompatibleKeys(missing_keys, unexpected_keys)`` record.
    """
    index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
    if not os.path.isfile(index_file):
        raise ValueError(f"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.")
    with open(index_file, 'r', encoding='utf-8') as f:
        index = json.load(f)
    shard_files = list(set(index['weight_map'].values()))
    loaded_keys = index['weight_map'].keys()
    model_keys = model.state_dict().keys()
    missing_keys = [key for key in model_keys if key not in loaded_keys]
    unexpected_keys = [key for key in loaded_keys if key not in model_keys]
    if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
        error_message = f'Error(s) in loading state_dict for {model.__class__.__name__}'
        if len(missing_keys) > 0:
            str_missing_keys = ','.join([f'"{k}"' for k in missing_keys])
            error_message += f'\nMissing key(s): {str_missing_keys}.'
        if len(unexpected_keys) > 0:
            str_unexpected_keys = ','.join([f'"{k}"' for k in unexpected_keys])
            error_message += f'\nUnexpected key(s): {str_unexpected_keys}.'
        # strict=True must actually fail on a key mismatch.
        raise RuntimeError(error_message)
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(folder, shard_file), map_location='cpu')
        # Load everything except the projector weights into the model.
        model.load_state_dict({k: v for (k, v) in state_dict.items() if ('mm_projector' not in k)}, strict=False)
        if any(('mm_projector' in x) for x in state_dict.keys()):
            print(f'loading mm_projector params from ckpt file {shard_file}')
            # Projector keys are re-keyed to their final component
            # (e.g. "...mm_projector.weight" -> "weight").
            model.get_model().mm_projector.load_state_dict({k.split('.')[-1]: v for (k, v) in state_dict.items() if ('mm_projector' in k)})
        # Free the shard before loading the next one.
        del state_dict
        gc.collect()
    return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)
def get_ego_node_num(graph, target_idx):
    """Return the node count of the radius-2 ego graph around *target_idx*."""
    return nx.ego_graph(graph, target_idx, radius=2).number_of_nodes()
def prepare_output_path(output_path: str, allowed_suffix: List[str]=None, tag: str='output file', path_type: Literal[('file', 'dir', 'auto')]='auto', overwrite: bool=True) -> None:
    """Check and prepare *output_path* for writing results.

    Creates missing parent/target directories, validates the suffix of a
    new file against *allowed_suffix*, and enforces *overwrite* on
    existing files / non-empty directories.

    Fix: the mutable default ``allowed_suffix=[]`` is replaced by None
    (callers passing nothing see identical behavior).

    Raises FileNotFoundError on a bad suffix or unknown existence state,
    FileExistsError when the target exists and overwrite is False.
    """
    if allowed_suffix is None:
        allowed_suffix = []
    if path_type.lower() == 'dir':
        # Directories have no suffix constraint.
        allowed_suffix = []
    exist_result = check_path_existence(output_path, path_type=path_type)
    if exist_result == Existence.MissingParent:
        warnings.warn((f'The parent folder of {tag} does not exist: {output_path},' + f' will make dir {Path(output_path).parent.absolute().__str__()}'))
        os.makedirs(Path(output_path).parent.absolute().__str__(), exist_ok=True)
    elif exist_result == Existence.DirectoryNotExist:
        os.mkdir(output_path)
        print(f'Making directory {output_path} for saving results.')
    elif exist_result == Existence.FileNotExist:
        # New file: only the suffix needs validation.
        suffix_matched = check_path_suffix(output_path, allowed_suffix=allowed_suffix)
        if not suffix_matched:
            raise FileNotFoundError(f"The {tag} should be {', '.join(allowed_suffix)}: {output_path}.")
    elif exist_result == Existence.FileExist:
        if not overwrite:
            raise FileExistsError(f'{output_path} exists (set overwrite = True to overwrite).')
        else:
            print(f'Overwriting {output_path}.')
    elif exist_result == Existence.DirectoryExistEmpty:
        pass
    elif exist_result == Existence.DirectoryExistNotEmpty:
        if not overwrite:
            raise FileExistsError(f'{output_path} is not empty (set overwrite = True to overwrite the files).')
        else:
            print(f'Overwriting {output_path} and its files.')
    else:
        raise FileNotFoundError(f'No Existence type for {output_path}.')
# NOTE(review): decorator prefixes were stripped in the source
# (".parametrize", "_utils.test", and the inner "@ti.kernel" lines were
# missing entirely); restored to the conventional taichi test form.
@pytest.mark.parametrize('dtype', [ti.u8, ti.u16, ti.u32])
@test_utils.test(exclude=[ti.opengl, ti.gles])
def test_cast_uint_to_float(dtype):
    """Casting unsigned ints to f32 works both via ti.cast and the
    ti.f32(...) sugar, and preserves the value 255."""
    @ti.kernel
    def func(a: dtype) -> ti.f32:
        return ti.cast(a, ti.f32)

    @ti.kernel
    def func_sugar(a: dtype) -> ti.f32:
        return ti.f32(a)

    assert func(255) == func_sugar(255) == 255
def collate_fn(examples):
    """Batch a list of per-example dicts by stacking arrays per split/key.

    Each example is {'train': {key: array}, 'val': {key: array}}; the
    result has the same structure with arrays stacked along a new leading
    batch axis.  Keys are taken from the first example.
    """
    batched = {}
    for split in ['train', 'val']:
        split_keys = examples[0][split].keys()
        batched[split] = {key: np.stack([example[split][key] for example in examples]) for key in split_keys}
    return batched
class AvgPoolWithMask(nn.Module):
    """Average pooling that ignores masked-out positions.

    The mask (batch, seq) weights each position; the sum over *dim* is
    divided by the number of unmasked positions.
    """

    def __init__(self):
        super(AvgPoolWithMask, self).__init__()
        self.inf = .0  # unused here; kept for interface compatibility

    def forward(self, tensor, mask, dim=1):
        # Reshape the mask to broadcast over the feature dimension.
        weights = mask.view(mask.size(0), mask.size(1), -1).float()
        weighted_sum = torch.sum(tensor * weights.float(), dim=dim)
        return weighted_sum / torch.sum(weights.float(), dim=1)
def prepare_ravdess(data_folder, save_json, seed=12):
    """Prepare the RAVDESS dataset: VAD + resampling per repo, then
    concatenation into the JSON manifest.

    Skips all work (returning None) when a previous run already completed;
    otherwise returns the manifest produced by concat_wavs.
    """
    random.seed(seed)
    if skip(save_json):
        logger.info('Preparation completed in previous run, skipping.')
        return
    logger.info('Applying VAD and resampling ...')
    # Process each source repo into data_folder/processed/<repo>.
    for repo in repos:
        src_dir = os.path.join(data_folder, repo)
        dst_dir = os.path.join(data_folder, 'processed', repo)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        resampling_for_folder(src_dir, dst_dir)
        # VAD runs in place on the resampled output.
        vad_for_folder(dst_dir, dst_dir)
    logger.info('vad and resampling finished')
    logger.info('Start RAVDESS concatenation ...')
    data_json = concat_wavs(data_folder, save_json)
    logger.info('RAVDESS concatenation finished ...')
    return data_json
# NOTE(review): the decorator was mangled in the source to
# "_level_function(...)"; restored as awkward's @high_level_function
# dispatch decorator — confirm against the module's other operations.
@high_level_function(module='ak.str')
def match_like(array, pattern, *, ignore_case=False, highlevel=True, behavior=None, attrs=None):
    """SQL-LIKE pattern matching on string arrays (dispatched operation).

    The generator-style body first yields the dispatchable arguments, then
    delegates to the implementation.
    """
    # Dispatch protocol: yield the arguments eligible for dispatch.
    yield (array,)
    return _impl(array, pattern, ignore_case, highlevel, behavior, attrs)
def load_ranking(path, qrels=None):
    """Load a ranking file into ``{qid: [pid, ...]}`` preserving file order.

    Supports 4-column lines (qid pid rank score) and 6-column TREC-style
    lines (qid _ pid rank ...).  When *qrels* is given, queries absent
    from it are skipped.

    Fixes: narrowed the bare ``except:`` (which also swallowed
    KeyboardInterrupt), chained the original exception onto the IOError,
    and made lines with an unexpected column count fail instead of
    silently reusing the previous line's qid/pid.
    """
    qid_to_ranked_candidate_passages = {}
    with open(path, 'r') as f:
        for l in f:
            try:
                l = l.strip().split()
                if qrels is not None and l[0] not in qrels:
                    continue
                if len(l) == 4:
                    qid = l[0]
                    pid = l[1].strip()
                    rank = int(l[2])
                elif len(l) == 6:
                    qid = l[0]
                    pid = l[2].strip()
                    rank = int(l[3])
                else:
                    raise ValueError('unexpected number of columns')
                if qid not in qid_to_ranked_candidate_passages:
                    qid_to_ranked_candidate_passages[qid] = []
                qid_to_ranked_candidate_passages[qid].append(pid)
            except (ValueError, IndexError) as exc:
                raise IOError('"%s" is not valid format' % l) from exc
    return qid_to_ranked_candidate_passages
# NOTE(review): the click decorator prefixes were stripped in the source
# (lines read "('detect-content')", "('--threshold', ...)" and
# "_context"); restored to the standard click command form.
@click.command('detect-content')
@click.option('--threshold', '-t', metavar='VAL', type=click.FLOAT, default=30.0, show_default=True, help='Threshold value (float) that the content_val frame metric must exceed to trigger a new scene. Refers to frame metric content_val in stats file.')
@click.option('--min-scene-len', '-m', metavar='TIMECODE', type=click.STRING, default='0.6s', show_default=True, help='Minimum size/length of any scene. TIMECODE can be specified as exact number of frames, a time in seconds followed by s, or a timecode in the format HH:MM:SS or HH:MM:SS.nnn')
@click.pass_context
def detect_content_command(ctx, threshold, min_scene_len):
    """Register a ContentDetector on the CLI context with the given
    threshold and minimum scene length."""
    # The TIMECODE string is resolved against the context's framerate.
    min_scene_len = parse_timecode(ctx.obj, min_scene_len)
    logging.debug('Detecting content, parameters:\n threshold: %d, min-scene-len: %d', threshold, min_scene_len)
    ctx.obj.add_detector(scenedetect.detectors.ContentDetector(threshold=threshold, min_scene_len=min_scene_len))
def _is_not_done_broadcast(lengths, i, tensor):
is_not_done = ((lengths > (i + 1)) * 1.0)
while (len(is_not_done.shape) < len(tensor.shape)):
is_not_done = jnp.expand_dims(is_not_done, (- 1))
return is_not_done |
class Phi6(CompositeBase):
    """Composite Legendre basis for 12th-order (6-fold) boundary conditions.

    Fix: ``boundary_condition`` and ``short_name`` take no self/cls
    argument — the ``@staticmethod`` decorators (stripped in the source)
    are restored.
    """
    def __init__(self, N, quad='LG', bc=((0,) * 12), domain=((- 1), 1), dtype=float, padding_factor=1, dealias_direct=False, coordinates=None, **kw):
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc, padding_factor=padding_factor, dealias_direct=dealias_direct, coordinates=coordinates)
        # Sparse stencil relating this basis to orthogonal polynomials;
        # `n` here is presumably the module-level sympy symbol — confirm.
        self._stencil = {0: (1 / (((((2 * ((2 * n) + 3)) * ((2 * n) + 5)) * ((2 * n) + 7)) * ((2 * n) + 9)) * ((2 * n) + 11))), 2: ((- 3) / ((((((2 * n) + 3) * ((2 * n) + 7)) * ((2 * n) + 9)) * ((2 * n) + 11)) * ((2 * n) + 15))), 4: (15 / (((((2 * ((2 * n) + 5)) * ((2 * n) + 7)) * ((2 * n) + 11)) * ((2 * n) + 15)) * ((2 * n) + 17))), 6: (((- 10) * ((2 * n) + 13)) / (((((((2 * n) + 7) * ((2 * n) + 9)) * ((2 * n) + 11)) * ((2 * n) + 15)) * ((2 * n) + 17)) * ((2 * n) + 19))), 8: (15 / (((((2 * ((2 * n) + 9)) * ((2 * n) + 11)) * ((2 * n) + 15)) * ((2 * n) + 19)) * ((2 * n) + 21))), 10: ((- 3) / ((((((2 * n) + 11) * ((2 * n) + 15)) * ((2 * n) + 17)) * ((2 * n) + 19)) * ((2 * n) + 23))), 12: (1 / (((((2 * ((2 * n) + 15)) * ((2 * n) + 17)) * ((2 * n) + 19)) * ((2 * n) + 21)) * ((2 * n) + 23)))}

    @staticmethod
    def boundary_condition():
        return '12th order'

    @staticmethod
    def short_name():
        return 'P6'
class BsdfSerializer(object):
    """Encoder/decoder for the BSDF binary format.

    Holds the registered extensions and the serialization options
    (compression, checksums, float width, streaming/lazy-blob loading).
    Values map to single-character type codes on disk: v=None, y=True,
    n=False, h=int16, i=int64, f=float32, d=float64, s=string, l=list,
    m=dict, b=blob/bytes.  An uppercase type code signals that an
    extension id follows.
    """
    def __init__(self, extensions=None, **options):
        # name -> extension instance, and class -> (name, encode) for fast
        # lookup of the extension handling a given Python class.
        self._extensions = {}
        self._extensions_by_cls = {}
        if (extensions is None):
            extensions = standard_extensions
        for extension in extensions:
            self.add_extension(extension)
        self._parse_options(**options)
    def _parse_options(self, compression=0, use_checksum=False, float64=True, load_streaming=False, lazy_blob=False):
        """Validate and store serializer options.

        compression: 0/'no', 1/'zlib', or 2/'bz2'.  float64 selects 8-byte
        vs 4-byte float encoding.  load_streaming / lazy_blob control how
        streamed lists and blobs are materialized on decode.
        """
        # Allow string aliases for the compression level.
        if isinstance(compression, string_types):
            m = {'no': 0, 'zlib': 1, 'bz2': 2}
            compression = m.get(compression.lower(), compression)
        if (compression not in (0, 1, 2)):
            raise TypeError('Compression must be 0, 1, 2, "no", "zlib", or "bz2"')
        self._compression = compression
        self._use_checksum = bool(use_checksum)
        self._float64 = bool(float64)
        self._load_streaming = bool(load_streaming)
        self._lazy_blob = bool(lazy_blob)
    def add_extension(self, extension_class):
        """Register an Extension subclass; returns the class (usable as a
        decorator).  Overwrites a same-named extension with a warning."""
        if (not (isinstance(extension_class, type) and issubclass(extension_class, Extension))):
            raise TypeError('add_extension() expects a Extension class.')
        extension = extension_class()
        name = extension.name
        if (not isinstance(name, str)):
            raise TypeError('Extension name must be str.')
        # Names are length-prefixed with one byte on disk, hence < 251.
        if ((len(name) == 0) or (len(name) > 250)):
            raise NameError('Extension names must be nonempty and shorter than 251 chars.')
        if (name in self._extensions):
            logger.warning(('BSDF warning: overwriting extension "%s", consider removing first' % name))
        # extension.cls may be a single class, a sequence, or empty.
        cls = extension.cls
        if (not cls):
            clss = []
        elif isinstance(cls, (tuple, list)):
            clss = cls
        else:
            clss = [cls]
        for cls in clss:
            if (not isinstance(cls, classtypes)):
                raise TypeError('Extension classes must be types.')
        for cls in clss:
            self._extensions_by_cls[cls] = (name, extension.encode)
        self._extensions[name] = extension
        return extension_class
    def remove_extension(self, name):
        """Unregister the extension called *name* (no-op if absent)."""
        if (not isinstance(name, str)):
            raise TypeError('Extension name must be str.')
        if (name in self._extensions):
            self._extensions.pop(name)
        # Also drop any class mappings that point at this extension.
        for cls in list(self._extensions_by_cls.keys()):
            if (self._extensions_by_cls[cls][0] == name):
                self._extensions_by_cls.pop(cls)
    def _encode(self, f, value, streams, ext_id):
        """Encode one value to file-like *f*.

        *ext_id* (or None) is folded into the type code by
        encode_type_id; *streams* collects the (single) active stream.
        Unknown classes are routed through a matching extension, which
        encodes to a base BSDF value.
        """
        x = encode_type_id
        if (value is None):
            f.write(x(b'v', ext_id))
        elif (value is True):
            f.write(x(b'y', ext_id))
        elif (value is False):
            f.write(x(b'n', ext_id))
        elif isinstance(value, integer_types):
            # Small ints get a compact 2-byte encoding, others 8 bytes.
            if ((- 32768) <= value <= 32767):
                f.write((x(b'h', ext_id) + spack('h', value)))
            else:
                f.write((x(b'i', ext_id) + spack('<q', value)))
        elif isinstance(value, float):
            if self._float64:
                f.write((x(b'd', ext_id) + spack('<d', value)))
            else:
                f.write((x(b'f', ext_id) + spack('<f', value)))
        elif isinstance(value, unicode_types):
            bb = value.encode('UTF-8')
            f.write((x(b's', ext_id) + lencode(len(bb))))
            f.write(bb)
        elif isinstance(value, (list, tuple)):
            f.write((x(b'l', ext_id) + lencode(len(value))))
            for v in value:
                self._encode(f, v, streams, None)
        elif isinstance(value, dict):
            f.write((x(b'm', ext_id) + lencode(len(value))))
            for (key, v) in value.items():
                # Keys must be valid identifiers (BSDF map constraint).
                if PY3:
                    assert key.isidentifier()
                else:
                    assert _isidentifier(key)
                name_b = key.encode('UTF-8')
                f.write(lencode(len(name_b)))
                f.write(name_b)
                self._encode(f, v, streams, None)
        elif isinstance(value, bytes):
            # Raw bytes are wrapped in a Blob to get compression/checksum.
            f.write(x(b'b', ext_id))
            blob = Blob(value, compression=self._compression, use_checksum=self._use_checksum)
            blob._to_file(f)
        elif isinstance(value, Blob):
            f.write(x(b'b', ext_id))
            value._to_file(f)
        elif isinstance(value, BaseStream):
            if (value.mode != 'w'):
                raise ValueError('Cannot serialize a read-mode stream.')
            elif isinstance(value, ListStream):
                # 255 marks an unclosed (streaming) list; length written 0.
                f.write((x(b'l', ext_id) + spack('<BQ', 255, 0)))
            else:
                raise TypeError('Only ListStream is supported')
            # Only one stream per file, and it must come last (see save()).
            if (len(streams) > 0):
                raise ValueError('Can only have one stream per file.')
            streams.append(value)
            value._activate(f, self._encode, self._decode)
        else:
            # Not a base type: find an extension, by class first, then by
            # match(); extensions must not encode directly to extensions.
            if (ext_id is not None):
                raise ValueError(('Extension %s wronfully encodes object to another extension object (though it may encode to a list/dict that contains other extension objects).' % ext_id))
            ex = self._extensions_by_cls.get(value.__class__, None)
            if (ex is None):
                for (name, c) in self._extensions.items():
                    if c.match(self, value):
                        ex = (name, c.encode)
                        break
                else:
                    ex = None
            if (ex is not None):
                (ext_id2, extension_encode) = ex
                self._encode(f, extension_encode(self, value), streams, ext_id2)
            else:
                t = 'Class %r is not a valid base BSDF type, nor is it handled by an extension.'
                raise TypeError((t % value.__class__.__name__))
    def _decode(self, f):
        """Decode and return one value from file-like *f*.

        Raises EOFError at end of input.  An uppercase type code means an
        extension id (length-prefixed UTF-8 name) follows the code byte;
        the decoded base value is then passed through that extension.
        """
        char = f.read(1)
        c = char.lower()
        if (not char):
            raise EOFError()
        elif (char != c):
            # Uppercase code: read the extension id that follows.
            n = strunpack('<B', f.read(1))[0]
            ext_id = f.read(n).decode('UTF-8')
        else:
            ext_id = None
        if (c == b'v'):
            value = None
        elif (c == b'y'):
            value = True
        elif (c == b'n'):
            value = False
        elif (c == b'h'):
            value = strunpack('<h', f.read(2))[0]
        elif (c == b'i'):
            value = strunpack('<q', f.read(8))[0]
        elif (c == b'f'):
            value = strunpack('<f', f.read(4))[0]
        elif (c == b'd'):
            value = strunpack('<d', f.read(8))[0]
        elif (c == b's'):
            # Length byte 253 escapes to an 8-byte length.
            n_s = strunpack('<B', f.read(1))[0]
            if (n_s == 253):
                n_s = strunpack('<Q', f.read(8))[0]
            value = f.read(n_s).decode('UTF-8')
        elif (c == b'l'):
            n = strunpack('<B', f.read(1))[0]
            if (n >= 254):
                # 254 = closed stream (count follows), 255 = unclosed.
                closed = (n == 254)
                n = strunpack('<Q', f.read(8))[0]
                if self._load_streaming:
                    value = ListStream((n if closed else 'r'))
                    value._activate(f, self._encode, self._decode)
                elif closed:
                    value = [self._decode(f) for i in range(n)]
                else:
                    # Unclosed stream: read items until the file ends.
                    value = []
                    try:
                        while True:
                            value.append(self._decode(f))
                    except EOFError:
                        pass
            else:
                if (n == 253):
                    n = strunpack('<Q', f.read(8))[0]
                value = [self._decode(f) for i in range(n)]
        elif (c == b'm'):
            value = dict()
            n = strunpack('<B', f.read(1))[0]
            if (n == 253):
                n = strunpack('<Q', f.read(8))[0]
            for i in range(n):
                # Each entry: length-prefixed key, then the value.
                n_name = strunpack('<B', f.read(1))[0]
                if (n_name == 253):
                    n_name = strunpack('<Q', f.read(8))[0]
                assert (n_name > 0)
                name = f.read(n_name).decode('UTF-8')
                value[name] = self._decode(f)
        elif (c == b'b'):
            # Lazy blobs keep a file reference; otherwise bytes are read now.
            if self._lazy_blob:
                value = Blob((f, True))
            else:
                blob = Blob((f, False))
                value = blob.get_bytes()
        else:
            raise RuntimeError(('Parse error %r' % char))
        if (ext_id is not None):
            extension = self._extensions.get(ext_id, None)
            if (extension is not None):
                value = extension.decode(self, value)
            else:
                # Unknown extension: return the raw base value.
                logger.warning(('BSDF warning: no extension found for %r' % ext_id))
        return value
    def encode(self, ob):
        """Serialize *ob* and return the BSDF-encoded bytes."""
        f = BytesIO()
        self.save(f, ob)
        return f.getvalue()
    def save(self, f, ob):
        """Write *ob* to file-like *f*, preceded by the BSDF magic and the
        two-byte format version."""
        f.write(b'BSDF')
        f.write(struct.pack('<B', VERSION[0]))
        f.write(struct.pack('<B', VERSION[1]))
        streams = []
        self._encode(f, ob, streams, None)
        if (len(streams) > 0):
            # A stream must sit at the very end of the file.
            stream = streams[0]
            if (stream._start_pos != f.tell()):
                raise ValueError('The stream object must be the last object to be encoded.')
    def decode(self, bb):
        """Deserialize BSDF-encoded bytes *bb* and return the value."""
        f = BytesIO(bb)
        return self.load(f)
    def load(self, f):
        """Read one BSDF document from file-like *f* and return the value.

        A differing major version is an error; a higher minor version only
        logs a warning.
        """
        f4 = f.read(4)
        if (f4 != b'BSDF'):
            raise RuntimeError(('This does not look like a BSDF file: %r' % f4))
        major_version = strunpack('<B', f.read(1))[0]
        minor_version = strunpack('<B', f.read(1))[0]
        file_version = ('%i.%i' % (major_version, minor_version))
        if (major_version != VERSION[0]):
            t = 'Reading file with different major version (%s) from the implementation (%s).'
            raise RuntimeError((t % (__version__, file_version)))
        if (minor_version > VERSION[1]):
            t = 'BSDF warning: reading file with higher minor version (%s) than the implementation (%s).'
            logger.warning((t % (__version__, file_version)))
        return self._decode(f)
def get_optimizer(params, opt_name, lr, beta1, beta2):
    """Create a torch optimizer for *params*.

    'adam' -> Adam(betas=(beta1, beta2)); 'nesterov' -> SGD with Nesterov
    momentum=beta1 and weight decay taken from the global FLAGS.
    Fix: an unknown *opt_name* previously crashed with UnboundLocalError;
    it now raises a descriptive ValueError.
    """
    name = opt_name.lower()
    if name == 'adam':
        return torch.optim.Adam(params, lr, betas=(beta1, beta2))
    if name == 'nesterov':
        # weight decay comes from the global FLAGS object — unchanged.
        return torch.optim.SGD(params, lr, momentum=beta1, weight_decay=FLAGS.c_weight_decay, nesterov=True)
    raise ValueError('Unknown optimizer name: %r' % opt_name)
class Decoder(chainer.Chain):
    """Stack of 1-D deconvolution (+BatchNorm+ReLU) layers mapping encoded
    features back to *nb_inputs* channels.

    Kernel sizes are consumed in reverse, mirroring a matching encoder.
    With *no_act_last*, the final layer is a bare deconvolution (no
    normalization or activation).
    """
    def __init__(self, nb_inputs, channel_list, ksize_list, no_act_last=False):
        super(Decoder, self).__init__()
        self.nb_layers = len(channel_list)
        self.no_act_last = no_act_last
        # Append the output channel count so consecutive pairs give each
        # layer's (in, out) channels.
        channel_list = (channel_list + [nb_inputs])
        for (idx, (nb_in, nb_out, ksize)) in enumerate(zip(channel_list[:(- 1)], channel_list[1:], ksize_list[::(- 1)])):
            self.add_link('deconv{}'.format(idx), L.DeconvolutionND(1, nb_in, nb_out, ksize))
            # The last layer gets no BatchNorm when no_act_last is set.
            if (no_act_last and (idx == (self.nb_layers - 1))):
                continue
            self.add_link('bn{}'.format(idx), L.BatchNormalization(nb_out))
    def __call__(self, h):
        """Run *h* through every deconv layer; all but (optionally) the
        last are followed by BatchNorm and ReLU."""
        for idx in range(self.nb_layers):
            if (self.no_act_last and (idx == (self.nb_layers - 1))):
                h = getattr(self, 'deconv{}'.format(idx))(h)
            else:
                h = F.relu(getattr(self, 'bn{}'.format(idx))(getattr(self, 'deconv{}'.format(idx))(h)))
        return h
def test_IndexedArray_nbytes():
    """nbytes of an IndexedArray equals the sum of its index and content buffers."""
    index_data = np.array([2, 2, 0, 1, 4, 5, 4])
    content_data = np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])
    layout = ak.contents.indexedarray.IndexedArray(
        ak.index.Index(index_data),
        ak.contents.numpyarray.NumpyArray(content_data),
    )
    expected = index_data.nbytes + content_data.nbytes
    assert layout.nbytes == expected
class Cls_head(nn.Module):
    """Classification head: global average pooling followed by a linear layer."""

    def __init__(self, embed_dim, num_classes):
        super().__init__()
        # Single projection from the pooled embedding to the class logits.
        self.cls = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        # Collapse the spatial dimensions to 1x1, then drop them.
        pooled = nn.functional.adaptive_avg_pool2d(x, 1)
        flat = pooled.flatten(1)
        return self.cls(flat)
def read_stm(filename):
    """Yield parsed STM segments from *filename*.

    Each line is handed to parse_stm_seq; lines it rejects (falsy result)
    are skipped.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with open(filename) as fin:
        lines = fin.read().splitlines()
    for line in lines:
        seq = parse_stm_seq(line)
        if not seq:
            continue
        yield seq
def test_bytemaskedarray_localindex():
    """local_index on a ByteMaskedArray: masked entries stay None, every other
    entry gets per-axis positional indices; typetracer forms must match the
    concrete results, and out-of-range axes must raise IndexError."""
    # 5 outer entries, each a list of lists of floats.
    content = ak.operations.from_iter([[[0.0, 1.1, 2.2], [], [3.3, 4.4]], [], [[5.5]], [[6.6, 7.7, 8.8, 9.9]], [[], [10.0, 11.1, 12.2]]], highlevel=False)
    # valid_when=False, so mask value 1 means "missing" (entries 2 and 3).
    mask = ak.index.Index8(np.array([0, 0, 1, 1, 0], dtype=np.int8))
    v2_array = ak.contents.ByteMaskedArray(mask, content, valid_when=False)
    assert (to_list(v2_array) == [[[0.0, 1.1, 2.2], [], [3.3, 4.4]], [], None, None, [[], [10.0, 11.1, 12.2]]])
    # axis=0 and its negative alias -3: indices of the outer dimension.
    assert (to_list(ak._do.local_index(v2_array, axis=0)) == [0, 1, 2, 3, 4])
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=0).form == ak._do.local_index(v2_array, axis=0).form)
    assert (to_list(ak._do.local_index(v2_array, axis=(- 3))) == [0, 1, 2, 3, 4])
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=(- 3)).form == ak._do.local_index(v2_array, axis=(- 3)).form)
    # axis=1 / -2: indices within each (unmasked) outer list.
    assert (to_list(ak._do.local_index(v2_array, axis=1)) == [[0, 1, 2], [], None, None, [0, 1]])
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=1).form == ak._do.local_index(v2_array, axis=1).form)
    assert (to_list(ak._do.local_index(v2_array, axis=(- 2))) == [[0, 1, 2], [], None, None, [0, 1]])
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=(- 2)).form == ak._do.local_index(v2_array, axis=(- 2)).form)
    # axis=2 / -1: indices within the innermost lists.
    assert (to_list(ak._do.local_index(v2_array, axis=2)) == [[[0, 1, 2], [], [0, 1]], [], None, None, [[], [0, 1, 2]]])
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=2).form == ak._do.local_index(v2_array, axis=2).form)
    assert (to_list(ak._do.local_index(v2_array, axis=(- 1))) == [[[0, 1, 2], [], [0, 1]], [], None, None, [[], [0, 1, 2]]])
    assert (ak._do.local_index(v2_array.to_typetracer(), axis=(- 1)).form == ak._do.local_index(v2_array, axis=(- 1)).form)
    # Depth is 3, so axis=4 and axis=-4 are out of range.
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, axis=4)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, axis=(- 4))
class IncreasingTableaux_shape_inf(IncreasingTableaux):
    """Increasing tableaux of a fixed shape with unbounded maximum entry
    (an infinite enumerated set)."""

    def __init__(self, p):
        # p is the fixed shape (a partition-like list of row lengths).
        super().__init__(category=InfiniteEnumeratedSets())
        self.shape = p

    def __contains__(self, x):
        # Member of the generic set AND row lengths matching our shape exactly.
        return (IncreasingTableaux.__contains__(self, x) and ([len(row) for row in x] == self.shape))

    def _repr_(self):
        return ('Increasing tableaux of shape %s' % str(self.shape))

    def __iter__(self):
        """Iterate by increasing maximum entry i = 1, 2, 3, ...

        For each i, weights are enumerated as compositions c of n with
        (i - 1) leading parts plus a final positive part k, so the tableau
        genuinely uses entry i; i == 1 only admits the weight (n,).
        """
        i = 1
        n = sum(self.shape)
        while True:
            if (i != 1):
                # k is the multiplicity of the largest entry i (must be >= 1).
                for k in range(1, (n + 1)):
                    for c in integer_vectors_nk_fast_iter((n - k), (i - 1)):
                        c.append(k)
                        for sst in IncreasingTableaux_shape_weight(self.shape, tuple(c)):
                            (yield self.element_class(self, sst))
            else:
                for sst in IncreasingTableaux_shape_weight(self.shape, (n,)):
                    (yield self.element_class(self, sst))
            i += 1
class SoftSort_p2(torch.nn.Module):
    """Differentiable sorting operator (SoftSort with squared-difference cost).

    Produces a row-stochastic matrix approximating the permutation that sorts
    ``scores`` in descending order along dim 1; with ``hard=True`` the result
    is a one-hot permutation matrix with straight-through gradients.
    """

    def __init__(self, tau=1.0, hard=False):
        super(SoftSort_p2, self).__init__()
        self.hard = hard  # emit a hard (one-hot) permutation if True
        self.tau = tau    # softmax temperature

    def forward(self, scores: Tensor):
        scores = scores.unsqueeze((- 1))
        # Target ordering: the scores sorted descending along dim 1.
        sorted_scores = scores.sort(descending=True, dim=1)[0]
        # Negative squared distance between each score and each sorted slot.
        cost = (((scores.transpose(1, 2) - sorted_scores) ** 2).neg() / self.tau)
        perm = cost.softmax((- 1))
        if self.hard:
            # Snap each row to its argmax while keeping soft gradients
            # (straight-through estimator).
            hard_perm = torch.zeros_like(perm, device=perm.device)
            hard_perm.scatter_((- 1), perm.topk(1, (- 1))[1], value=1)
            perm = ((hard_perm - perm).detach() + perm)
        return perm
# NOTE(review): the next line looks like the remnant of a stripped decorator
# (e.g. @registry.register_runner('runner_base')); as written it calls a bare
# name `_runner`, which is not defined in this file — confirm against the
# original source before running.
_runner('runner_base')
class RunnerBase():
    """Base training/evaluation runner: owns the model, optimizer, dataloaders,
    LR schedule, logging and checkpointing for a single training job.

    NOTE(review): many zero-argument methods below (device, model, optimizer,
    dataloaders, max_epoch, ...) are read elsewhere in this class as plain
    attributes (e.g. ``self.device``, ``self.max_epoch`` without call
    parentheses), which only works if they carried @property decorators that
    appear to have been stripped — confirm and restore before use.
    """

    def __init__(self, cfg, task, model, datasets, job_id):
        self.config = cfg
        self.job_id = job_id
        self.task = task
        self.datasets = datasets
        # Raw model; the device-placed / DDP-wrapped version is built lazily.
        self._model = model
        self._wrapped_model = None
        self._device = None
        self._optimizer = None
        self._scaler = None
        self._dataloaders = None
        self._lr_sched = None
        self.start_epoch = 0
        self.setup_output_dir()

    def device(self):
        # Lazily resolve the torch device named in the run config.
        if (self._device is None):
            self._device = torch.device(self.config.run_cfg.device)
        return self._device

    def use_distributed(self):
        return self.config.run_cfg.distributed

    def model(self):
        """Return the model on the right device, DDP-wrapped when distributed."""
        if (self._model.device != self.device):
            self._model = self._model.to(self.device)
            if self.use_distributed:
                if (self._wrapped_model is None):
                    self._wrapped_model = DDP(self._model, device_ids=[self.config.run_cfg.gpu])
            else:
                self._wrapped_model = self._model
        return self._wrapped_model

    def optimizer(self):
        """Lazily build an AdamW optimizer with two parameter groups:
        weight-decayed tensors and (bias / norm / <2-dim) tensors without decay."""
        if (self._optimizer is None):
            num_parameters = 0
            (p_wd, p_non_wd) = ([], [])
            for (n, p) in self.model.named_parameters():
                if (not p.requires_grad):
                    continue
                print(n)
                # Biases and normalization parameters get no weight decay.
                if ((p.ndim < 2) or ('bias' in n) or ('ln' in n) or ('bn' in n)):
                    p_non_wd.append(p)
                else:
                    p_wd.append(p)
                num_parameters += p.data.nelement()
            logging.info(('number of trainable parameters: %d' % num_parameters))
            optim_params = [{'params': p_wd, 'weight_decay': float(self.config.run_cfg.weight_decay)}, {'params': p_non_wd, 'weight_decay': 0}]
            beta2 = self.config.run_cfg.get('beta2', 0.999)
            self._optimizer = torch.optim.AdamW(optim_params, lr=float(self.config.run_cfg.init_lr), weight_decay=float(self.config.run_cfg.weight_decay), betas=(0.9, beta2))
        return self._optimizer

    def scaler(self):
        # AMP GradScaler, created only when mixed precision is enabled;
        # returns None otherwise.
        amp = self.config.run_cfg.get('amp', False)
        if amp:
            if (self._scaler is None):
                self._scaler = torch.cuda.amp.GradScaler()
        return self._scaler

    def lr_scheduler(self):
        """Lazily build the LR scheduler named in the run config."""
        if (self._lr_sched is None):
            lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)
            max_epoch = self.max_epoch
            min_lr = self.min_lr
            init_lr = self.init_lr
            decay_rate = self.config.run_cfg.get('lr_decay_rate', None)
            warmup_start_lr = self.config.run_cfg.get('warmup_lr', (- 1))
            warmup_steps = self.config.run_cfg.get('warmup_steps', 0)
            iters_per_epoch = self.config.run_cfg.get('iters_per_epoch', None)
            if (iters_per_epoch is None):
                try:
                    iters_per_epoch = len(self.dataloaders['train'])
                except (AttributeError, TypeError):
                    # Iterable-style loaders have no len(); fall back to a fixed count.
                    iters_per_epoch = 10000
            self._lr_sched = lr_sched_cls(optimizer=self.optimizer, max_epoch=max_epoch, iters_per_epoch=iters_per_epoch, min_lr=min_lr, init_lr=init_lr, decay_rate=decay_rate, warmup_start_lr=warmup_start_lr, warmup_steps=warmup_steps)
        return self._lr_sched

    def dataloaders(self) -> dict:
        """Lazily build one dataloader per split and cache them as a dict."""
        if (self._dataloaders is None):
            logging.info('dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline).')
            datasets = reorg_datasets_by_split(self.datasets)
            self.datasets = datasets
            # Log record counts per split; -1 means the size is unknowable.
            for split_name in self.datasets:
                if (isinstance(self.datasets[split_name], tuple) or isinstance(self.datasets[split_name], list)):
                    num_records = sum([(len(d) if (not (type(d) in [wds.DataPipeline, ChainDataset])) else 0) for d in self.datasets[split_name]])
                elif hasattr(self.datasets[split_name], '__len__'):
                    num_records = len(self.datasets[split_name])
                else:
                    num_records = (- 1)
                    logging.info('Only a single wds.DataPipeline dataset, no __len__ attribute.')
                if (num_records >= 0):
                    logging.info('Loaded {} records for {} split from the dataset.'.format(num_records, split_name))
            # Stable split ordering so loaders and names can be zipped.
            split_names = sorted(self.datasets.keys())
            datasets = [self.datasets[split] for split in split_names]
            is_trains = [(split in self.train_splits) for split in split_names]
            batch_sizes = [(self.config.run_cfg.batch_size_train if (split == 'train') else self.config.run_cfg.batch_size_eval) for split in split_names]
            collate_fns = []
            for dataset in datasets:
                if (isinstance(dataset, tuple) or isinstance(dataset, list)):
                    collate_fns.append([getattr(d, 'collater', None) for d in dataset])
                else:
                    collate_fns.append(getattr(dataset, 'collater', None))
            dataloaders = self.create_loaders(datasets=datasets, num_workers=self.config.run_cfg.num_workers, batch_sizes=batch_sizes, is_trains=is_trains, collate_fns=collate_fns)
            self._dataloaders = {k: v for (k, v) in zip(split_names, dataloaders)}
        return self._dataloaders

    def cuda_enabled(self):
        return (self.device.type == 'cuda')

    def max_epoch(self):
        return int(self.config.run_cfg.max_epoch)

    def log_freq(self):
        log_freq = self.config.run_cfg.get('log_freq', 50)
        return int(log_freq)

    def init_lr(self):
        return float(self.config.run_cfg.init_lr)

    def min_lr(self):
        return float(self.config.run_cfg.min_lr)

    def accum_grad_iters(self):
        return int(self.config.run_cfg.get('accum_grad_iters', 1))

    def valid_splits(self):
        valid_splits = self.config.run_cfg.get('valid_splits', [])
        if (len(valid_splits) == 0):
            logging.info('No validation splits found.')
        return valid_splits

    def test_splits(self):
        test_splits = self.config.run_cfg.get('test_splits', [])
        return test_splits

    def train_splits(self):
        train_splits = self.config.run_cfg.get('train_splits', [])
        if (len(train_splits) == 0):
            logging.info('Empty train splits.')
        return train_splits

    def evaluate_only(self):
        # True when the job only runs evaluation, no training.
        return self.config.run_cfg.evaluate

    def use_dist_eval_sampler(self):
        return self.config.run_cfg.get('use_dist_eval_sampler', True)

    def resume_ckpt_path(self):
        return self.config.run_cfg.get('resume_ckpt_path', None)

    def train_loader(self):
        train_dataloader = self.dataloaders['train']
        return train_dataloader

    def setup_output_dir(self):
        """Create <library_root>/<output_dir>/<job_id>{,/result} and register
        both paths with the global registry."""
        lib_root = Path(registry.get_path('library_root'))
        output_dir = ((lib_root / self.config.run_cfg.output_dir) / self.job_id)
        result_dir = (output_dir / 'result')
        output_dir.mkdir(parents=True, exist_ok=True)
        result_dir.mkdir(parents=True, exist_ok=True)
        registry.register_path('result_dir', str(result_dir))
        registry.register_path('output_dir', str(output_dir))
        self.result_dir = result_dir
        self.output_dir = output_dir

    def train(self):
        """Main loop: train each epoch, evaluate on valid splits, track the
        best 'val' agg_metrics, checkpoint, then run final test evaluation."""
        start_time = time.time()
        best_agg_metric = 0
        best_epoch = 0
        self.log_config()
        # Optionally resume from a checkpoint before training starts.
        if ((not self.evaluate_only) and (self.resume_ckpt_path is not None)):
            self._load_checkpoint(self.resume_ckpt_path)
        for cur_epoch in range(self.start_epoch, self.max_epoch):
            if (not self.evaluate_only):
                logging.info('Start training')
                train_stats = self.train_epoch(cur_epoch)
                self.log_stats(split_name='train', stats=train_stats)
            if (len(self.valid_splits) > 0):
                for split_name in self.valid_splits:
                    logging.info('Evaluating on {}.'.format(split_name))
                    val_log = self.eval_epoch(split_name=split_name, cur_epoch=cur_epoch)
                    if (val_log is not None):
                        # Only the main process tracks/saves the best model.
                        if is_main_process():
                            assert ('agg_metrics' in val_log), 'No agg_metrics found in validation log.'
                            agg_metrics = val_log['agg_metrics']
                            if ((agg_metrics > best_agg_metric) and (split_name == 'val')):
                                (best_epoch, best_agg_metric) = (cur_epoch, agg_metrics)
                                self._save_checkpoint(cur_epoch, is_best=True)
                            val_log.update({'best_epoch': best_epoch})
                            self.log_stats(val_log, split_name)
            elif (not self.evaluate_only):
                # No validation split: just checkpoint every epoch.
                self._save_checkpoint(cur_epoch, is_best=False)
            if self.evaluate_only:
                break
            if self.config.run_cfg.distributed:
                dist.barrier()
        # Evaluate on the test splits, using the best checkpoint if validation ran.
        test_epoch = ('best' if (len(self.valid_splits) > 0) else cur_epoch)
        self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        logging.info('Training time {}'.format(total_time_str))

    def evaluate(self, cur_epoch='best', skip_reload=False):
        """Run eval_epoch on every test split; return {split: log}."""
        test_logs = dict()
        if (len(self.test_splits) > 0):
            for split_name in self.test_splits:
                test_logs[split_name] = self.eval_epoch(split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload)
            return test_logs

    def train_epoch(self, epoch):
        # Delegate the actual epoch loop to the task object.
        self.model.train()
        return self.task.train_epoch(epoch=epoch, model=self.model, data_loader=self.train_loader, optimizer=self.optimizer, scaler=self.scaler, lr_scheduler=self.lr_scheduler, cuda_enabled=self.cuda_enabled, log_freq=self.log_freq, accum_grad_iters=self.accum_grad_iters)

    # NOTE(review): the next line looks like the remnant of a stripped
    # @torch.no_grad() decorator for eval_epoch — confirm and restore.
    _grad()
    def eval_epoch(self, split_name, cur_epoch, skip_reload=False):
        """Evaluate one split; reloads the best checkpoint when cur_epoch=='best'
        unless skip_reload is set. Returns the task's post-evaluation log."""
        data_loader = self.dataloaders.get(split_name, None)
        assert data_loader, 'data_loader for split {} is None.'.format(split_name)
        model = self.unwrap_dist_model(self.model)
        if ((not skip_reload) and (cur_epoch == 'best')):
            model = self._reload_best_model(model)
        model.eval()
        self.task.before_evaluation(model=model, dataset=self.datasets[split_name])
        results = self.task.evaluation(model, data_loader)
        if (results is not None):
            return self.task.after_evaluation(val_result=results, split_name=split_name, epoch=cur_epoch)

    def unwrap_dist_model(self, model):
        # Strip the DDP wrapper when running distributed.
        if self.use_distributed:
            return model.module
        else:
            return model

    def create_loaders(self, datasets, num_workers, batch_sizes, is_trains, collate_fns, dataset_ratios=None):
        """Build one loader per dataset; a list/tuple of datasets becomes a
        ratio-weighted MultiIterLoader over per-dataset loaders."""
        def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):
            # Webdataset pipelines are already iterable; wrap in a plain iterator.
            if (isinstance(dataset, ChainDataset) or isinstance(dataset, wds.DataPipeline)):
                loader = iter(DataLoader(dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True))
            else:
                if self.use_distributed:
                    sampler = DistributedSampler(dataset, shuffle=is_train, num_replicas=get_world_size(), rank=get_rank())
                    if (not self.use_dist_eval_sampler):
                        # Eval without a distributed sampler (e.g. retrieval).
                        sampler = (sampler if is_train else None)
                else:
                    sampler = None
                loader = DataLoader(dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True, sampler=sampler, shuffle=((sampler is None) and is_train), collate_fn=collate_fn, drop_last=(True if is_train else False))
                loader = PrefetchLoader(loader)
                if is_train:
                    loader = IterLoader(loader, use_distributed=self.use_distributed)
            return loader
        loaders = []
        for (dataset, bsz, is_train, collate_fn) in zip(datasets, batch_sizes, is_trains, collate_fns):
            if (isinstance(dataset, list) or isinstance(dataset, tuple)):
                if (hasattr(dataset[0], 'sample_ratio') and (dataset_ratios is None)):
                    dataset_ratios = [d.sample_ratio for d in dataset]
                loader = MultiIterLoader(loaders=[_create_loader(d, num_workers, bsz, is_train, collate_fn[i]) for (i, d) in enumerate(dataset)], ratios=dataset_ratios)
            else:
                loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)
            loaders.append(loader)
        return loaders

    # NOTE(review): bare `_process` below looks like the remnant of a stripped
    # @main_process decorator; as written it is a NameError — confirm/restore.
    _process
    def _save_checkpoint(self, cur_epoch, is_best=False):
        """Save model/optimizer/scaler/config state; frozen parameters are
        dropped from the state dict to keep checkpoints small."""
        model_no_ddp = self.unwrap_dist_model(self.model)
        param_grad_dic = {k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()}
        state_dict = model_no_ddp.state_dict()
        for k in list(state_dict.keys()):
            if ((k in param_grad_dic.keys()) and (not param_grad_dic[k])):
                # Parameter does not require grad: not saved.
                del state_dict[k]
        save_obj = {'model': state_dict, 'optimizer': self.optimizer.state_dict(), 'config': self.config.to_dict(), 'scaler': (self.scaler.state_dict() if self.scaler else None), 'epoch': cur_epoch}
        save_to = os.path.join(self.output_dir, 'checkpoint_{}.pth'.format(('best' if is_best else cur_epoch)))
        logging.info('Saving checkpoint at epoch {} to {}.'.format(cur_epoch, save_to))
        torch.save(save_obj, save_to)

    def _reload_best_model(self, model):
        """Load checkpoint_best.pth into *model*; falls back to strict=False
        when only part of the model was saved."""
        checkpoint_path = os.path.join(self.output_dir, 'checkpoint_best.pth')
        logging.info('Loading checkpoint from {}.'.format(checkpoint_path))
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        try:
            model.load_state_dict(checkpoint['model'])
        except RuntimeError as e:
            logging.warning('\n Key mismatch when loading checkpoint. This is expected if only part of the model is saved.\n Trying to load the model with strict=False.\n ')
            model.load_state_dict(checkpoint['model'], strict=False)
        return model

    def _load_checkpoint(self, url_or_filename):
        """Resume model/optimizer/scaler/epoch state from a URL or local path."""
        if is_url(url_or_filename):
            cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
            checkpoint = torch.load(cached_file, map_location=self.device)
        elif os.path.isfile(url_or_filename):
            checkpoint = torch.load(url_or_filename, map_location=self.device)
        else:
            raise RuntimeError('checkpoint url or path is invalid')
        state_dict = checkpoint['model']
        self.unwrap_dist_model(self.model).load_state_dict(state_dict, strict=False)
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        if (self.scaler and ('scaler' in checkpoint)):
            self.scaler.load_state_dict(checkpoint['scaler'])
        self.start_epoch = (checkpoint['epoch'] + 1)
        logging.info('Resume checkpoint from {}'.format(url_or_filename))

    # NOTE(review): stripped decorator remnant (likely @main_process) — see above.
    _process
    def log_stats(self, stats, split_name):
        # Append one JSON line per stats dict to the job's log file.
        if isinstance(stats, dict):
            log_stats = {**{f'{split_name}_{k}': v for (k, v) in stats.items()}}
            with open(os.path.join(self.output_dir, 'log.txt'), 'a') as f:
                f.write((json.dumps(log_stats) + '\n'))
        elif isinstance(stats, list):
            pass

    # NOTE(review): stripped decorator remnant (likely @main_process) — see above.
    _process
    def log_config(self):
        # Dump the full run config to the log file once.
        with open(os.path.join(self.output_dir, 'log.txt'), 'a') as f:
            f.write((json.dumps(self.config.to_dict(), indent=4) + '\n'))
class ChineseCLIPVisionModelTester():
    """Test helper that builds ChineseCLIPVision configs/inputs and verifies
    the model's output shapes on behalf of a parent TestCase."""

    def __init__(self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.scope = scope
        # One token per image patch, plus the CLS token.
        self.seq_length = ((image_size // patch_size) ** 2) + 1

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values) pair for one forward pass."""
        config = self.get_config()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        return (config, pixel_values)

    def get_config(self):
        """Build a ChineseCLIPVisionConfig from the tester's hyperparameters."""
        return ChineseCLIPVisionConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values):
        """Run the model in eval mode and assert the output tensor shapes."""
        model = ChineseCLIPVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # Sequence length = number of patches + CLS token.
        expected_patches = (self.image_size // self.patch_size) * (self.image_size // self.patch_size)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_patches + 1, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the layout the common tests expect."""
        (config, pixel_values) = self.prepare_config_and_inputs()
        return (config, {'pixel_values': pixel_values})
def test_ArrayBuilder_non_picklable_behavior():
    """An ArrayBuilder built with a custom behavior dict should expose the
    same behavior keys on the resulting array."""

    def make_add_xyr():
        # Factory returning a closure; closures are not picklable, which is
        # what the test name refers to.
        def add_xyr(left, right):
            x = (left.x + right.x)
            y = (left.y + right.y)
            return ak.zip({'x': x, 'y': y, 'r': np.sqrt(((x ** 2) + (y ** 2)))}, with_name='xyr')
        return add_xyr
    # Overload np.add for records named 'xyr'.
    behavior = {(np.add, 'xyr', 'xyr'): make_add_xyr()}
    builder = ak.ArrayBuilder(behavior=behavior)
    # NOTE(review): `func` is not defined anywhere in this file as shown — it
    # is presumably a fixture/helper that snapshots or round-trips the
    # builder; confirm against the original test module.
    result = func(builder)
    assert (result.behavior.keys() == behavior.keys())
def _check_stack(path):
try:
for frame in inspect.getouterframes(inspect.currentframe(), 0):
if (path in frame[1].replace(os.sep, '/')):
return True
except Exception:
pass
return False |
def split_bn_bias(layer_groups):
    """Split each layer group into (non-batchnorm, batchnorm) sub-groups.

    Returns a flat list twice as long as the input, preserving group order:
    for every group, first a Sequential of its ordinary children, then a
    Sequential of its batchnorm children.
    """
    result = []
    for group in layer_groups:
        regular, batchnorm = [], []
        for child in group.children():
            target = batchnorm if isinstance(child, bn_types) else regular
            target.append(child)
        result.append(nn.Sequential(*regular))
        result.append(nn.Sequential(*batchnorm))
    return result
def LocalMcLaughlinGraph():
    """Return the local McLaughlin graph.

    This is the subgraph of the McLaughlin graph induced on the 162-vertex
    orbit of the stabilizer of vertex 1 in its automorphism group,
    relabeled to consecutive integers.
    """
    mclaughlin = McLaughlinGraph()
    stab_orbits = mclaughlin.automorphism_group().stabilizer(1).orbits()
    # Pick the orbit of size 162 (the local graph's vertex set).
    big_orbit = [orb for orb in stab_orbits if len(orb) == 162][0]
    graph = mclaughlin.subgraph(vertices=big_orbit)
    graph.relabel()
    graph.name('Local McLaughlin Graph')
    return graph
class BaseModel(nn.Module):
    """Per-variable MLP ensemble with a learnable (Gumbel) adjacency mask,
    supporting observational data and perfect/imperfect interventions with
    known or unknown targets.

    NOTE(review): the exact causal-discovery semantics (DCDI-style) are
    inferred from structure — confirm against the project's documentation.
    """

    def __init__(self, num_vars, num_layers, hid_dim, num_params, nonlin='leaky-relu', intervention=False, intervention_type='perfect', intervention_knowledge='known', num_regimes=1):
        super(BaseModel, self).__init__()
        self.num_vars = num_vars
        self.num_layers = num_layers
        self.hid_dim = hid_dim
        self.num_params = num_params
        self.nonlin = nonlin
        self.gumbel = True
        self.intervention = intervention
        self.intervention_type = intervention_type
        self.intervention_knowledge = intervention_knowledge
        self.num_regimes = num_regimes
        self.weights = nn.ParameterList()
        self.biases = nn.ParameterList()
        self.extra_params = []
        # Only these intervention configurations are supported:
        # none, perfect/known, perfect/unknown, imperfect/known.
        if (not ((not self.intervention) or (self.intervention and (self.intervention_type == 'perfect') and (self.intervention_knowledge == 'known')) or (self.intervention and (self.intervention_type == 'perfect') and (self.intervention_knowledge == 'unknown')) or (self.intervention and (self.intervention_type == 'imperfect') and (self.intervention_knowledge == 'known')))):
            raise ValueError('Not implemented')
        if (not self.intervention):
            # Observational-only mode behaves like perfect/known.
            print('No intervention')
            self.intervention_type = 'perfect'
            self.intervention_knowledge = 'known'
        # Fixed mask excluding self-loops; learnable mask is GumbelAdjacency.
        self.adjacency = (torch.ones((self.num_vars, self.num_vars)) - torch.eye(self.num_vars))
        self.gumbel_adjacency = GumbelAdjacency(self.num_vars)
        if ((self.intervention_knowledge == 'unknown') and self.intervention):
            # Learnable per-regime intervention-target weights.
            self.gumbel_interv_w = GumbelIntervWeight(self.num_vars, self.num_regimes)
        self.zero_weights_ratio = 0.0
        self.numel_weights = 0
        # Build (num_layers + 1) weight/bias tensors per variable; first layer
        # takes num_vars inputs, last layer emits num_params outputs.
        for i in range((self.num_layers + 1)):
            in_dim = self.hid_dim
            out_dim = self.hid_dim
            if (i == 0):
                in_dim = self.num_vars
            if (i == self.num_layers):
                out_dim = self.num_params
            if (self.intervention and ((self.intervention_type == 'imperfect') or (self.intervention_knowledge == 'unknown'))):
                # Regime-specific parameters: extra trailing regime dimension.
                self.weights.append(nn.Parameter(torch.zeros(self.num_vars, out_dim, in_dim, self.num_regimes)))
                self.biases.append(nn.Parameter(torch.zeros(self.num_vars, out_dim, self.num_regimes)))
                self.numel_weights += (((self.num_vars * out_dim) * in_dim) * self.num_regimes)
            elif ((not self.intervention) or (self.intervention_type == 'perfect')):
                self.weights.append(nn.Parameter(torch.zeros(self.num_vars, out_dim, in_dim)))
                self.biases.append(nn.Parameter(torch.zeros(self.num_vars, out_dim)))
                self.numel_weights += ((self.num_vars * out_dim) * in_dim)
            else:
                if (self.intervention_type not in ['perfect', 'imperfect']):
                    raise ValueError(f'{intervention_type} is not a valid for intervention type')
                if (self.intervention_knowledge not in ['known', 'unknown']):
                    raise ValueError(f'{intervention_knowledge} is not a valid value for intervention knowledge')

    def get_interv_w(self, bs, regime):
        # Sample intervention-target weights for a batch in the given regime.
        return self.gumbel_interv_w(bs, regime)

    def forward_given_params(self, x, weights, biases, mask=None, regime=None):
        """Forward pass using explicitly supplied weights/biases.

        x: (batch, num_vars) input; mask/regime are required only for the
        imperfect or unknown-target intervention modes.  Returns a tuple of
        per-variable outputs (unbound along dim 1).
        """
        bs = x.size(0)
        num_zero_weights = 0
        for layer in range((self.num_layers + 1)):
            # First layer applies the (masked) adjacency to the raw input.
            if (layer == 0):
                M = self.gumbel_adjacency(bs)
                adj = self.adjacency.unsqueeze(0)
                if (not self.intervention):
                    x = (torch.einsum('tij,bjt,ljt,bj->bti', weights[layer], M, adj, x) + biases[layer])
                elif ((self.intervention_type == 'perfect') and (self.intervention_knowledge == 'known')):
                    # Perfect/known interventions do not alter the first layer.
                    x = (torch.einsum('tij,bjt,ljt,bj->bti', weights[layer], M, adj, x) + biases[layer])
                else:
                    assert (mask is not None), 'Mask is not set!'
                    assert (regime is not None), 'Regime is not set!'
                    regime = torch.from_numpy(regime)
                    R = mask
                    if (self.intervention_knowledge == 'unknown'):
                        # Learn which variables are targeted per regime.
                        self.interv_w = self.gumbel_interv_w(bs, regime)
                        R = self.interv_w
                    # Zero out incoming edges of intervened variables.
                    M = torch.einsum('bjt,bt->bjt', M, R)
                    # One-hot encode (variable, regime) selection into R.
                    R = (1 - R).type(torch.int64)
                    R = (R * regime.unsqueeze(1))
                    R = torch.zeros(R.size(0), self.num_vars, self.num_regimes).scatter_(2, R.unsqueeze(2), 1)
                    # Select regime-specific weights/biases and apply them.
                    w = torch.einsum('tijk, btk -> btij', weights[layer], R)
                    x = torch.einsum('btij, bjt, ljt, bj -> bti', w, M, adj, x)
                    x += torch.einsum('btk,tik->bti', R, biases[layer])
            elif (self.intervention and ((self.intervention_type == 'imperfect') or (self.intervention_knowledge == 'unknown'))):
                # Deeper layers reuse the regime-selection R computed above.
                w = torch.einsum('tijk, btk -> btij', weights[layer], R)
                x = torch.einsum('btij, btj -> bti', w, x)
                x += torch.einsum('btk,tik->bti', R, biases[layer])
            else:
                x = (torch.einsum('tij,btj->bti', weights[layer], x) + biases[layer])
            num_zero_weights += (weights[layer].numel() - weights[layer].nonzero().size(0))
            # Nonlinearity on all but the output layer.
            if (layer != self.num_layers):
                x = (F.leaky_relu(x) if (self.nonlin == 'leaky-relu') else torch.sigmoid(x))
        self.zero_weights_ratio = (num_zero_weights / float(self.numel_weights))
        return torch.unbind(x, 1)

    def get_w_adj(self):
        # Edge probabilities, self-loops masked out.
        return (self.gumbel_adjacency.get_proba() * self.adjacency)

    def reset_params(self):
        # Xavier-init weights and zero biases, per variable.
        with torch.no_grad():
            for node in range(self.num_vars):
                for (i, w) in enumerate(self.weights):
                    w = w[node]
                    nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('leaky_relu'))
                for (i, b) in enumerate(self.biases):
                    b = b[node]
                    b.zero_()

    def get_parameters(self, mode='wbx'):
        """Return a tuple of parameter lists; *mode* selects which of
        weights ('w'), biases ('b') and extra params ('x') are included."""
        params = []
        if ('w' in mode):
            weights = []
            for w in self.weights:
                weights.append(w)
            params.append(weights)
        if ('b' in mode):
            biases = []
            for b in self.biases:
                biases.append(b)
            params.append(biases)
        if ('x' in mode):
            extra_params = []
            for ep in self.extra_params:
                if ep.requires_grad:
                    extra_params.append(ep)
            params.append(extra_params)
        return tuple(params)

    def set_parameters(self, params, mode='wbx'):
        """Copy parameter values from *params* (same layout as get_parameters)."""
        with torch.no_grad():
            k = 0
            if ('w' in mode):
                for (i, w) in enumerate(self.weights):
                    w.copy_(params[k][i])
                k += 1
            if ('b' in mode):
                for (i, b) in enumerate(self.biases):
                    b.copy_(params[k][i])
                k += 1
            if (('x' in mode) and (len(self.extra_params) > 0)):
                for (i, ep) in enumerate(self.extra_params):
                    if ep.requires_grad:
                        ep.copy_(params[k][i])
                k += 1

    def get_grad_norm(self, mode='wbx'):
        """L2 norm of gradients over the parameter groups selected by *mode*."""
        grad_norm = 0
        if ('w' in mode):
            for w in self.weights:
                grad_norm += torch.sum((w.grad ** 2))
        if ('b' in mode):
            for b in self.biases:
                grad_norm += torch.sum((b.grad ** 2))
        if ('x' in mode):
            for ep in self.extra_params:
                if ep.requires_grad:
                    grad_norm += torch.sum((ep.grad ** 2))
        return torch.sqrt(grad_norm)

    def save_parameters(self, exp_path, mode='wbx'):
        # Pickle the selected parameter groups to exp_path/params_<mode>.
        params = self.get_parameters(mode=mode)
        with open(os.path.join(exp_path, ('params_' + mode)), 'wb') as f:
            pickle.dump(params, f)

    def load_parameters(self, exp_path, mode='wbx'):
        # Inverse of save_parameters.
        with open(os.path.join(exp_path, ('params_' + mode)), 'rb') as f:
            params = pickle.load(f)
        self.set_parameters(params, mode=mode)

    def get_distribution(self, density_params):
        # Subclasses map density parameters to a concrete distribution.
        raise NotImplementedError
def ic03_lex(lexdir, lex_type, gt_file, submit_file, name_file):
    """Rewrite an IC03 submission file by snapping each predicted word to its
    closest (edit-distance) entry in a lexicon.

    lex_type == 'full' uses the shared lexicon_Full.txt; any other value uses
    the per-image 50-word lexicons under lexicon_50/.  Ground-truth, submission
    and image-name files are matched line-by-line.  Returns the path of the
    rewritten submission file.
    """
    def _read_lines(path):
        # Read all lines, closing the handle promptly (originals leaked handles).
        with open(path, 'r') as f:
            return f.readlines()

    def _closest(word, lexicon):
        # Lexicon entry (stripped, lower-cased) with minimal edit distance to
        # *word*; ties keep the earliest entry, and an empty lexicon returns
        # *word* unchanged — same behavior as the original inline loops.
        best_word, best_dis = word, 10000
        for cand in lexicon:
            cand = cand.strip().lower()
            dis = editdistance.eval(word, cand)
            if dis < best_dis:
                best_word, best_dis = cand, dis
        return best_word

    gt_lines = _read_lines(gt_file)
    sub_lines = _read_lines(submit_file)
    img_lines = _read_lines(name_file)

    if lex_type == 'full':
        sub_file = submit_file[:-4] + '_full.txt'
        # Hoisted out of the loop: the full lexicon is identical for every
        # line (the original re-read the file once per ground-truth line).
        full_lex = _read_lines(os.path.join(lexdir, 'lexicon_Full.txt'))
        with open(sub_file, 'w') as sub_fout:
            for i in range(len(gt_lines)):
                (suf, gt, _) = gt_lines[i].strip().split('"')
                sub = sub_lines[i].strip().split('"')[1]
                sub_fout.write(suf + '"' + str(_closest(sub, full_lex)) + '"\n')
    else:
        sub_file = submit_file[:-4] + '_50.txt'
        with open(sub_file, 'w') as sub_fout:
            for i in range(len(gt_lines)):
                base_name = img_lines[i].strip().split('.')[0]
                (suf, gt, _) = gt_lines[i].strip().split('"')
                sub = sub_lines[i].strip().split('"')[1]
                # Per-image lexicon keyed by image name and ground-truth word.
                lex_path = os.path.join(lexdir, 'lexicon_50', 'lexicon_' + base_name + '_' + gt + '.txt')
                sub_fout.write(suf + '"' + str(_closest(sub, _read_lines(lex_path))) + '"\n')
    return sub_file
def _lcs(x, y):
(n, m) = (len(x), len(y))
table = dict()
for i in range((n + 1)):
for j in range((m + 1)):
if ((i == 0) or (j == 0)):
table[(i, j)] = 0
elif (x[(i - 1)] == y[(j - 1)]):
table[(i, j)] = (table[((i - 1), (j - 1))] + 1)
else:
table[(i, j)] = max(table[((i - 1), j)], table[(i, (j - 1))])
return table |
class Average(Metric):
    """Running arithmetic mean of scalar values."""

    def __init__(self):
        # Number of observed values and their running total.
        self._count = 0
        self._sum = 0

    def __call__(self, value: torch.Tensor):
        # detach_tensors (from the Metric base class) strips gradient tracking.
        (value,) = self.detach_tensors(value)
        self._sum += value
        self._count += 1

    def get_metric(self, reset: bool=False):
        """Return the current mean (0 when nothing was recorded); optionally
        reset the accumulators."""
        value = (self._sum / self._count) if self._count else 0
        if reset:
            self._sum = 0
            self._count = 0
        return value
class InvertedResidual(nn.Module):
    """Quantized MobileNetV2-style inverted-residual block.

    expand (1x1, skipped when expand_ratio == 1) -> depthwise 3x3 -> project
    1x1, all as ReLUClipFXQConvBN layers, with an optional residual add that
    handles fixed-point fraction-length alignment during integer inference.
    """

    def __init__(self, inp, outp, stride, expand_ratio, double_side=False, master_layer=None):
        super(InvertedResidual, self).__init__()
        assert (stride in [1, 2])
        expand_inp = (inp * expand_ratio)
        if (expand_ratio != 1):
            # Expansion pointwise conv, depthwise conv, projection pointwise conv.
            l0 = ReLUClipFXQConvBN(inp, expand_inp, 1, 1, 0, double_side=double_side, master_layer=master_layer)
            l1 = ReLUClipFXQConvBN(expand_inp, expand_inp, 3, stride, 1, groups=expand_inp)
            l2 = ReLUClipFXQConvBN(expand_inp, outp, 1, 1, 0)
            layers = [l0, l1, l2]
        else:
            # No expansion: depthwise conv followed by projection only.
            l1 = ReLUClipFXQConvBN(expand_inp, expand_inp, 3, stride, 1, groups=expand_inp, double_side=double_side, master_layer=master_layer)
            l2 = ReLUClipFXQConvBN(expand_inp, outp, 1, 1, 0)
            layers = [l1, l2]
        self.body = nn.Sequential(*layers)
        self.layer_dict = {}
        # Residual add only when spatial size and channel count are preserved.
        self.residual_connection = ((stride == 1) and (inp == outp))
        if self.residual_connection:
            self.set_master_layer(self.body[0])
        else:
            self.set_master_layer(None)

    def forward(self, x):
        res = self.body(x)
        if self.residual_connection:
            if (getattr(FLAGS, 'int_infer', False) and (not self.training)):
                # Integer inference: align both operands to the larger
                # fraction length before adding, clamp to int32 range,
                # then rescale back.
                res_fraclen = res.output_fraclen
                x_fraclen = x.output_fraclen
                output_fraclen = max(res_fraclen, x_fraclen)
                res = (res * (2 ** output_fraclen))
                x = (x * (2 ** output_fraclen))
                res += x
                res = torch.clamp(res, max=((1 << 31) - 1), min=((- (1 << 31)) + 1))
                res = (res / (2 ** output_fraclen))
                setattr(res, 'output_fraclen', output_fraclen)
            else:
                res += x
        return res

    def set_master_layer(self, master_layer):
        self.layer_dict['master'] = master_layer

    def get_master_layer(self):
        return self.layer_dict['master']

    def set_following_layer(self, following_layer):
        # Chain each internal layer to its successor; the last one points at
        # the block's external follower.
        self.layer_dict['following'] = following_layer
        for idx in range((len(self.body) - 1)):
            self.body[idx].set_following_layer(self.body[(idx + 1)])
        self.body[(- 1)].set_following_layer(following_layer)

    def get_following_layer(self):
        return self.layer_dict['following']

    def master_child(self):
        return self.body[0]

    def int_block(self):
        """Build the pure-integer equivalent of this block: each quantized
        conv's int_conv() interleaved with ReLU, wrapped in an IntBlock."""
        layers = []
        layers.append(self.body[0].int_conv())
        for layer_ in self.body[1:]:
            layers.append(nn.ReLU(inplace=True))
            layers.append(layer_.int_conv())
        body = nn.Sequential(*layers)
        residual_connection = self.residual_connection
        return IntBlock(body, residual_connection)
def _rebuild_xla_tensor(data, dtype, device, requires_grad):
tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
tensor.requires_grad = requires_grad
return tensor |
class CBNetBase(BaseModule):
    """Composite-backbone (CBNet) base class: runs several backbone copies in
    sequence, feeding each copy's intermediate features into the next."""

    def _freeze_stages(self):
        # Delegate stage freezing to every composed backbone.
        for module in self.cb_modules:
            module._freeze_stages()

    def init_cb_weights(self):
        # Subclasses define how composite-connection weights are initialized.
        raise NotImplementedError

    def init_weights(self):
        self.init_cb_weights()
        for module in self.cb_modules:
            module.init_weights()

    def _forward_cb_feats(self, feats, spatial_info):
        # Subclasses turn one backbone's outputs into the next one's inputs.
        raise NotImplementedError

    def forward(self, x):
        """Run every backbone copy; return a tuple of per-copy output tuples
        (selected by self.out_indices)."""
        outs_list = []
        cb_feats, pre_outs = None, None
        last = len(self.cb_modules) - 1
        for i, module in enumerate(self.cb_modules):
            pre_outs, spatial_info = module(x, cb_feats, pre_outs)
            outs_list.append(tuple(pre_outs[j + 1] for j in self.out_indices))
            # All but the last copy feed composite features forward.
            if i < last:
                cb_feats = self._forward_cb_feats(pre_outs, spatial_info)
        return tuple(outs_list)

    def train(self, mode=True):
        """Switch train/eval mode while keeping frozen stages frozen and the
        composite-connection BatchNorm layers in eval mode."""
        super().train(mode)
        for module in self.cb_modules:
            module.train(mode=mode)
        self._freeze_stages()
        for module in self.cb_linears.modules():
            if isinstance(module, _BatchNorm):
                module.eval()
class Chunk(NER):
    """Token-classification task for CoNLL-2000-style chunking; labels live in
    the second-to-last column of the data files."""

    def __init__(self):
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read labels from *path* (one per line, with 'O' prepended when
        absent); with no path, return the standard chunk label set."""
        if not path:
            return ['O', 'B-ADVP', 'B-INTJ', 'B-LST', 'B-PRT', 'B-NP', 'B-SBAR', 'B-VP', 'B-ADJP', 'B-CONJP', 'B-PP', 'I-ADVP', 'I-INTJ', 'I-LST', 'I-PRT', 'I-NP', 'I-SBAR', 'I-VP', 'I-ADJP', 'I-CONJP', 'I-PP']
        with open(path, 'r') as handle:
            labels = handle.read().splitlines()
        if 'O' not in labels:
            labels = ['O'] + labels
        return labels
def kl_latent_space_student(v, z, log_det_J):
    """Negative log-density loss for a Student-t latent space.

    Accumulates the (negative) log-probability of latent samples *z* under a
    Student-t distribution with dof *v*, corrected by the flow's
    log-determinant *log_det_J*, and returns the batch mean.  The 1e-15
    offsets guard against log/lgamma of zero.
    """
    dim = z.shape[-1]
    neg_logp = 0.0
    neg_logp -= dim * tf.math.lgamma(0.5 * (v + 1))
    neg_logp += dim * tf.math.lgamma(0.5 * v + 1e-15)
    neg_logp += 0.5 * dim * tf.math.log(v + 1e-15)
    neg_logp += 0.5 * (v + 1) * tf.reduce_sum(tf.math.log1p((z ** 2) / tf.expand_dims(v, axis=-1)), axis=-1)
    neg_logp -= log_det_J
    return tf.reduce_mean(neg_logp)
class AI21Jurassic2JumboWindowService(AI21WindowService):
    """Token window service for the AI21 Jurassic-2 Jumbo model."""

    def max_sequence_length(self) -> int:
        """Maximum request sequence length for Jurassic-2 Jumbo.

        NOTE(review): sibling window services often expose this as a
        @property — confirm against AI21WindowService before relying on
        call-vs-attribute access.
        """
        return 6000
def validate_no_mva(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate MVA numbers (via the `mva` module's is_valid) element-wise.

    Accepts a scalar, a (pandas/dask) Series, or a (pandas/dask) DataFrame;
    for a DataFrame, *column* selects a single column, otherwise every cell
    is checked.  Returns booleans with the same shape as the input.
    """
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column:
            return df[column].apply(mva.is_valid)
        return df.applymap(mva.is_valid)
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(mva.is_valid)
    # Scalar fallback (e.g. a single string).
    return mva.is_valid(df)
def test_encode():
    """Check encode() of 'é' (U+00E9) across common codecs.

    BUG FIX: the test character had been mojibake-flattened to plain 'e',
    which can never encode to b'\xe9' / b'\xc3\xa9' etc.; restored 'é',
    whose latin-1, utf-8 and utf-16 encodings match the expected bytes.
    """
    assert (encode('é', 'latin1') == b'\xe9')
    assert (encode('é', 'utf8') == b'\xc3\xa9')
    assert (encode('é', 'utf8') == b'\xc3\xa9')
    assert (encode('é', 'utf-16') == b'\xe9\x00')
    assert (encode('é', 'utf-16le') == b'\xe9\x00')
    assert (encode('é', 'utf-16be') == b'\x00\xe9')
@patch('sdv.single_table.utils.warnings')
def test_check_num_rows_valid(warning_mock):
    """check_num_rows should not warn when the sampled row count matches.

    BUG FIX: the ``@patch`` decorator had been garbled into a bare string
    expression, leaving ``warning_mock`` unbound; restored the mock patch of
    the module-level ``warnings`` used by check_num_rows.
    """
    # Setup
    num_rows = 5
    expected_num_rows = 5
    is_reject_sampling = True
    max_tries = 1

    # Run
    check_num_rows(num_rows, expected_num_rows, is_reject_sampling, max_tries)

    # Assert: no warning was emitted for a fully satisfied request.
    assert (warning_mock.warn.call_count == 0)
@pytest.mark.parametrize('heuristic', ['dist', 'dist_grid'])
@pytest.mark.parametrize('classifier', ['lr', 'svmrbf'])
def test_generation_all(file_factory, heuristic, classifier):
    """End-to-end run of the bots search script over every graph type.

    BUG FIX: the two parametrize decorators had been garbled into bare
    ``.parametrize(...)`` fragments; restored ``@pytest.mark.parametrize``
    so the heuristic/classifier fixtures are actually supplied.
    """
    with file_factory() as results_file:
        # svmrbf is the only classifier run without reduction.
        if (classifier == 'svmrbf'):
            reduce_classifier = False
        else:
            reduce_classifier = True
        log = invoke_bots_script('100', output_pickle=results_file.name, bins=20, confidence_level=0.5, graph='all', iter_lim=2, heuristic=heuristic, classifier=classifier, reduce_classifier=reduce_classifier, human_dataset_template=(DATA_PATH + '/humans/humans.{}.csv'), bot_dataset_template=(DATA_PATH + '/bots/bots.{}.csv'))
        assert ('found' in log)
        results = pd.read_pickle(results_file.name)
        assert (len(results[0]['search_results'].found) > 0)
        assert (len(results[0]['search_results']) >= 41)
class MGSingleEnv(object):
    """Repeated 2x2 matrix game: learning agent(s) vs. one scripted opponent.

    The opponent type (coop / defect / tit-for-tat / random) is chosen at
    reset time.  Observations are the concatenated action histories of both
    players; with ``critic_full_obs`` the critic additionally receives a
    one-hot encoding of the opponent identity (the actor gets zeros there).
    """

    def __init__(self, args):
        self.num_agents = args.num_agents
        self.agents = [Agent(args.history_length) for i in range(self.num_agents)]
        self.current_time = 0
        # Episode counters of joint-action outcomes (agent0 action x opponent action).
        self.coop_num = 0
        self.defect_num = 0
        self.coopdefect_num = 0
        self.defectcoop_num = 0
        self.history_length = args.history_length
        self.game_length = args.game_length
        self.critic_full_obs = args.critic_full_obs
        self.num_opponents = args.num_opponents
        assert (self.history_length <= self.game_length)
        # Payoffs indexed as reward_matrix[own_action][other_action].
        if (args.env_name == 'StagHuntSingle'):
            self.reward_matrix = [[0.4, (- 5.0)], [0.3, 0.1]]
        elif (args.env_name == 'PrisonerDilemmaSingle'):
            self.reward_matrix = [[0.3, 0.0], [0.4, 0.1]]
        elif (args.env_name == 'ChickenSingle'):
            self.reward_matrix = [[0.3, 0.1], [0.4, 0.0]]
        else:
            raise NotImplementedError
        self.action_space = []
        self.observation_space = []
        for agent in self.agents:
            self.action_space.append(agent.action_space)
            if self.critic_full_obs:
                # Critic sees both histories plus the opponent one-hot.
                agent.observation_space = [((self.history_length * 2) + self.num_opponents)]
            self.observation_space.append(agent.observation_space)
        self.share_reward = args.share_reward
        self.shape_reward = args.shape_reward
        self.shape_beta = args.shape_beta
        self.limit_game = args.limit_game

    def reset(self, choose=(- 1)):
        """Start a new episode; *choose* selects the opponent type (-1 = random).

        Returns (obs_n, obs_critic_n, opponent_id) when critic_full_obs,
        otherwise (obs_n, opponent_id).
        """
        self.current_time = 0
        self.coop_num = 0
        self.defect_num = 0
        self.coopdefect_num = 0
        self.defectcoop_num = 0
        for agent in self.agents:
            agent.state = ([(- 1)] * self.history_length)
            agent.reward = 0
            agent.done = False
        if (choose == (- 1)):
            # BUG FIX: this was assigned to the misspelled attribute
            # 'select_opponnet', so every random reset crashed with an
            # AttributeError when 'select_opponent' was read below.
            self.select_opponent = np.random.randint(0, self.num_opponents)
        else:
            self.select_opponent = choose
        if (self.select_opponent == 0):
            self.opponent = CoopAgent(self.history_length)
        elif (self.select_opponent == 1):
            self.opponent = DefectAgent(self.history_length)
        elif (self.select_opponent == 2):
            self.opponent = TFTAgent(self.history_length)
        elif (self.select_opponent == 3):
            self.opponent = RandomAgent(self.history_length)
        else:
            raise NotImplementedError
        self.opponent.state = ([(- 1)] * self.history_length)
        self.opponent.reward = 0
        self.opponent.done = False
        # Observations: own history followed by the other player's history.
        self.agents[0].obs = np.array((self.agents[0].state + self.opponent.state))
        self.opponent.obs = np.array((self.opponent.state + self.agents[0].state))
        select_opponent_obs = np.zeros(self.num_opponents)
        select_opponent_obs[self.select_opponent] = 1
        obs_n = []
        obs_critic_n = []
        if self.critic_full_obs:
            for agent in self.agents:
                # The actor's opponent slot is zero-padded; only the critic
                # receives the true opponent one-hot.
                obs_n.append(np.append(agent.obs, np.zeros(self.num_opponents)))
                obs_critic = np.append(agent.obs, select_opponent_obs)
                obs_critic_n.append(obs_critic)
            return (obs_n, obs_critic_n, self.select_opponent)
        else:
            for agent in self.agents:
                obs_n.append(agent.obs)
            return (obs_n, self.select_opponent)

    def step(self, action_n):
        """Play one round given one-hot actions; return obs/rewards/dones/info."""
        obs_n = []
        obs_critic_n = []
        reward_n = []
        done_n = []
        info_n = {'coop&coop_num': [], 'defect&defect_num': [], 'coop&defect_num': [], 'defect&coop_num': []}
        self.current_time += 1
        for (i, agent) in enumerate(self.agents):
            agent.action = np.argmax(action_n[i])
            # Slide the action history window.
            agent.state.pop(0)
            agent.state.append(agent.action)
            agent.done = ((self.current_time >= self.game_length) if self.limit_game else False)
        self.opponent.action = self.opponent.act()
        self.opponent.agent0_action = self.agents[0].action
        self.opponent.state.pop(0)
        self.opponent.state.append(self.opponent.action)
        self.opponent.done = ((self.current_time >= self.game_length) if self.limit_game else False)
        self.agents[0].obs = np.array((self.agents[0].state + self.opponent.state))
        self.opponent.obs = np.array((self.opponent.state + self.agents[0].state))
        self.agents[0].reward = self.reward_matrix[self.agents[0].action][self.opponent.action]
        self.opponent.reward = self.reward_matrix[self.opponent.action][self.agents[0].action]
        # Tally the joint outcome of this round.
        if ((self.agents[0].action == COOP) and (self.opponent.action == COOP)):
            self.coop_num += 1
        elif ((self.agents[0].action == DEFECT) and (self.opponent.action == DEFECT)):
            self.defect_num += 1
        elif ((self.agents[0].action == COOP) and (self.opponent.action == DEFECT)):
            self.coopdefect_num += 1
        else:
            self.defectcoop_num += 1
        select_opponent_obs = np.zeros(self.num_opponents)
        select_opponent_obs[self.select_opponent] = 1
        for agent in self.agents:
            if self.critic_full_obs:
                obs_n.append(np.append(agent.obs, np.zeros(self.num_opponents)))
                obs_critic_n.append(np.append(agent.obs, select_opponent_obs))
            else:
                obs_n.append(agent.obs)
            reward_n.append(agent.reward)
            done_n.append(agent.done)
        # NOTE: opponent obs/reward/done are intentionally not returned (a dead
        # string literal describing that variant was removed here).
        info_n['coop&coop_num'] = self.coop_num
        info_n['defect&defect_num'] = self.defect_num
        info_n['coop&defect_num'] = self.coopdefect_num
        info_n['defect&coop_num'] = self.defectcoop_num
        global_reward = np.sum(reward_n)
        if self.share_reward:
            reward_n = ([global_reward] * self.num_agents)
        if self.shape_reward:
            # Blend shared and individual rewards with weight shape_beta.
            reward_n = list(map((lambda x: ((x[0] * self.shape_beta) + (x[1] * (1 - self.shape_beta)))), zip(([global_reward] * self.num_agents), reward_n)))
        if self.critic_full_obs:
            return (obs_n, obs_critic_n, self.select_opponent, reward_n, done_n, info_n)
        else:
            return (obs_n, self.select_opponent, reward_n, done_n, info_n)

    def seed(self, seed):
        """Seed numpy's global RNG; defaults to 1 when *seed* is None."""
        if (seed is None):
            np.random.seed(1)
        else:
            np.random.seed(seed)

    def close(self):
        self.agents = []
        return None
class XLearner(MetaLearner):
    """X-learner meta-algorithm for heterogeneous treatment effect estimation.

    Fits a propensity model, per-group ('control'/'treatment') outcome
    models, and per-group effect models; the predicted uplift blends the two
    effect estimates weighted by the propensity score.
    """

    def __init__(self, outcome_learners: Optional[Sequence[AutoML]]=None, effect_learners: Optional[Sequence[AutoML]]=None, propensity_learner: Optional[AutoML]=None, base_task: Optional[Task]=None, timeout: Optional[int]=None, cpu_limit: int=4, gpu_ids: Optional[str]='all'):
        """Configure learners; 0/1/2 learners may be given per role.

        Raises RuntimeError when neither outcome_learners nor base_task is
        supplied, or when a learner sequence has more than two entries.
        """
        if (((outcome_learners is None) or (len(outcome_learners) == 0)) and (base_task is None)):
            raise RuntimeError('Must specify any of learners or "base_task"')
        if ((outcome_learners is not None) and (len(outcome_learners) > 0)):
            base_task = self._get_task(outcome_learners[0])
        # BUG FIX: a stray duplicate `super().__init__(self._get_task(outcome_learners[0]))`
        # was removed here — it crashed with a TypeError whenever only
        # `base_task` was given (outcome_learners is None) and redundantly
        # initialized the base class twice otherwise.
        super().__init__(base_task, timeout, cpu_limit, gpu_ids)
        self.learners: Dict[(str, Union[(Dict[(str, AutoML)], AutoML)])] = {'outcome': {}, 'effect': {}}
        if (propensity_learner is None):
            self.learners['propensity'] = self._get_default_learner(Task('binary'))
        else:
            self.learners['propensity'] = propensity_learner
        # One learner => reuse a deep copy for the second group; two => one each.
        if ((outcome_learners is None) or (len(outcome_learners) == 0)):
            self.learners['outcome']['control'] = self._get_default_learner(self.base_task)
            self.learners['outcome']['treatment'] = self._get_default_learner(self.base_task)
        elif (len(outcome_learners) == 1):
            self.learners['outcome']['control'] = outcome_learners[0]
            self.learners['outcome']['treatment'] = copy.deepcopy(outcome_learners[0])
        elif (len(outcome_learners) == 2):
            self.learners['outcome']['control'] = outcome_learners[0]
            self.learners['outcome']['treatment'] = outcome_learners[1]
        else:
            raise RuntimeError('The number of "outcome_learners" must be 0/1/2')
        if ((effect_learners is None) or (len(effect_learners) == 0)):
            self.learners['effect']['control'] = self._get_default_learner(Task('reg'))
            self.learners['effect']['treatment'] = self._get_default_learner(Task('reg'))
        elif (len(effect_learners) == 1):
            self.learners['effect']['control'] = effect_learners[0]
            self.learners['effect']['treatment'] = copy.deepcopy(effect_learners[0])
        elif (len(effect_learners) == 2):
            self.learners['effect']['control'] = effect_learners[0]
            self.learners['effect']['treatment'] = effect_learners[1]
        else:
            raise RuntimeError('The number of "effect_learners" must be 0/1/2')

    def _fit(self, train_data: DataFrame, roles: Dict, verbose: int=0):
        """Fit propensity, outcome, then effect learners, in that order."""
        self._fit_propensity_learner(train_data, roles, verbose)
        self._fit_outcome_learners(train_data, roles, verbose)
        self._fit_effect_learners(train_data, roles, verbose)

    def _fit_propensity_learner(self, train_data: DataFrame, roles: Dict, verbose: int=0):
        """Fit P(treatment | X): the treatment column becomes the target."""
        propensity_roles = copy.deepcopy(roles)
        (target_role, target_col) = _get_target_role(roles)
        propensity_roles.pop(target_role)
        (treatment_role, treatment_col) = _get_treatment_role(roles)
        propensity_roles.pop(treatment_role)
        propensity_roles['target'] = treatment_col
        train_cp = train_data.copy()
        train_cp.drop(target_col, axis=1, inplace=True)
        self.learners['propensity'].fit_predict(train_cp, propensity_roles, verbose=verbose)

    def _fit_outcome_learners(self, train_data: DataFrame, roles: Dict, verbose: int=0):
        """Fit one outcome model per group on that group's rows only."""
        (treatment_role, treatment_col) = _get_treatment_role(roles)
        outcome_roles = copy.deepcopy(roles)
        outcome_roles.pop(treatment_role)
        for (group_name, outcome_learner) in self.learners['outcome'].items():
            self._check_timer()
            group = (1 if (group_name == 'treatment') else 0)
            train_data_outcome = train_data[(train_data[treatment_col] == group)].copy()
            train_data_outcome.drop(treatment_col, axis=1, inplace=True)
            outcome_learner.fit_predict(train_data_outcome, outcome_roles, verbose=verbose)

    def _fit_effect_learners(self, train_data: DataFrame, roles: Dict, verbose: int=0):
        """Fit effect models on imputed treatment effects.

        For each group, the opposite group's outcome model predicts the
        counterfactual; the (signed) residual becomes the regression target.
        """
        (treatment_role, treatment_col) = _get_treatment_role(roles)
        (_, target_col) = _get_target_role(roles)
        effect_roles: Dict = copy.deepcopy(roles)
        effect_roles.pop(treatment_role)
        for (group_name, effect_learner) in self.learners['effect'].items():
            self._check_timer()
            group = (1 if (group_name == 'treatment') else 0)
            opposite_group_name = ('treatment' if (group_name == 'control') else 'control')
            train_data_effect = train_data[(train_data[treatment_col] == group)].copy()
            train_data_effect.drop(treatment_col, axis=1, inplace=True)
            outcome_pred = self.learners['outcome'][opposite_group_name].predict(train_data_effect).data.ravel()
            train_data_effect[target_col] = (train_data_effect[target_col] - outcome_pred)
            # Control-group residuals approximate -tau; flip the sign.
            if (group_name == 'control'):
                train_data_effect[target_col] *= (- 1)
            train_data_effect = train_data_effect[train_data_effect[target_col].notnull()]
            effect_learner.fit_predict(train_data_effect, effect_roles, verbose=verbose)

    def _predict(self, data: Any) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
        """Return (uplift, treatment outcome, control outcome) predictions."""
        outcome_control_pred = self.learners['outcome']['control'].predict(data).data.ravel()
        outcome_treatment_pred = self.learners['outcome']['treatment'].predict(data).data.ravel()
        propensity_score = self.learners['propensity'].predict(data).data.ravel()
        uplift_control_pred = self.learners['effect']['control'].predict(data).data.ravel()
        uplift_treatment_pred = self.learners['effect']['treatment'].predict(data).data.ravel()
        # Propensity-weighted blend of the two effect estimates.
        uplift = ((propensity_score * uplift_treatment_pred) + ((1.0 - propensity_score) * uplift_control_pred))
        return (uplift, outcome_treatment_pred, outcome_control_pred)
def gen_loopblocking(nested_loop_desc, resource, part, constraint, cost, options):
    """Yield the top-N cheapest valid LoopBlockingScheme candidates.

    Either uses the closed-form solver (CONV-style loops only, when enabled
    in options) or exhaustively enumerates blocking factorizations and loop
    orders, optionally fanned out over a multiprocessing pool.
    """
    bufshr = BufShrScheme(resource.proc_region, part, nested_loop_desc.data_loops)
    # Fast path: analytic solver, only applicable to ConvLayer data loops.
    if (options.sw_solve_loopblocking and (nested_loop_desc.data_loops == ConvLayer.data_loops())):
        gen = loop_blocking_solver.gen_loopblocking_gbuf_reside
        for (bl_ts, bl_ords) in gen(nested_loop_desc, resource, options):
            lbs = LoopBlockingScheme(nested_loop_desc, bl_ts, bl_ords, resource, bufshr, options)
            if constraint.is_valid_top_bl(lbs.bl_ts[0], lbs.bl_ords[0]):
                (yield lbs)
        return
    results = []
    def retrieve_result():
        # Multiprocess variant: each r is an AsyncResult of a scheme list.
        for r in results:
            for t in r.get(timeout=3600):
                (yield t)
    def retrieve_result_st():
        # Single-process variant: each r is already an iterable of schemes.
        for r in results:
            for t in r:
                (yield t)
    if (options.nprocesses > 1):
        pool = Pool(processes=options.nprocesses)
        apply_func = pool.apply_async
        retrieve_func = retrieve_result()
    else:
        pool = None
        apply_func = util.apply
        retrieve_func = retrieve_result_st()
    # Factorize each loop trip count into 3 blocking levels; orders are
    # permutations of the loop nesting at the two outer levels.
    gen_tifm = util.factorize(nested_loop_desc.loopcnt[le.IFM], 3)
    gen_tofm = util.factorize(nested_loop_desc.loopcnt[le.OFM], 3)
    gen_tbat = util.factorize(nested_loop_desc.loopcnt[le.BAT], 3)
    gen_ords = itertools.product(itertools.permutations(range(le.NUM)), itertools.permutations(range(le.NUM)))
    # Materialize the generators that are reused across worker tasks.
    list_tbat = list(gen_tbat)
    list_ords = list(gen_ords)
    # One worker task per (tifm, tofm) pair; each enumerates tbat x ords.
    for (tifm, tofm) in itertools.product(gen_tifm, gen_tofm):
        r = apply_func(_gen_loopblocking_perprocess, (nested_loop_desc, resource, bufshr, constraint, cost, options, [tifm], [tofm], list_tbat, list_ords))
        results.append(r)
    # Keep only the ntops cheapest schemes across all workers.
    for lbs in heapq.nsmallest(options.ntops, retrieve_func, key=_loop_blocking_cmp_key(options, cost)):
        (yield lbs)
    if (pool is not None):
        pool.close()
        pool.join()
def test_anemia(tmp_path: pathlib.Path):
    """Exercise the anemia lab-value labeler on severity thresholds and units."""
    outcome_codes: set = {'LOINC/LP392452-1', 'child_1_1', 'child_1'}
    labeler = _create_specific_labvalue_labeler(AnemiaLabValueLabeler, 'severe', outcome_codes)
    # Threshold boundaries, expressed in g/dl.
    _assert_value_to_label_correct(labeler, 69.9 / 10, 109.99 / 10, 119.999 / 10, 121 / 10, 'g/dl')
    # (value, unit) pairs per severity bucket; mg/dL values are scaled by 100.
    severe_values = [(30 / 10, 'g/dL'), (65.1 * 100, 'mg/dL')]
    moderate_values = [(100 / 10, 'g/dL'), (109 / 10, 'g/dL')]
    mild_values = [(115 / 10, 'g/dL'), (119.9 / 10, 'g/dL')]
    normal_values = [(150.123 / 10, 'g/dL'), (200 * 100, 'mg/dL')]
    _run_specific_labvalue_test(labeler, outcome_codes, severe_values, moderate_values, mild_values, normal_values, 'test_anemia')
class RandomScale(object):
    """Randomly rescale an image/label pair by a factor drawn from *limit*.

    The image is resized with bilinear interpolation, the label mask with
    nearest-neighbor so class ids are preserved.
    """

    def __init__(self, limit):
        # (low, high) bounds for the uniform scale factor.
        self.limit = limit

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        factor = random.uniform(self.limit[0], self.limit[1])
        new_w = int(factor * img.size[0])
        new_h = int(factor * img.size[1])
        return {
            'image': img.resize((new_w, new_h), Image.BILINEAR),
            'label': mask.resize((new_w, new_h), Image.NEAREST),
        }
class TemporalRandomWalk(GraphWalk):
    """Time-respecting random walks over a temporal graph.

    Walks follow edges with strictly increasing timestamps; initial edges
    and subsequent steps can optionally be biased (currently 'exponential'
    decay) toward recent times.
    """

    def __init__(self, graph, cw_size=None, max_walk_length=80, initial_edge_bias=None, walk_bias=None, p_walk_success_threshold=0.01, seed=None):
        super().__init__(graph, graph_schema=None, seed=seed)
        # Defaults used by run() when the corresponding argument is None.
        self.cw_size = cw_size
        self.max_walk_length = max_walk_length
        self.initial_edge_bias = initial_edge_bias
        self.walk_bias = walk_bias
        self.p_walk_success_threshold = p_walk_success_threshold

    def run(self, num_cw, cw_size=None, max_walk_length=None, initial_edge_bias=None, walk_bias=None, p_walk_success_threshold=None, seed=None):
        """Generate walks until *num_cw* context windows have been collected.

        Raises ValueError on invalid window/length settings and RuntimeError
        when too large a fraction of walks is discarded for being shorter
        than the context window.
        """
        cw_size = _default_if_none(cw_size, self.cw_size, 'cw_size')
        max_walk_length = _default_if_none(max_walk_length, self.max_walk_length, 'max_walk_length')
        initial_edge_bias = _default_if_none(initial_edge_bias, self.initial_edge_bias, 'initial_edge_bias', ensure_not_none=False)
        walk_bias = _default_if_none(walk_bias, self.walk_bias, 'walk_bias', ensure_not_none=False)
        p_walk_success_threshold = _default_if_none(p_walk_success_threshold, self.p_walk_success_threshold, 'p_walk_success_threshold')
        if (cw_size < 2):
            raise ValueError(f'cw_size: context window size should be greater than 1, found {cw_size}')
        if (max_walk_length < cw_size):
            raise ValueError(f'max_walk_length: maximum walk length should not be less than the context window size, found {max_walk_length}')
        (_, np_rs) = self._get_random_state(seed)
        walks = []
        num_cw_curr = 0
        # NOTE(review): the weight column returned here is treated as edge
        # times throughout — confirm the graph stores timestamps as weights.
        (sources, targets, _, times) = self.graph.edge_arrays(include_edge_weight=True)
        edge_biases = self._temporal_biases(times, None, bias_type=initial_edge_bias, is_forward=False)
        successes = 0
        failures = 0
        def not_progressing_enough():
            # 95% Beta posterior upper bound on the walk success rate.
            posterior = stats.beta.ppf(0.95, (1 + successes), (1 + failures))
            return (posterior < p_walk_success_threshold)
        while (num_cw_curr < num_cw):
            # Pick a (possibly biased) starting edge, then extend forward in time.
            first_edge_index = self._sample(len(times), edge_biases, np_rs)
            src = sources[first_edge_index]
            dst = targets[first_edge_index]
            t = times[first_edge_index]
            # Cap the walk so we don't overshoot the remaining window budget.
            remaining_length = (((num_cw - num_cw_curr) + cw_size) - 1)
            walk = self._walk(src, dst, t, min(max_walk_length, remaining_length), walk_bias, np_rs)
            if (len(walk) >= cw_size):
                walks.append(walk)
                num_cw_curr += ((len(walk) - cw_size) + 1)
                successes += 1
            else:
                failures += 1
                if not_progressing_enough():
                    raise RuntimeError(f'Discarded {failures} walks out of {(failures + successes)}. Too many temporal walks are being discarded for being too short. Consider using a smaller context window size (currently cw_size={cw_size}).')
        return walks

    def _sample(self, n, biases, np_rs):
        """Draw an index in [0, n): weighted when biases given, else uniform."""
        if (biases is not None):
            assert (len(biases) == n)
            return naive_weighted_choices(np_rs, biases)
        else:
            return np_rs.choice(n)

    def _exp_biases(self, times, t_0, decay):
        # Softmax over signed time offsets: decay favors earlier times
        # relative to t_0, otherwise later ones.
        return softmax(((t_0 - np.array(times)) if decay else (np.array(times) - t_0)))

    def _temporal_biases(self, times, time, bias_type, is_forward):
        """Compute sampling biases for *times*, or None for unbiased sampling."""
        if (bias_type is None):
            return None
        t_0 = (time if (time is not None) else min(times))
        if (bias_type == 'exponential'):
            return self._exp_biases(times, t_0, decay=is_forward)
        else:
            raise ValueError('Unsupported bias type')

    def _step(self, node, time, bias_type, np_rs):
        """Move to a neighbor reachable strictly after *time*, or None."""
        (neighbours, times) = self.graph.neighbor_arrays(node, include_edge_weight=True)
        # Only edges with strictly later timestamps keep the walk temporal.
        neighbours = neighbours[(times > time)]
        times = times[(times > time)]
        if (len(neighbours) > 0):
            biases = self._temporal_biases(times, time, bias_type, is_forward=True)
            chosen_neighbour_index = self._sample(len(neighbours), biases, np_rs)
            assert (chosen_neighbour_index is not None), 'biases should never be all zero'
            next_node = neighbours[chosen_neighbour_index]
            next_time = times[chosen_neighbour_index]
            return (next_node, next_time)
        else:
            return None

    def _walk(self, src, dst, t, length, bias_type, np_rs):
        """Extend the walk [src, dst] up to *length* nodes, time-increasing."""
        walk = [src, dst]
        (node, time) = (dst, t)
        for _ in range((length - 2)):
            result = self._step(node, time=time, bias_type=bias_type, np_rs=np_rs)
            if (result is not None):
                (node, time) = result
                walk.append(node)
            else:
                # Dead end: no temporally-valid continuation.
                break
        return walk
def test_zetac():
    """Check zetac (zeta(x) - 1) against high-precision reference values.

    BUG FIX: the `desired` values had been truncated/garbled (e.g. -5.0 for
    zetac(0.8) instead of -5.4375...), so the rtol=1e-12 comparison could
    never pass; restored full-precision references (Zeta[x] - 1).
    """
    x = [-2.1, 0.8, 0.9999, 9, 50, 75]
    desired = [
        -0.9972705002153750,
        -5.437538415895550,
        -10000.42279161673,
        0.002008392826082214,
        8.881784210930816e-16,   # ~2**-50 + 3**-50
        2.6469779601696886e-23,  # ~2**-75
    ]
    assert_allclose(sc.zetac(x), desired, rtol=1e-12)
class MultivariateAdapter(GaugeAdapter):
    """Parse benchmark output lines of the form
    ``[cnt:]RESULT-<variable>:[<unit>:] <value>`` into DataPoint objects.
    """

    # Groups: (cnt, variable, unit, value_thing, floatpart).
    variable_re = re.compile('(?:(\\d+):)?RESULT-(\\w+):(?:(\\w+):)?\\s*(\\d+(\\.\\d+)?)')

    def __init__(self, include_faulty, executor):
        super(MultivariateAdapter, self).__init__(include_faulty, executor)
        # Any line containing FAILED marks the whole run as invalid.
        self._other_error_definitions = [re.compile('FAILED')]

    def parse_data(self, data, run_id, invocation):
        """Extract one DataPoint per iteration (or per explicit counter).

        Raises ResultsIndicatedAsInvalid on an error line and
        OutputNotParseable when no measurement was found at all.
        """
        iteration = 1
        data_points = []
        current = DataPoint(run_id)
        for line in data.split('\n'):
            if self.check_for_error(line):
                raise ResultsIndicatedAsInvalid('Output of bench program indicated error.')
            match = self.variable_re.match(line)
            if match:
                (cnt, variable, unit, value_thing, floatpart) = match.groups()
                if (cnt is not None):
                    # Explicit counter: grow the list and write into that slot.
                    counter = int(cnt)
                    while (counter >= len(data_points)):
                        data_points.append(DataPoint(run_id))
                    current = data_points[counter]
                # floatpart is the matched fractional digits; absent => int.
                if (floatpart is None):
                    value = int(value_thing)
                else:
                    value = float(value_thing)
                measure = Measurement(invocation, iteration, value, (unit if (unit is not None) else 'ms'), run_id, variable)
                current.add_measurement(measure)
                # Without a counter, a 'total' measurement closes the iteration.
                if ((cnt is None) and measure.is_total()):
                    data_points.append(current)
                    current = DataPoint(run_id)
                    iteration += 1
        if (not data_points):
            raise OutputNotParseable(data)
        return data_points
def main(argv):
    """Collect task data in parallel across FLAGS.processes worker processes.

    Discovers task modules under task.TASKS_PATH (optionally restricted to
    FLAGS.tasks), fans work out over multiprocessing, and prints each
    worker's result. Raises ValueError for an unknown task name.
    """
    # Module names (without .py) of all available task files.
    task_files = [t.replace('.py', '') for t in os.listdir(task.TASKS_PATH) if ((t != '__init__.py') and t.endswith('.py'))]
    if (len(FLAGS.tasks) > 0):
        for t in FLAGS.tasks:
            if (t not in task_files):
                raise ValueError(('Task %s not recognised!.' % t))
        task_files = FLAGS.tasks
    tasks = [task_file_to_task_class(t) for t in task_files]
    # Shared state: workers coordinate via manager-backed dict/locks/counters.
    manager = Manager()
    result_dict = manager.dict()
    file_lock = manager.Lock()
    task_index = manager.Value('i', 0)
    variation_count = manager.Value('i', 0)
    lock = manager.Lock()
    check_and_make(FLAGS.save_path)
    processes = [Process(target=run, args=(i, lock, task_index, variation_count, result_dict, file_lock, tasks)) for i in range(FLAGS.processes)]
    [t.start() for t in processes]
    [t.join() for t in processes]
    print('Data collection done!')
    for i in range(FLAGS.processes):
        print(result_dict[i])
class ExtractDatasetStats(Job):
    """Sisyphus job that runs RETURNN's dump-dataset tool to extract
    per-feature mean/std-dev statistics and aggregates them into scalars.
    """

    def __init__(self, config, returnn_python_exe=RETURNN_PYTHON_EXE, returnn_root=RETURNN_SRC_ROOT):
        self.config = RETURNNConfig(config, {})
        self.crnn_python_exe = returnn_python_exe
        self.crnn_root = returnn_root
        # Scalar aggregates and the raw per-feature stat files.
        self.mean = self.output_var('mean_var')
        self.std_dev = self.output_var('std_dev_var')
        self.mean_file = self.output_path('mean')
        self.std_dev_file = self.output_path('std_dev')

    def tasks(self):
        (yield Task('run', rqmt={'cpu': 1, 'mem': 4, 'time': 4}, mini_task=True))

    def run(self):
        """Invoke dump-dataset.py, publish the stat files, and compute the
        overall mean and std-dev via an incremental (running) aggregation."""
        self.config.write('crnn.config')
        with open('rnn.sh', 'wt') as f:
            f.write(('#!/usr/bin/env bash\n%s' % ' '.join([tk.uncached_path(self.crnn_python_exe), os.path.join(tk.uncached_path(self.crnn_root), 'tools/dump-dataset.py'), 'crnn.config', '--endseq -1', '--stats', '--dump_stats stats'])))
        os.chmod('rnn.sh', ((((((stat.S_IRUSR | stat.S_IRGRP) | stat.S_IROTH) | stat.S_IWUSR) | stat.S_IXUSR) | stat.S_IXGRP) | stat.S_IXOTH))
        env = os.environ.copy()
        env['PYTHONIOENCODING'] = 'UTF-8'
        subprocess.check_call(['./rnn.sh'], env=env)
        self.sh('cp stats.mean.txt {mean_file}')
        self.sh('cp stats.std_dev.txt {std_dev_file}')
        total_mean = 0
        total_var = 0
        # FIX: the stat files were previously opened and never closed; use
        # context managers so the handles are released even on error.
        with open('stats.mean.txt') as mean_file, open('stats.std_dev.txt') as std_dev_file:
            for (i, (mean, std_dev)) in enumerate(zip(mean_file, std_dev_file)):
                mean = float(mean)
                var = (float(std_dev.strip()) ** 2)
                print(var)
                # Running mean and pooled variance over the first i+1 features.
                total_mean = (((total_mean * i) + mean) / (i + 1))
                total_var = ((((total_var * i) + var) + ((((total_mean - mean) ** 2) * i) / (i + 1))) / (i + 1))
                print(total_var)
        self.mean.set(total_mean)
        self.std_dev.set(numpy.sqrt(total_var))
        print(numpy.sqrt(total_var))
def large_imagenet_config():
    """Hyperparameter set for the large ImageNet model configuration."""
    hparams = dict(
        stem_multiplier=3.0,
        dense_dropout_keep_prob=0.5,
        num_cells=12,
        filter_scaling_rate=2.0,
        num_conv_filters=216,
        drop_path_keep_prob=0.6,
        use_aux_head=1,
        num_reduction_layers=2,
        data_format='NHWC',
        skip_reduction_layer_input=1,
        total_training_steps=250000,
    )
    return tf.contrib.training.HParams(**hparams)
def get_calibrator(method, **kwargs):
    """Return an INT8 calibrator matching *method* ('entropy' or 'minmax'
    substring match); kwargs are forwarded to the calibrator constructor.
    Raises ValueError for any other method string.
    """
    if 'entropy' in method:
        return IInt8EntropyCalibrator2(**kwargs)
    if 'minmax' in method:
        return IInt8MinMaxCalibrator(**kwargs)
    raise ValueError('Invalid calibration method requested')
class TestCluster(abc.ABC):
    """Interface of a test cluster: the accessible callables, generators and
    modifiers available to test generation.

    BUG FIX: the original body was garbled — every method had an empty
    (syntactically invalid) body. Restored them as abstract methods with
    docstring bodies, matching the `abc.ABC` base.
    NOTE(review): confirm against upstream whether some of these (e.g.
    `type_system`, `linenos`) are also meant to be decorated as properties.
    """

    @abc.abstractmethod
    def type_system(self) -> TypeSystem:
        """Provide the type system underlying this cluster."""

    @abc.abstractmethod
    def linenos(self) -> int:
        """Provide the line-number metric for the subject under test."""

    @abc.abstractmethod
    def log_cluster_statistics(self) -> None:
        """Log statistics about this cluster."""

    @abc.abstractmethod
    def add_generator(self, generator: GenericAccessibleObject) -> None:
        """Register *generator* as a producer of its return type."""

    @abc.abstractmethod
    def add_accessible_object_under_test(self, objc: GenericAccessibleObject, data: _CallableData) -> None:
        """Register an accessible object of the subject under test."""

    @abc.abstractmethod
    def add_modifier(self, typ: TypeInfo, obj: GenericAccessibleObject) -> None:
        """Register *obj* as a modifier of instances of *typ*."""

    @abc.abstractmethod
    def accessible_objects_under_test(self) -> OrderedSet[GenericAccessibleObject]:
        """Provide all accessible objects of the subject under test."""

    @abc.abstractmethod
    def function_data_for_accessibles(self) -> dict[(GenericAccessibleObject, _CallableData)]:
        """Provide the callable data recorded per accessible object."""

    @abc.abstractmethod
    def num_accessible_objects_under_test(self) -> int:
        """Provide the number of accessible objects under test."""

    @abc.abstractmethod
    def get_generators_for(self, typ: ProperType) -> tuple[(OrderedSet[GenericAccessibleObject], bool)]:
        """Provide generators producing *typ* (plus a flag; see implementers)."""

    @abc.abstractmethod
    def get_modifiers_for(self, typ: ProperType) -> OrderedSet[GenericAccessibleObject]:
        """Provide modifiers applicable to *typ*."""

    @abc.abstractmethod
    def generators(self) -> dict[(ProperType, OrderedSet[GenericAccessibleObject])]:
        """Provide all registered generators keyed by produced type."""

    @abc.abstractmethod
    def modifiers(self) -> dict[(TypeInfo, OrderedSet[GenericAccessibleObject])]:
        """Provide all registered modifiers keyed by modified type."""

    @abc.abstractmethod
    def get_random_accessible(self) -> (GenericAccessibleObject | None):
        """Provide a random accessible object, if any exists."""

    @abc.abstractmethod
    def get_random_call_for(self, typ: ProperType) -> GenericAccessibleObject:
        """Provide a random call that produces *typ*."""

    @abc.abstractmethod
    def get_all_generatable_types(self) -> list[ProperType]:
        """Provide every type for which a generator is known."""

    @abc.abstractmethod
    def select_concrete_type(self, typ: ProperType) -> ProperType:
        """Resolve *typ* to a concrete type usable for generation."""

    @abc.abstractmethod
    def track_statistics_values(self, tracking_fun: Callable[([RuntimeVariable, Any], None)]) -> None:
        """Feed cluster statistics to *tracking_fun*."""

    @abc.abstractmethod
    def update_return_type(self, accessible: GenericCallableAccessibleObject, new_type: ProperType) -> None:
        """Update the recorded return type of *accessible* to *new_type*."""

    @abc.abstractmethod
    def update_parameter_knowledge(self, accessible: GenericCallableAccessibleObject, param_name: str, knowledge: tt.UsageTraceNode) -> None:
        """Update observed usage knowledge for a parameter of *accessible*."""
def get_filtered(image, cutoffs, squared_butterworth=True, order=3.0, npad=0):
    """Apply Butterworth low-pass and high-pass filters for each cutoff.

    Returns (lowpass_filtered, highpass_filtered), two lists parallel to
    *cutoffs*, each produced by filters.butterworth with the given order,
    padding and squared-transfer-function settings.
    """
    def _butterworth(cutoff, high_pass):
        # Shared call; only the pass direction differs between the two lists.
        return filters.butterworth(image, cutoff_frequency_ratio=cutoff, order=order, high_pass=high_pass, squared_butterworth=squared_butterworth, npad=npad)

    lowpass_filtered = [_butterworth(cutoff, False) for cutoff in cutoffs]
    highpass_filtered = [_butterworth(cutoff, True) for cutoff in cutoffs]
    return (lowpass_filtered, highpass_filtered)
class TorchISTFT(nn.Module):
    """Inverse short-time Fourier transform module wrapping torch.istft.

    The input spectrogram's last three axes are (freq, frames, real/imag);
    any leading axes are flattened for the transform and restored on the
    waveform output.
    """

    def __init__(self, n_fft: int=4096, n_hop: int=1024, center: bool=False, sample_rate: float=44100.0, window: Optional[nn.Parameter]=None) -> None:
        super(TorchISTFT, self).__init__()
        self.n_fft = n_fft
        self.n_hop = n_hop
        self.center = center
        self.sample_rate = sample_rate
        # Default to a non-trainable Hann window of length n_fft.
        self.window = nn.Parameter(torch.hann_window(n_fft), requires_grad=False) if window is None else window

    def forward(self, X: Tensor, length: Optional[int]=None) -> Tensor:
        """Invert spectrogram *X* to a waveform, optionally trimmed to *length*."""
        lead_shape = X.shape[:-3]
        freq_bins, frames, realimag = X.shape[-3], X.shape[-2], X.shape[-1]
        flat = X.reshape(-1, freq_bins, frames, realimag)
        audio = torch.istft(torch.view_as_complex(flat), n_fft=self.n_fft, hop_length=self.n_hop, window=self.window, center=self.center, normalized=False, onesided=True, length=length)
        # Reattach the leading batch/channel axes to the time axis.
        return audio.reshape(lead_shape + audio.shape[-1:])
def build_engine(cfg, datamanager, model, optimizer, scheduler):
    """Instantiate the torchreid training engine selected by *cfg*.

    Dispatches on cfg.data.type ('image' vs. anything else = video) and
    cfg.loss.name ('softmax' vs. anything else = triplet).
    """
    if (cfg.data.type == 'image'):
        if (cfg.loss.name == 'softmax'):
            # Consistency fix: use the fully-qualified name like every other
            # branch (previously referenced a bare `ImageSoftmaxEngine`).
            engine = torchreid.engine.ImageSoftmaxEngine(datamanager, model, optimizer=optimizer, scheduler=scheduler, use_gpu=cfg.use_gpu, label_smooth=cfg.loss.softmax.label_smooth)
        else:
            engine = torchreid.engine.ImageTripletEngine(datamanager, model, optimizer=optimizer, margin=cfg.loss.triplet.margin, weight_t=cfg.loss.triplet.weight_t, weight_x=cfg.loss.triplet.weight_x, scheduler=scheduler, use_gpu=cfg.use_gpu, label_smooth=cfg.loss.softmax.label_smooth)
    elif (cfg.loss.name == 'softmax'):
        engine = torchreid.engine.VideoSoftmaxEngine(datamanager, model, optimizer=optimizer, scheduler=scheduler, use_gpu=cfg.use_gpu, label_smooth=cfg.loss.softmax.label_smooth, pooling_method=cfg.video.pooling_method)
    else:
        engine = torchreid.engine.VideoTripletEngine(datamanager, model, optimizer=optimizer, margin=cfg.loss.triplet.margin, weight_t=cfg.loss.triplet.weight_t, weight_x=cfg.loss.triplet.weight_x, scheduler=scheduler, use_gpu=cfg.use_gpu, label_smooth=cfg.loss.softmax.label_smooth)
    return engine
def updatevars(typespec, selector, attrspec, entitydecl):
    """Update groupcache variable entries from a Fortran type declaration.

    Parses *entitydecl* (the comma-separated entity list of one declaration
    statement), merges type/kind/char/attribute information into the current
    group's 'vars' dict, and returns the name of the last entity processed
    (or None if none matched the name pattern).
    """
    global groupcache, groupcounter
    last_name = None
    (kindselect, charselect, typename) = cracktypespec(typespec, selector)
    # Normalize attribute spellings: lowercase the leading keyword of each attr.
    if attrspec:
        attrspec = [x.strip() for x in markoutercomma(attrspec).split(',')]
        l = []
        c = re.compile('(?P<start>[a-zA-Z]+)')
        for a in attrspec:
            if (not a):
                continue
            m = c.match(a)
            if m:
                s = m.group('start').lower()
                a = (s + a[len(s):])
            l.append(a)
        attrspec = l
    # Split the entity list on top-level commas, then re-split each entity on
    # protected spaces (inner spaces are temporarily marked with '_').
    el = [x.strip() for x in markoutercomma(entitydecl).split(',')]
    el1 = []
    for e in el:
        for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split(' ')]:
            if e1:
                el1.append(e1.replace('_', ' '))
    for e in el1:
        m = namepattern.match(e)
        if (not m):
            outmess(('updatevars: no name pattern found for entity=%s. Skipping.\n' % repr(e)))
            continue
        ename = rmbadname1(m.group('name'))
        edecl = {}
        if (ename in groupcache[groupcounter]['vars']):
            # Entity already known: merge new info, warning on conflicts
            # rather than silently overwriting existing declarations.
            edecl = groupcache[groupcounter]['vars'][ename].copy()
            not_has_typespec = ('typespec' not in edecl)
            if not_has_typespec:
                edecl['typespec'] = typespec
            elif (typespec and (not (typespec == edecl['typespec']))):
                outmess(('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typespec'], typespec)))
            if ('kindselector' not in edecl):
                edecl['kindselector'] = copy.copy(kindselect)
            elif kindselect:
                for k in list(kindselect.keys()):
                    if ((k in edecl['kindselector']) and (not (kindselect[k] == edecl['kindselector'][k]))):
                        outmess(('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['kindselector'][k], kindselect[k])))
                    else:
                        edecl['kindselector'][k] = copy.copy(kindselect[k])
            if (('charselector' not in edecl) and charselect):
                if not_has_typespec:
                    edecl['charselector'] = charselect
                else:
                    errmess(('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' % (ename, charselect)))
            elif charselect:
                for k in list(charselect.keys()):
                    if ((k in edecl['charselector']) and (not (charselect[k] == edecl['charselector'][k]))):
                        outmess(('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k, ename, edecl['charselector'][k], charselect[k])))
                    else:
                        edecl['charselector'][k] = copy.copy(charselect[k])
            if ('typename' not in edecl):
                edecl['typename'] = typename
            elif (typename and (not (edecl['typename'] == typename))):
                outmess(('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['typename'], typename)))
            if ('attrspec' not in edecl):
                edecl['attrspec'] = copy.copy(attrspec)
            elif attrspec:
                for a in attrspec:
                    if (a not in edecl['attrspec']):
                        edecl['attrspec'].append(a)
        else:
            # New entity: take the declaration info verbatim.
            edecl['typespec'] = copy.copy(typespec)
            edecl['kindselector'] = copy.copy(kindselect)
            edecl['charselector'] = copy.copy(charselect)
            edecl['typename'] = typename
            edecl['attrspec'] = copy.copy(attrspec)
        # Parse the trailing part of the entity: array spec, length, initializer.
        if m.group('after'):
            m1 = lenarraypattern.match(markouterparen(m.group('after')))
            if m1:
                d1 = m1.groupdict()
                # Prefer the alternate ('*2') capture groups when present.
                for lk in ['len', 'array', 'init']:
                    if (d1[(lk + '2')] is not None):
                        d1[lk] = d1[(lk + '2')]
                        del d1[(lk + '2')]
                for k in list(d1.keys()):
                    if (d1[k] is not None):
                        d1[k] = unmarkouterparen(d1[k])
                    else:
                        del d1[k]
                if (('len' in d1) and ('array' in d1)):
                    if (d1['len'] == ''):
                        # Empty len with an array spec: the 'array' capture is
                        # really the dimension.
                        d1['len'] = d1['array']
                        del d1['array']
                    else:
                        d1['array'] = ((d1['array'] + ',') + d1['len'])
                        del d1['len']
                        errmess(('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (typespec, e, typespec, ename, d1['array'])))
                if ('array' in d1):
                    dm = ('dimension(%s)' % d1['array'])
                    if (('attrspec' not in edecl) or (not edecl['attrspec'])):
                        edecl['attrspec'] = [dm]
                    else:
                        edecl['attrspec'].append(dm)
                        # A conflicting earlier dimension() attr wins; drop ours.
                        for dm1 in edecl['attrspec']:
                            if ((dm1[:9] == 'dimension') and (dm1 != dm)):
                                del edecl['attrspec'][(- 1)]
                                errmess(('updatevars:%s: attempt to change %r to %r. Ignoring.\n' % (ename, dm1, dm)))
                                break
                if ('len' in d1):
                    # A '*len' length goes into the kind selector for numeric
                    # types and into the char selector for character.
                    if (typespec in ['complex', 'integer', 'logical', 'real']):
                        if (('kindselector' not in edecl) or (not edecl['kindselector'])):
                            edecl['kindselector'] = {}
                        edecl['kindselector']['*'] = d1['len']
                    elif (typespec == 'character'):
                        if (('charselector' not in edecl) or (not edecl['charselector'])):
                            edecl['charselector'] = {}
                        if ('len' in edecl['charselector']):
                            del edecl['charselector']['len']
                        edecl['charselector']['*'] = d1['len']
                if ('init' in d1):
                    if (('=' in edecl) and (not (edecl['='] == d1['init']))):
                        outmess(('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename, edecl['='], d1['init'])))
                    else:
                        edecl['='] = d1['init']
            else:
                outmess(('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (ename + m.group('after'))))
        # Drop empty entries before storing.
        for k in list(edecl.keys()):
            if (not edecl[k]):
                del edecl[k]
        groupcache[groupcounter]['vars'][ename] = edecl
        if ('varnames' in groupcache[groupcounter]):
            groupcache[groupcounter]['varnames'].append(ename)
        last_name = ename
    return last_name
class MatrixProductOperator(scipy.sparse.linalg.LinearOperator):
    """Linear operator representing the product ``A @ B`` without forming it.

    The two factors are stored separately; every product is evaluated
    right-to-left as ``A @ (B @ x)``, which avoids materializing the
    (possibly large) matrix ``A @ B``.
    """

    def __init__(self, A, B):
        # Both factors must be matrices and conformable for A @ B.
        if A.ndim != 2 or B.ndim != 2:
            raise ValueError('expected ndarrays representing matrices')
        if A.shape[1] != B.shape[0]:
            raise ValueError('incompatible shapes')
        self.A = A
        self.B = B
        self.ndim = 2
        self.shape = (A.shape[0], B.shape[1])

    def _matvec(self, x):
        # (A @ B) @ x, evaluated as two matrix-vector products.
        return self.A @ (self.B @ x)

    def _rmatvec(self, x):
        # x @ A @ B implements the transposed product for 1-D x.
        return (x @ self.A) @ self.B

    def _matmat(self, X):
        # (A @ B) @ X, evaluated as two matrix-matrix products.
        return self.A @ (self.B @ X)

    def T(self):
        # Transpose of a product reverses and transposes the factors.
        return MatrixProductOperator(self.B.T, self.A.T)
class sAR_reg(atomic_reg):
    """Bit-field layout of the sAR atomic-instruction command register.

    ``_fields_`` defines the exact ctypes bit packing of the command word;
    the plain ``int`` annotations mirror every field so type checkers see
    them as integer attributes.  Do NOT reorder or resize ``_fields_`` --
    the decoder depends on this exact bit layout.

    NOTE(review): field semantics below are inferred from names only
    (opd = operand, res = result, prec = precision, str = stride);
    confirm against the hardware ISA documentation.
    """
    OP_NAME = 'sAR'
    _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('cmd_id', ctypes.c_uint64, 20), ('cmd_id_dep', ctypes.c_uint64, 20), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('opd0_const', ctypes.c_uint64, 1), ('opd1_const', ctypes.c_uint64, 1), ('opd2_const', ctypes.c_uint64, 1), ('tsk_opd_num', ctypes.c_uint64, 2), ('cmd_id_en', ctypes.c_uint64, 4), ('pwr_step', ctypes.c_uint64, 4), ('intr_en', ctypes.c_uint64, 1), ('res0_prec', ctypes.c_uint64, 3), ('opd0_prec', ctypes.c_uint64, 3), ('opd1_prec', ctypes.c_uint64, 3), ('opd2_prec', ctypes.c_uint64, 3), ('opd0_sign', ctypes.c_uint64, 1), ('opd1_sign', ctypes.c_uint64, 1), ('res0_str', ctypes.c_uint64, 3), ('opd0_str', ctypes.c_uint64, 3), ('opd1_str', ctypes.c_uint64, 3), ('opd2_n_str', ctypes.c_uint64, 3), ('rsvd0', ctypes.c_uint64, 6), ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16), ('res0_h', ctypes.c_uint64, 16), ('res0_w', ctypes.c_uint64, 16), ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32), ('opd1_addr', ctypes.c_uint64, 32), ('opd2_addr', ctypes.c_uint64, 32), ('res0_n_str', ctypes.c_uint64, 16), ('res0_c_str', ctypes.c_uint64, 16), ('opd0_n_str', ctypes.c_uint64, 16), ('opd0_c_str', ctypes.c_uint64, 16), ('opd1_n_str', ctypes.c_uint64, 16), ('opd1_c_str', ctypes.c_uint64, 16), ('res0_h_str', ctypes.c_uint64, 20), ('res0_w_str', ctypes.c_uint64, 20), ('opd0_h_str', ctypes.c_uint64, 20), ('opd2_sign', ctypes.c_uint64, 1), ('rsvd1', ctypes.c_uint64, 3), ('opd0_w_str', ctypes.c_uint64, 20), ('opd1_h_str', ctypes.c_uint64, 20), ('opd1_w_str', ctypes.c_uint64, 20), ('rsvd2', ctypes.c_uint64, 4)]
    # --- command header / scheduling ---
    cmd_short: int
    cmd_id: int
    cmd_id_dep: int
    tsk_typ: int
    tsk_eu_typ: int
    # --- operand flags and counts ---
    opd0_const: int
    opd1_const: int
    opd2_const: int
    tsk_opd_num: int
    cmd_id_en: int
    pwr_step: int
    intr_en: int
    # --- precisions and signs ---
    res0_prec: int
    opd0_prec: int
    opd1_prec: int
    opd2_prec: int
    opd0_sign: int
    opd1_sign: int
    # --- short stride selectors ---
    res0_str: int
    opd0_str: int
    opd1_str: int
    opd2_n_str: int
    rsvd0: int
    # --- result tensor shape (N, C, H, W) ---
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    # --- addresses ---
    res0_addr: int
    opd0_addr: int
    opd1_addr: int
    opd2_addr: int
    # --- N/C strides ---
    res0_n_str: int
    res0_c_str: int
    opd0_n_str: int
    opd0_c_str: int
    opd1_n_str: int
    opd1_c_str: int
    # --- H/W strides ---
    res0_h_str: int
    res0_w_str: int
    opd0_h_str: int
    opd2_sign: int
    rsvd1: int
    opd0_w_str: int
    opd1_h_str: int
    opd1_w_str: int
    rsvd2: int
    # Total instruction width in bits.
    length: int = 512
def filter_attributes_with_known_counts(graph, known_attrs):
    """Remove (attr, val) pairs that match a counted entry in the graph history.

    Scans ``graph['history']`` for entries carrying a 'count' field; any
    (attr, val) pair whose value matches such an entry is removed from
    ``known_attrs`` (mutated in place and also returned).

    Parameters
    ----------
    graph : dict with a 'history' key mapping to a list of dicts.
    known_attrs : list of (attr, val) tuples; pruned in place.

    Returns
    -------
    The pruned ``known_attrs`` list.
    """
    # Iterate over a reversed copy so in-place removal is safe.
    for attr, val in known_attrs[::-1]:
        for entry in graph['history']:
            # Only history entries with an explicit count are authoritative.
            if 'count' not in entry:
                continue
            if entry.get(attr, None) == val:
                known_attrs.remove((attr, val))
                # Bug fix: stop after the first match.  Without this break a
                # second matching history entry would call remove() again on
                # a pair that is already gone and raise ValueError.
                break
    return known_attrs
def fitness_fn(x):
    """Schwefel benchmark: 418.9829 * d - sum(x * sin(sqrt(|x|))) per row.

    ``x`` is a batch of candidate solutions of shape (batch, d); returns one
    fitness value per row (lower is better, minimum near x_i = 420.9687).
    """
    num_dims = tf.cast(tf.shape(x)[1], tf.float64)
    row_sum = tf.reduce_sum(x * tf.sin(tf.sqrt(tf.abs(x))), axis=1)
    return 418.9829 * num_dims - row_sum
def simSetModelProperty(objectHandle, prop):
    """Set the model property flags of a scene object via the C library.

    Delegates to ``lib.simSetModelProperty`` and raises through
    ``_check_return`` if the call reports an error.
    """
    _check_return(lib.simSetModelProperty(objectHandle, prop))
@pytest.mark.parametrize('extensionarray', [False, True])
def test_indexedoptionarray_emptyarray(tmp_path, extensionarray):
    """Round-trip an all-missing IndexedOptionArray over EmptyArray through Arrow and Parquet.

    Fix: the decorator line was mangled to a bare ``.parametrize(...)``
    (a syntax error); the ``@pytest.mark`` prefix is restored here.
    """
    # Every index is -1, i.e. all five entries are None.
    akarray = ak.contents.IndexedOptionArray(
        ak.index.Index64(np.array([-1, -1, -1, -1, -1], dtype=np.int64)),
        ak.contents.EmptyArray(),
        parameters={'which': 'outer'},
    )
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
    parquet_round_trip(akarray, paarray, extensionarray, tmp_path)
@pytest.mark.parametrize('value, expected', (('application/problem+json', True), ('application/json', True), ('application/xml', False), ('text/plain', False)))
def test_is_json_media_type(value, expected):
    """is_json_media_type accepts JSON media types (incl. '+json' suffix) and rejects others.

    Fix: the decorator line was mangled to a bare ``.parametrize(...)``
    (a syntax error); the ``@pytest.mark`` prefix is restored here.
    """
    assert is_json_media_type(value) is expected
class ConvNet(nn.Module):
    """Four-layer conv stack whose layers may live on different devices,
    memory formats, and dtypes (useful for cross-device pipeline tests).

    ``gpus`` may be a single device (replicated for all four layers) or a
    list of four devices, one per conv layer; ``layouts`` and ``dtypes``
    are always per-layer sequences of length four.
    """

    def __init__(self, gpus, layouts, dtypes):
        super().__init__()
        self.dtypes = dtypes
        if isinstance(gpus, list):
            # Remember per-layer devices so forward() can move activations.
            self.layer_gpus = gpus
        else:
            # Single device: replicate it for every layer.
            gpus = [gpus] * 4
        self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(device=gpus[0], memory_format=layouts[0], dtype=dtypes[0])
        self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(device=gpus[1], memory_format=layouts[1], dtype=dtypes[1])
        self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(device=gpus[2], memory_format=layouts[2], dtype=dtypes[2])
        self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(device=gpus[3], memory_format=layouts[3], dtype=dtypes[3])

    def forward(self, x):
        x = x.to(self.dtypes[0])
        # Without an explicit device list, keep everything on x's device.
        devices = self.layer_gpus if hasattr(self, 'layer_gpus') else [x.device] * 4
        # After each of the first three convs, hand the activation to the
        # next layer's device and dtype.
        for idx, layer in enumerate((self.conv0, self.conv1, self.conv2)):
            x = layer(x).to(device=devices[idx + 1], dtype=self.dtypes[idx + 1])
        return self.conv3(x)
class VQLPIPS(nn.Module):
    """Perceptual (LPIPS) reconstruction loss, averaged over the batch."""

    def __init__(self):
        super().__init__()
        # The LPIPS network is used for inference only.
        self.perceptual_loss = LPIPS().eval()

    def forward(self, targets, reconstructions):
        # LPIPS expects contiguous tensors; reduce per-sample scores to a scalar.
        per_sample = self.perceptual_loss(targets.contiguous(), reconstructions.contiguous())
        return per_sample.mean()
def index_of_first(list_, pred):
    """Return the index of the first element satisfying ``pred``, or None.

    Mirrors ``list.index`` but matches on a predicate and returns None
    instead of raising when nothing matches.
    """
    return next((idx for idx, item in enumerate(list_) if pred(item)), None)
def tfidf_loading(use_tfidf, w_emb, cfg):
    """Optionally initialize ``w_emb`` with TF-IDF-weighted GloVe embeddings.

    When ``use_tfidf`` and ``cfg.TRAIN.QUESTION.USEDATA`` are both set, a
    cached embedding (``embed_tfidf_weights.pkl``) is loaded if present;
    otherwise the TF-IDF weights are computed from the train/test
    questions, applied to the embedding, and cached for next time.

    Fixes: merges the duplicated identical ``USEDATA`` condition, drops
    the ``== True`` comparison, stops shadowing the builtin ``dict``, and
    only loads ``dictionary.pkl`` when it is actually needed (the cache
    miss branch).

    Returns the (possibly replaced) embedding module.
    """
    data_dir = cfg.DATASET.DATA_DIR
    if use_tfidf and cfg.TRAIN.QUESTION.USEDATA:
        cache_path = os.path.join(data_dir, 'embed_tfidf_weights.pkl')
        if os.path.isfile(cache_path):
            print('Loading embedding tfidf and weights from file')
            with open(cache_path, 'rb') as f:
                w_emb = torch.load(f)
            print('Load embedding tfidf and weights from file successfully')
        else:
            print("Embedding tfidf and weights haven't been saving before")
            # Build the weights from scratch and cache them.
            dictionary = Dictionary.load_from_file(os.path.join(data_dir, 'dictionary.pkl'))
            tfidf, weights = tfidf_from_questions(['train', 'test'], cfg, dictionary)
            w_emb.init_embedding(os.path.join(data_dir, 'glove6b_init_300d.npy'), tfidf, weights)
            with open(cache_path, 'wb') as f:
                torch.save(w_emb, f)
            print('Saving embedding with tfidf and weights successfully')
    return w_emb
def get_wilds_loaders(dataset, data_dir, data_fraction=1.0, model_seed=0):
    """Build train / in-distribution val / in-distribution test loaders for a WILDS dataset.

    Falls back to the train loader when the dataset lacks an
    in-distribution validation split, and to the validation loader when it
    lacks an in-distribution test split.

    Fix: the two fallback handlers used bare ``except:``, which also
    swallows SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.

    Args:
        dataset: WILDS dataset name (e.g. 'fmow', 'poverty').
        data_dir: root directory holding the WILDS data.
        data_fraction: fraction of the data to use.
        model_seed: selects the poverty fold when dataset == 'poverty'.

    Returns:
        (train_loader, val_loader, in_test_loader), each wrapped in
        ProperDataLoader.
    """
    config = get_default_config(dataset, data_fraction=data_fraction)
    # 'poverty' is organized into named folds; pick the fold for this seed.
    dataset_kwargs = {'fold': POVERTY_FOLDS[model_seed]} if dataset == 'poverty' else {}
    full_dataset = get_dataset(dataset=dataset, root_dir=data_dir, **dataset_kwargs)
    train_grouper = CombinatorialGrouper(dataset=full_dataset, groupby_fields=config.groupby_fields)
    if dataset == 'fmow':
        # Halve the batch size for fmow (presumably for memory -- TODO confirm).
        config.batch_size = config.batch_size // 2
    train_transform = initialize_transform(transform_name=config.train_transform, config=config, dataset=full_dataset)
    train_data = full_dataset.get_subset('train', frac=config.frac, transform=train_transform)
    train_loader = get_train_loader(loader=config.train_loader, dataset=train_data, batch_size=config.batch_size, uniform_over_groups=config.uniform_over_groups, grouper=train_grouper, distinct_groups=config.distinct_groups, n_groups_per_batch=config.n_groups_per_batch, **config.loader_kwargs)
    eval_transform = initialize_transform(transform_name=config.eval_transform, config=config, dataset=full_dataset)
    try:
        # fmow names its in-distribution validation split 'val'.
        val_str = 'val' if dataset == 'fmow' else 'id_val'
        val_data = full_dataset.get_subset(val_str, frac=config.frac, transform=eval_transform)
        val_loader = get_eval_loader(loader=config.eval_loader, dataset=val_data, batch_size=config.batch_size, grouper=train_grouper, **config.loader_kwargs)
    except Exception:
        print(f"{dataset} dataset doesn't have an in-distribution validation split -- using train split instead!")
        val_loader = train_loader
    try:
        in_test_data = full_dataset.get_subset('id_test', frac=config.frac, transform=eval_transform)
        in_test_loader = get_eval_loader(loader=config.eval_loader, dataset=in_test_data, batch_size=config.batch_size, grouper=train_grouper, **config.loader_kwargs)
    except Exception:
        print(f"{dataset} dataset doesn't have an in-distribution test split -- using validation split instead!")
        in_test_loader = val_loader
    train_loader = ProperDataLoader(train_loader)
    val_loader = ProperDataLoader(val_loader)
    in_test_loader = ProperDataLoader(in_test_loader)
    return (train_loader, val_loader, in_test_loader)
def dict_serialize(dict_instance: Dict[str, Any]) -> 'IOData':
    """Serialize a plain dict into an scqubits IOData record.

    None-valued entries are dropped, then each remaining item is routed by
    ``type_dispatch`` into one of three buckets: plain attributes, numpy
    arrays, or nested serializable objects.
    """
    import scqubits.io_utils.fileio as io
    dict_instance = utils.remove_nones(dict_instance)
    attributes: Dict[str, Any] = {}
    ndarrays: Dict[str, ndarray] = {}
    objects: Dict[str, object] = {}
    for key, value in dict_instance.items():
        # type_dispatch picks the handler that knows how to file this value.
        handler = type_dispatch(value)
        attributes, ndarrays, objects = handler(key, value, attributes, ndarrays, objects)
    return io.IOData('dict', attributes, ndarrays, objects)
class Phi6(CompositeBase):
    """Composite basis ('P6') with a 13-term even stencil satisfying 12
    boundary conditions ('12th order').

    NOTE(review): the stencil coefficients use module-level symbols ``n``
    and ``sp`` (presumably a sympy index Symbol and the sympy module) --
    confirm against the enclosing module.
    """
    def __init__(self, N, quad='GC', bc=((0,) * 12), domain=((- 1), 1), dtype=float, padding_factor=1, dealias_direct=False, coordinates=None, **kw):
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc, padding_factor=padding_factor, dealias_direct=dealias_direct, coordinates=coordinates)
        # Sparse stencil: only even offsets 0, 2, ..., 12 are nonzero; each
        # coefficient is a rational function of n (note the alternating
        # signs and the symmetric 1/32, -3/16, 15/32, -5/8 pattern).
        self._stencil = {0: (1 / (((((((32 * sp.pi) * (n + 1)) * (n + 2)) * (n + 3)) * (n + 4)) * (n + 5)) * (n + 6))), 2: ((- 3) / (((((((16 * sp.pi) * (n + 1)) * (n + 3)) * (n + 4)) * (n + 5)) * (n + 6)) * (n + 7))), 4: (15 / (((((((32 * sp.pi) * (n + 2)) * (n + 3)) * (n + 5)) * (n + 6)) * (n + 7)) * (n + 8))), 6: ((- 5) / (((((((8 * sp.pi) * (n + 3)) * (n + 4)) * (n + 5)) * (n + 7)) * (n + 8)) * (n + 9))), 8: (15 / (((((((32 * sp.pi) * (n + 4)) * (n + 5)) * (n + 6)) * (n + 7)) * (n + 9)) * (n + 10))), 10: ((- 3) / (((((((16 * sp.pi) * (n + 5)) * (n + 6)) * (n + 7)) * (n + 8)) * (n + 9)) * (n + 11))), 12: (1 / (((((((32 * sp.pi) * (n + 6)) * (n + 7)) * (n + 8)) * (n + 9)) * (n + 10)) * (n + 11)))}
    def boundary_condition():
        # NOTE(review): defined without ``self`` -- likely intended as a
        # @staticmethod whose decorator was lost; call via the class.
        return '12th order'
    def short_name():
        # NOTE(review): same as boundary_condition -- likely a @staticmethod.
        return 'P6'
def DeepLabV3Plus(img_height, img_width, nclasses=66):
    """Assemble a DeepLabv3+ segmentation model on a ResNet50 backbone.

    Taps ResNet50 at 'activation_39' for the ASPP branch and at
    'activation_9' for the low-level branch, fuses the two in the decoder,
    and upsamples back to the input resolution with a softmax head.

    Args:
        img_height: input image height in pixels.
        img_width: input image width in pixels.
        nclasses: number of segmentation classes (softmax channels).

    Returns:
        A keras Model named 'DeepLabV3_Plus'.
    """
    print('*** Building DeepLabv3Plus Network ***')
    backbone = ResNet50(input_shape=(img_height, img_width, 3), weights='imagenet', include_top=False)

    # High-level branch: ASPP over deep features, upsampled to 1/4 resolution.
    aspp_branch = ASPP(backbone.get_layer('activation_39').output)
    aspp_branch = Upsample(tensor=aspp_branch, size=[img_height // 4, img_width // 4])

    # Low-level branch: project shallow features down to 48 channels.
    low_level = backbone.get_layer('activation_9').output
    low_level = Conv2D(filters=48, kernel_size=1, padding='same', kernel_initializer='he_normal', use_bias=False, name='low_level_projection')(low_level)
    low_level = BatchNormalization(name='bn_low_level_projection')(low_level)
    low_level = Activation('relu', name='low_level_activation')(low_level)

    # Decoder: fuse both branches and refine with two 3x3 convs.
    # NOTE(review): each decoder conv applies activation='relu' AND a separate
    # Activation after BatchNormalization (relu both before and after BN).
    # Preserved as-is to keep behavior identical -- confirm intent.
    fused = concatenate([aspp_branch, low_level], name='decoder_concat')
    fused = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu', kernel_initializer='he_normal', use_bias=False, name='decoder_conv2d_1')(fused)
    fused = BatchNormalization(name='bn_decoder_1')(fused)
    fused = Activation('relu', name='activation_decoder_1')(fused)
    fused = Conv2D(filters=256, kernel_size=3, padding='same', activation='relu', kernel_initializer='he_normal', use_bias=False, name='decoder_conv2d_2')(fused)
    fused = BatchNormalization(name='bn_decoder_2')(fused)
    fused = Activation('relu', name='activation_decoder_2')(fused)

    # Head: back to full resolution, 1x1 conv to class logits, softmax.
    fused = Upsample(fused, [img_height, img_width])
    fused = Conv2D(nclasses, (1, 1), padding='same', name='output_layer')(fused)
    fused = Activation('softmax')(fused)
    print('*** Building Network Completed***')

    model = Model(inputs=backbone.input, outputs=fused, name='DeepLabV3_Plus')
    print(f'*** Input_Shape => {model.input_shape} ***')
    print(f'*** Output_Shape => {model.output_shape} ***')
    return model
def generalized_rcnn(model):
    """Wire up a Generalized R-CNN detection model from the global config.

    Resolves the conv body and the box/mask/keypoint head builder functions
    named in ``cfg`` and delegates assembly to
    ``build_generic_detection_model``.
    """
    conv_body = get_func(cfg.MODEL.CONV_BODY)
    return build_generic_detection_model(
        model,
        conv_body,
        add_roi_box_head_func=get_func(cfg.FAST_RCNN.ROI_BOX_HEAD),
        add_roi_mask_head_func=get_func(cfg.MRCNN.ROI_MASK_HEAD),
        add_roi_keypoint_head_func=get_func(cfg.KRCNN.ROI_KEYPOINTS_HEAD),
        add_roi_box_head_ohem_func=get_func(cfg.FAST_RCNN.ROI_BOX_HEAD_OHEM),
        freeze_conv_body=cfg.TRAIN.FREEZE_CONV_BODY,
    )
def GenerateSM75_TensorOp_8816_TN(manifest, cuda_version):
    """Register SM75 Tensor-Op 8x8x16 integer (s8/u8) GEMM and Conv2d Fprop
    operations with the cutlass kernel manifest.

    Emits s32-accumulated kernels for each tile shape, plus clamped
    mixed-output variants when the output type differs from the
    accumulator.  No-op unless the CUDA toolkit is at least 10.2.
    """
    # Integer 8816 tensor-core MMA requires CUDA Toolkit >= 10.2.
    if (not CudaToolkitVersionSatisfies(cuda_version, 10, 2)):
        return
    # TN: row-major A, column-major B; column-major C.
    layouts = [(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor)]
    # Saturating multiply-add for both signed and unsigned 8-bit inputs,
    # accumulating in s32.
    math_instructions = [MathInstruction([8, 8, 16], DataType.s8, DataType.s8, DataType.s32, OpcodeClass.TensorOp, MathOperation.multiply_add_saturate), MathInstruction([8, 8, 16], DataType.u8, DataType.u8, DataType.s32, OpcodeClass.TensorOp, MathOperation.multiply_add_saturate)]
    # Compute-capability window for these kernels (75 = Turing).
    min_cc = 75
    max_cc = 1024
    alignment_constraints = [16]
    for math_inst in math_instructions:
        # Threadblock tile shapes (M, N, K), stage count, and warp layout.
        tile_descriptions = [TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc)]
        # Output in s32, matching the accumulator.
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
        conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
        CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
        # Mixed-output variants: narrow the result back to the 8-bit input
        # type with a clamping epilogue (f32 epilogue compute).
        if (math_inst.element_a != math_inst.element_accumulator):
            data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
            operations = []
            operations += CreateGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
            operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
            # Wider N tiles take the larger C-tensor alignment.
            for op in operations:
                if (op.tile_description.threadblock_shape[1] >= 128):
                    op.C.alignment = 16
                else:
                    op.C.alignment = 8
@register_node(optplan.SimulationSpace)
def create_simulation_space(params: optplan.SimulationSpace, work: workspace.Workspace) -> SimulationSpace:
    """Workspace factory: build a SimulationSpace object from its optplan node.

    NOTE(review): the decorator line was mangled in the source (a bare
    ``_node(optplan.SimulationSpace)`` at top level is not a decorator);
    reconstructed here as ``@register_node(...)`` -- confirm the exact
    registration helper name against the original module.
    """
    return SimulationSpace(params, work.filepath)
@pytest.mark.sm70  # NOTE(review): decorator mangled to bare ".sm70" -- exact mark uncertain, confirm
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(arch=archs_support_f16)  # NOTE(review): reconstructed from mangled "_utils.test(...)"
def test_from_torch():
    """Round-trip an f16 torch tensor through a Taichi field via from_torch/to_torch."""
    import torch
    n = 16
    y = ti.field(dtype=ti.f16, shape=n)
    x = torch.arange(0, n).to(torch.float16)
    y.from_torch(x)

    # NOTE(review): a struct-for over a Taichi field only works inside a
    # kernel; the @ti.kernel decorator was presumably lost in extraction.
    @ti.kernel
    def init():
        for i in y:
            y[i] = 3 * i

    init()
    z = y.to_torch()
    for i in range(n):
        assert z[i] == i * 3
def layer_init(layer, w_scale=1.0):
    """Orthogonally initialize a layer's weight (scaled) and zero its bias.

    Layers without a matrix-valued weight are returned untouched.

    Fix: layers constructed with ``bias=False`` have ``layer.bias`` set to
    None; the original unconditionally accessed ``layer.bias.data`` and
    crashed.  A missing/None bias is now skipped.

    Args:
        layer: a torch module (e.g. nn.Linear / nn.Conv2d).
        w_scale: multiplicative scale applied after orthogonal init.

    Returns:
        The same ``layer``, for chaining.
    """
    if hasattr(layer, 'weight') and len(layer.weight.shape) > 1:
        nn.init.orthogonal_(layer.weight.data)
        layer.weight.data.mul_(w_scale)
        # bias can be None (bias=False) or absent entirely on some modules.
        if getattr(layer, 'bias', None) is not None:
            nn.init.constant_(layer.bias.data, 0)
    return layer
def generate_body(outputs: List[Node], partition: List[Node], partition_layer_nodes_to_field_id: Dict[Node, str], ready_expressions: Dict[Node, str]) -> List[str]:
    """Generate the list of source statements for one partition's body.

    Computes per-node use counts, pins already-available expressions so
    they are never considered exhausted, emits the partition's statements,
    and appends the final return statement.
    """
    usage_counts = node_uses(partition, set(outputs))
    # Pre-computed expressions get an effectively infinite use count so
    # downstream generation never frees or inlines them away.
    for expr in ready_expressions:
        usage_counts[expr] = 100000
    statements = generate_statements(partition, partition_layer_nodes_to_field_id, ready_expressions, usage_counts)
    statements.append(generate_return_statement(outputs, ready_expressions))
    return statements
def parse_open_entity_dataset(path: str):
    """Yield {'sentence', 'labels'} examples from an Open Entity JSONL file.

    Each line is a JSON object with left/right context tokens and a mention
    span; the mention is wrapped in ENT markers, labels are filtered to the
    known ENTITY_LABELS set, and the sentence is normalized character by
    character via NORMALIZATION_TABLE.
    """
    with open(path, 'r') as f:
        for line in f:
            example = json.loads(line.strip())
            labels = [label for label in example['y_str'] if label in ENTITY_LABELS]
            left = ' '.join(example['left_context_token'])
            right = ' '.join(example['right_context_token'])
            # left + ENT + ' ' + mention + ENT + ' ' + right
            sentence = f"{left}{ENT} {example['mention_span']}{ENT} {right}"
            for before, after in NORMALIZATION_TABLE:
                sentence = sentence.replace(before, after)
            yield {'sentence': sentence, 'labels': labels}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.