code stringlengths 101 5.91M |
|---|
class DistModule(nn.Module):
    """Wrapper that keeps a module's parameters/buffers in sync across ranks.

    On construction, parameters are broadcast from rank 0 when more than one
    process is present; in a single-process run, buffer syncing is disabled
    by forcing ``bn_method`` to 0.
    """

    def __init__(self, module, bn_method=0):
        super(DistModule, self).__init__()
        self.module = module
        if get_world_size() > 1:
            self.bn_method = bn_method
            broadcast_params(self.module)
        else:
            # Single process: no synchronisation needed.
            self.bn_method = 0

    def forward(self, *args, **kwargs):
        # Sync buffers (e.g. BN running stats) before every forward pass.
        broadcast_buffers(self.module, self.bn_method)
        return self.module(*args, **kwargs)

    def train(self, mode=True):
        # Keep wrapper and wrapped module in the same train/eval mode.
        super(DistModule, self).train(mode)
        self.module.train(mode)
        return self
class ControlServicer(object):
    """Skeleton gRPC servicer for the Control service.

    Every RPC is unimplemented: each handler marks the call UNIMPLEMENTED on
    the gRPC context and raises NotImplementedError. Subclasses override the
    methods they actually support.
    """

    def _unimplemented(self, context):
        # Shared stub behaviour for all RPC handlers.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Start(self, request, context):
        self._unimplemented(context)

    def Stop(self, request, context):
        self._unimplemented(context)

    def FlushAggregationQueue(self, request, context):
        self._unimplemented(context)
class FlaxRobertaForTokenClassification(metaclass=DummyObject):
    """Placeholder stand-in for the real Flax model class: instantiating it
    raises an error explaining that the 'flax' backend must be installed."""
    # Backends this dummy requires before the real class becomes available.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Raises if the 'flax' backend is not installed.
        requires_backends(self, ['flax'])
def preprocess_blur_grayscale(net_preproc_fn, blur_radius=None, blur_prob=1.0, jitter=True):
    """Compose an image preprocessing pipeline.

    Order: jittered preparation, optional random blur, 3-channel grayscale,
    then the network's own preprocessing function.
    """
    steps = [prepare_image_fn(jitter=jitter)]
    # Random blur is only inserted when a radius and a positive probability
    # are both supplied.
    if blur_radius is not None and blur_prob > 0:
        steps.append(transforms.Lambda(generate_random_blur(blur_radius, blur_prob)))
    steps.append(transforms.Grayscale(3))
    steps.append(transforms.Lambda(net_preproc_fn))
    return transforms.Compose(steps)
def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, include):
    """Probe whether the compiler accepts a GCC function attribute whose body
    exercises the given intrinsic ``code`` (with ``include`` in scope).

    Returns True when the test program compiles, False otherwise.
    """
    cmd._check_compiler()
    template = textwrap.dedent('\n #include<%s>\n int %s %s(void)\n {\n %s;\n return 0;\n }\n\n int\n main()\n {\n return 0;\n }\n ')
    body = template % (include, attribute, name, code)
    return cmd.try_compile(body, None, None) != 0
def drop_connect(x, drop_connect_rate, training):
    """Apply DropConnect (stochastic depth) to a batch of activations.

    Each example in the batch is independently kept with probability
    ``1 - drop_connect_rate``; kept examples are rescaled by ``1/keep_prob``
    so the expectation is unchanged. No-op when not training.
    """
    if not training:
        return x
    keep_prob = 1.0 - drop_connect_rate
    # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
    noise = torch.rand([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
    binary_mask = torch.floor(noise + keep_prob)
    return x / keep_prob * binary_mask
class TrainingArguments(transformers.TrainingArguments):
    """PPO (RLHF) training arguments extending transformers.TrainingArguments.

    Batch-size fields are validated in ``__post_init__`` and converted into
    per-device accumulation steps; PPO hyperparameters mirror the OAI
    summarize-from-feedback setup.
    """
    # -- logging / caching / kernels --
    wandb_project: str = field(default=constants.WANDB_PROJECT)
    cache_dir: Optional[str] = field(default=constants.DEFAULT_CACHE_DIR)
    flash_attn: bool = field(default=False)
    optim: str = field(default='adamw_torch')
    # -- response truncation handling --
    truncate_tokens: Optional[List[str]] = field(default_factory=(lambda : None), metadata={'help': 'Tokens in strings to truncate at first occurrence. This was used in original OAI summarization paper to avoid models returning incomplete sentences. '})
    truncate_after: Optional[int] = field(default=None, metadata={'help': 'Truncate after this number of tokens. Prevents early truncation.'})
    penalty_reward_value: float = field(default=(- 1.0), metadata={'help': 'Reward assigned to sequences that are truncated, e.g., due to outputting incomplete sentences for given context window.'})
    # -- batch sizing (validated and converted in __post_init__) --
    total_epochs: int = field(default=10)
    rollout_batch_size: int = field(default=512)
    step_batch_size: int = field(default=256)
    rollout_per_device_batch_size: int = field(default=32)
    step_per_device_batch_size: int = field(default=2)
    noptepochs: int = field(default=2)
    # -- PPO hyperparameters --
    vf_coef: float = field(default=0.1)
    cliprange: float = field(default=0.2)
    cliprange_value: float = field(default=0.2)
    gamma: float = field(default=1.0)
    lam: float = field(default=1.0)
    whiten_rewards: bool = field(default=True)
    adam_epsilon: float = field(default=1e-05, metadata={'help': 'Epsilon for AdamW optimizer. This is the default for OAI PPO code and UW Quark code. This is not the Hugging Face default.'})
    temperature: float = field(default=1.0)
    # -- KL penalty controller --
    kl_coef: float = field(default=0.2)
    target_kl: float = field(default=6.0)
    k_beta: float = field(default=0.1)
    adaptive_kl: bool = field(default=False)
    eval_batches: int = field(default=sys.maxsize, metadata={'help': 'Maximum number of batches to evaluate on.'})
    init_value_with_reward: bool = field(default=True, metadata={'help': 'Initialize the value model with the reward model.'})
    save_steps_extra: Optional[str] = field(default=None, metadata={'help': "A list of predetermined checkpoints to save, represented in the format 'no1__no2__no3'. Parse this with str.split('__')."})
    # -- sequence lengths and model paths --
    query_len: int = field(default=192)
    response_len: int = field(default=300)
    policy_model_name_or_path: str = field(default=None)
    reward_model_name_or_path: str = field(default=None)
    use_fast_tokenizer: bool = field(default=False, metadata={'help': 'Use fast tokenizer if True. Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. Use fast tokenizer only if you can live with that.'})

    def __post_init__(self):
        # Enable TF32 matmuls when requested (Ampere+ GPUs).
        if self.tf32:
            torch.backends.cuda.matmul.allow_tf32 = torch.backends.cudnn.allow_tf32 = True
        world_size = distributed_utils.get_world_size()
        # Global batch sizes must be achievable by (and divisible across)
        # per-device batches over all ranks.
        assert (self.rollout_batch_size >= (self.rollout_per_device_batch_size * world_size)), 'rollout_batch_size is smaller than rollout_per_device_batch_size * world_size. Increase the former or decrease the latter to fix this.'
        assert ((self.rollout_batch_size % (self.rollout_per_device_batch_size * world_size)) == 0), 'rollout_batch_size is not a multiple of rollout_per_device_batch_size * world_size. '
        assert (self.step_batch_size >= (self.step_per_device_batch_size * world_size)), 'step_batch_size is smaller than step_per_device_batch_size * world_size. Increase the former or decrease the latter to fix this.'
        assert ((self.step_batch_size % (self.step_per_device_batch_size * world_size)) == 0), 'step_batch_size is not a multiple of step_per_device_batch_size * world_size. '
        logger.warning(f'''Rollout stats:
rollout_batch_size: {self.rollout_batch_size}
rollout_per_device_batch_size: {self.rollout_per_device_batch_size}
world_size: {world_size}
''')
        assert (((self.rollout_batch_size // self.rollout_per_device_batch_size) % world_size) == 0)
        # Derived: micro-batches accumulated per rollout step on each rank.
        self.rollout_accumulation_steps = ((self.rollout_batch_size // self.rollout_per_device_batch_size) // world_size)
        logger.warning(f'''Step stats:
step_batch_size: {self.step_batch_size}
step_per_device_batch_size: {self.step_per_device_batch_size}
world_size: {world_size}
''')
        assert (((self.step_batch_size // self.step_per_device_batch_size) % world_size) == 0)
        # Derived: gradient accumulation steps per optimizer step on each rank.
        self.gradient_accumulation_steps = ((self.step_batch_size // self.step_per_device_batch_size) // world_size)
        logger.warning(f'''Accumulation steps:
rollout_accumulation_steps: {self.rollout_accumulation_steps}
gradient_accumulation_steps: {self.gradient_accumulation_steps}
''')
        # Parse 'no1__no2__no3' into a list of extra checkpoint steps.
        if (self.save_steps_extra is not None):
            self.save_steps_extra_list = [int(string) for string in self.save_steps_extra.split('__')]
        else:
            self.save_steps_extra_list = []

    def set_truncate_token_ids(self, tokenizer: transformers.PreTrainedTokenizer):
        """Convert ``truncate_tokens`` (strings) into token ids with the given
        tokenizer and store them on ``self.truncate_token_ids``."""
        truncate_tokens = self.truncate_tokens
        if (truncate_tokens is None):
            truncate_token_ids = None
        else:
            truncate_token_ids = tokenizer.convert_tokens_to_ids(truncate_tokens)
        self.truncate_token_ids = truncate_token_ids
class AgentGroup(object):
    """A fixed collection of agents acting in the same environment.

    On construction the group is reset (each agent learns its index) and,
    unless ``allow_duplicate_agents`` is set, all agents must be distinct
    object instances.
    """

    def __init__(self, *agents, allow_duplicate_agents=False):
        self.agents = agents
        self.n = len(self.agents)
        self.reset()
        all_distinct = all(a0 is not a1 for a0, a1 in itertools.combinations(agents, 2))
        if not all_distinct:
            assert allow_duplicate_agents, 'All agents should be separate instances, unless allow_duplicate_agents is set to true'

    def joint_action(self, state):
        """Return the tuple of every agent's action-and-probs for ``state``."""
        return tuple(agent.action(state) for agent in self.agents)

    def set_mdp(self, mdp):
        for agent in self.agents:
            agent.set_mdp(mdp)

    def reset(self):
        # Reset each agent and re-assign its position within the group.
        for idx, agent in enumerate(self.agents):
            agent.reset()
            agent.set_agent_index(idx)
def _get_ps_env(ps_info, config):
    """Build the environment dict for launching a parameter-server process.

    Exposes the PS's GPUs via CUDA_VISIBLE_DEVICES, forwards the Parallax
    log level (defaulting to logging.INFO when unset), and serializes the
    cluster resource info for the child process.
    """
    # Previously a bare `except:` swallowed every error here; only a missing
    # environment variable is expected, so use .get with a default instead.
    parallax_log_level = os.environ.get('PARALLAX_LOG_LEVEL', logging.INFO)
    env = {
        'CUDA_VISIBLE_DEVICES': ','.join(str(gpuid) for gpuid in ps_info['gpus']),
        'PARALLAX_LOG_LEVEL': parallax_log_level,
        'PARALLAX_RESOURCE_INFO': serialize_resource_info(config.resource_info),
    }
    return env
def test_inout_connector_validation_success_2():
    """Doubly-nested SDFGs with an in/out connector ('C') must validate and
    compute correctly: B starts at 3, inner tasklet adds 5, result is 8."""
    sdfg = dace.SDFG('test_inout_connector_validation_success_2')
    sdfg.add_array('A', [1], dace.int32)
    nsdfg_0 = dace.SDFG('nested_sdfg_0')
    nsdfg_0.add_array('B', [1], dace.int32)
    nsdfg_1 = dace.SDFG('nested_sdfg_1')
    nsdfg_1.add_array('C', [1], dace.int32)
    # Innermost SDFG: read C, add 5, write C back (C is both input and output).
    nstate = nsdfg_1.add_state()
    read_c = nstate.add_access('C')
    write_c = nstate.add_access('C')
    tasklet = nstate.add_tasklet('tasklet', {'__inp'}, {'__out'}, '__out = __inp + 5')
    nstate.add_edge(read_c, None, tasklet, '__inp', dace.Memlet.from_array('C', nsdfg_1.arrays['C']))
    nstate.add_edge(tasklet, '__out', write_c, None, dace.Memlet.from_array('C', nsdfg_1.arrays['C']))
    # Middle SDFG: initialize B to 3, then route B through the inner SDFG's
    # in/out connector C.
    nstate = nsdfg_0.add_state()
    tasklet_0 = nstate.add_tasklet('tasklet_00', {}, {'__out'}, '__out = 3')
    write_b_0 = nstate.add_access('B')
    tasklet_1 = nstate.add_nested_sdfg(nsdfg_1, nsdfg_0, {'C'}, {'C'})
    write_b_1 = nstate.add_access('B')
    nstate.add_edge(tasklet_0, '__out', write_b_0, None, dace.Memlet.from_array('B', nsdfg_0.arrays['B']))
    nstate.add_edge(write_b_0, None, tasklet_1, 'C', dace.Memlet.from_array('B', nsdfg_0.arrays['B']))
    nstate.add_edge(tasklet_1, 'C', write_b_1, None, dace.Memlet.from_array('B', nsdfg_0.arrays['B']))
    # Outer SDFG: copy B out into A.
    state = sdfg.add_state()
    tasklet = state.add_nested_sdfg(nsdfg_0, sdfg, {}, {'B'})
    write_a = state.add_access('A')
    state.add_edge(tasklet, 'B', write_a, None, dace.Memlet.from_array('A', sdfg.arrays['A']))
    # Validation must succeed for this construction.
    try:
        sdfg.validate()
    except dace.sdfg.InvalidSDFGError:
        assert False, 'SDFG should validate'
    A = np.array([1], dtype=np.int32)
    sdfg(A=A)
    assert (A[0] == 8)
# NOTE(review): these bare _pattern(...) calls look like decorator expressions
# that lost their leading '@' (registering the handler class below for conv,
# relu-conv, bn-conv and relu-bn-conv fusion patterns) — confirm against the
# original quantizer source.
_pattern(torch.nn.modules.conv.Conv2d)
_pattern((torch.nn.ReLU, torch.nn.modules.conv.Conv2d))
_pattern((torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.conv.Conv2d))
_pattern((torch.nn.ReLU, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.conv.Conv2d)))
class ConvNormRelu(MinMaxObserver):
    """Quantization handler for Conv2d optionally fused with BatchNorm2d
    and/or ReLU.

    Walks backwards from the matched node (ReLU -> BN -> Conv), records the
    component nodes, and on ``quantize`` emits a quantized conv (or ConvReLU)
    module with the BN statistics folded into the conv weights.
    """

    def __init__(self, quantizer, node):
        super().__init__(quantizer, node)
        (self.relu_node, self.bn_node) = (None, None)
        if isinstance(quantizer.modules[node.target], torch.nn.ReLU):
            self.relu_node = node
            node = node.args[0]
        if isinstance(quantizer.modules[node.target], torch.nn.BatchNorm2d):
            self.bn_node = node
            self.bn = quantizer.modules[self.bn_node.target]
            node = node.args[0]
        assert isinstance(quantizer.modules[node.target], torch.nn.modules.Conv2d)
        self.conv_node = node
        self.conv = quantizer.modules[self.conv_node.target]

    def quantize(self, quantizer, node, load_arg):
        mod = self.conv
        (weight, bias) = (mod.weight, mod.bias)
        if (self.bn_node is not None):
            # Fold BN statistics/affine parameters into the conv weights.
            (weight, bias) = fuse_conv_bn_weights(weight, bias, self.bn.running_mean, self.bn.running_var, self.bn.eps, self.bn.weight, self.bn.bias)
        (min_val, max_val) = (float(weight.min()), float(weight.max()))
        (act_scale, act_zp) = self.scale_zeropoint()
        (weight_scale, weight_zp) = _minmax_scale_zeropoint(min_val, max_val)
        qweight = torch.quantize_per_tensor(weight, weight_scale, weight_zp, torch.qint8)
        # Fuse the trailing ReLU into the quantized module when present.
        ctor = (torch.nn.intrinsic.quantized.ConvReLU2d if (self.relu_node is not None) else torch.nn.quantized.Conv2d)
        qconv = ctor(mod.in_channels, mod.out_channels, mod.kernel_size, mod.stride, mod.padding, mod.dilation, mod.groups, (mod.bias is not None), mod.padding_mode)
        qconv.set_weight_bias(qweight, bias)
        qconv.scale = float(act_scale)
        qconv.zero_point = int(act_zp)
        (parent_name, name) = _parent_name(self.conv_node.target)
        setattr(quantizer.modules[parent_name], name, qconv)
        if (self.bn_node is not None):
            # BUG FIX: the identity module must replace the BN under the BN
            # node's own parent (parent_bn), not the conv's parent — the
            # original looked up quantizer.modules[parent_name] here, leaving
            # the folded BN in place whenever conv and BN had different parents.
            (parent_bn, bn_name) = _parent_name(self.bn_node.target)
            setattr(quantizer.modules[parent_bn], bn_name, IdentityModule())
        return quantizer.quantized_graph.create_node('call_module', self.conv_node.target, (load_arg(self.conv_node.args[0]),), {})
def plot_legislative_allinOne():
    """Load the US Legislative temporal edgelist and plot all anomaly label
    sets in a single comparison figure."""
    fname = 'datasets/USLegis_processed/LegisEdgelist.txt'
    G_times = USLegis_loader.load_legis_temporarl_edgelist(fname)
    # Known anomalous time points flagged by LAD for this dataset.
    label_sets = [[3, 7]]
    graph_name = 'USLegislative'
    normal_util.all_in_one_compare(G_times, graph_name, label_sets, False)
class DensePoseDataPointsVisualizer(object):
    """Draws DensePose sample points onto a BGR image, optionally colouring
    each point by a value derived from the DensePose data via a colormap."""

    def __init__(self, densepose_data_to_value_fn=None, cmap=cv2.COLORMAP_PARULA, **kwargs):
        self.points_visualizer = PointsVisualizer()
        self.densepose_data_to_value_fn = densepose_data_to_value_fn
        self.cmap = cmap

    def visualize(self, image_bgr: Image, bbox_densepose_datas: Optional[Tuple[(Iterable[Boxes], Iterable[DensePoseDataRelative])]]) -> Image:
        if bbox_densepose_datas is None:
            return image_bgr
        for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
            x0, y0, w, h = bbox_xywh.numpy()
            # Point coordinates are stored in [0, 255]; map them into absolute
            # image coordinates inside the bounding box.
            xs = densepose_data.x.numpy() * w / 255.0 + x0
            ys = densepose_data.y.numpy() * h / 255.0 + y0
            pts_xy = zip(xs, ys)
            if self.densepose_data_to_value_fn is None:
                image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy)
            else:
                values = self.densepose_data_to_value_fn(densepose_data)
                img_colors_bgr = cv2.applyColorMap(values, self.cmap)
                colors_bgr = [[int(c) for c in color_bgr.ravel()] for color_bgr in img_colors_bgr]
                image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy, colors_bgr)
        return image_bgr
# NOTE(review): the tuple below looks like a Dash callback decorator that lost
# its '@app.callback(' prefix — confirm against the original dashboard module.
(Output('clustering-loglines', 'children'), [Input('cluster-hist', 'clickData')])
def update_logline_list(data):
    """Render the loglines of the clicked histogram cluster as a DataTable.

    ``data`` is the Plotly clickData payload; an empty payload yields an
    empty table.
    """
    if (len(data) > 0):
        cluster_label = data['points'][0]['label']
        df = log_clustering.get_loglines(cluster_label)
        columns = [{'name': c, 'id': c} for c in df.columns]
        return dash_table.DataTable(data=df.to_dict('records'), columns=columns, style_table={'overflowX': 'scroll'}, style_cell={'max-width': '1020px', 'textAlign': 'left'}, editable=True, row_selectable='multi', sort_action='native', sort_mode='multi', column_selectable='single', page_action='native', page_size=20, page_current=0)
    else:
        return dash_table.DataTable()
def trace_model(model, batch_size=256, device=torch.device('cpu')):
    """JIT-trace a CLIP-style model's forward/encode_text/encode_image
    entry points with dummy inputs, preserving ``visual.image_size``."""
    model.eval()
    image_size = model.visual.image_size
    dummy_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
    dummy_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
    traced = torch.jit.trace_module(
        model,
        inputs=dict(
            forward=(dummy_images, dummy_text),
            encode_text=(dummy_text,),
            encode_image=(dummy_images,),
        ),
    )
    # Tracing drops custom attributes; restore the one callers rely on.
    traced.visual.image_size = image_size
    return traced
class CliffordAlgebraIndices(UniqueRepresentation, Parent):
    """Facade parent for the index set of a Clifford algebra basis: all
    subsets of {0, ..., Qdim-1}, represented as FrozenBitset elements."""

    def __init__(self, Qdim):
        # Qdim generators => 2**Qdim basis subsets.
        self._nbits = Qdim
        self._cardinality = (2 ** Qdim)
        category = FiniteEnumeratedSets().Facade()
        Parent.__init__(self, category=category, facade=True)

    def _element_constructor_(self, x):
        # Coerce an iterable of bit positions, or a single int (treated as
        # one bit position), into a FrozenBitset.
        if isinstance(x, (list, tuple, set, frozenset)):
            if (len(x) > self._nbits):
                raise ValueError(f'x={x!r} is too long')
            return FrozenBitset(x)
        if isinstance(x, int):
            return FrozenBitset((x,))
        # NOTE(review): falls through and returns None for any other type —
        # confirm whether raising TypeError was intended.

    def __call__(self, el):
        # Facade parents construct non-Elements directly instead of going
        # through the coercion framework.
        if (not isinstance(el, Element)):
            return self._element_constructor_(el)
        else:
            return Parent.__call__(self, el)

    def cardinality(self):
        """Number of indices: 2**nbits."""
        return self._cardinality
    __len__ = cardinality

    def _repr_(self):
        if (self._nbits == 0):
            return 'Subsets of {}'
        if (self._nbits == 1):
            return 'Subsets of {0}'
        if (self._nbits == 2):
            return 'Subsets of {0,1}'
        return f'Subsets of {{0,1,...,{(self._nbits - 1)}}}'

    def _latex_(self):
        if (self._nbits == 0):
            return '\\mathcal{P}(\\emptyset)'
        if (self._nbits == 1):
            return '\\mathcal{P}({0})'
        if (self._nbits == 2):
            return '\\mathcal{P}({0,1})'
        return f'\mathcal{{P}}({{0,1,\ldots,{(self._nbits - 1)}}})'

    def __iter__(self):
        # Enumerate subsets in order of increasing size, empty set first.
        import itertools
        n = self._nbits
        (yield FrozenBitset())
        k = 1
        while (k <= n):
            for C in itertools.combinations(range(n), k):
                (yield FrozenBitset(C))
            k += 1

    def __contains__(self, elt):
        # Ints test against the cardinality range; FrozenBitsets must fit
        # within nbits; everything else is excluded.
        if isinstance(elt, int):
            return ((elt < self._cardinality) and (elt >= 0))
        if (not isinstance(elt, FrozenBitset)):
            return False
        return (elt.capacity() <= self._nbits)

    def _an_element_(self):
        if (not self._nbits):
            return FrozenBitset()
        from sage.combinat.subset import SubsetsSorted
        X = SubsetsSorted(range(self._nbits))
        return FrozenBitset(X.an_element())
def exclude_test_and_train_images(kitti_dir, exclude_lists_dir, exclude_target_dir, remove=False):
    """Move (or delete) KITTI frames that are near test/train frames.

    For every exclude-list file, frames within ``distance`` of each listed
    ``_10`` frame are collected from both stereo views (image_02/image_03)
    and then moved to ``exclude_target_dir`` (or deleted when ``remove``).

    Returns the number of files collected.
    """
    to_move = []

    def exclude_from_seq(day_name, seq_str, image, view, distance=10):
        # Collect files around `image` in one sequence/view into `to_move`.
        seq_dir_rel = os.path.join(day_name, seq_str, view, 'data')
        seq_dir_abs = os.path.join(kitti_dir, seq_dir_rel)
        target_dir_abs = os.path.join(exclude_target_dir, seq_dir_rel)
        if not os.path.isdir(seq_dir_abs):
            print('Not found: {}'.format(seq_dir_abs))
            return
        # Replaces a bare try/except-pass around makedirs.
        os.makedirs(target_dir_abs, exist_ok=True)
        seq_files = sorted(os.listdir(seq_dir_abs))
        image_num = frame_name_to_num(image)
        try:
            image_index = seq_files.index(image)
        except ValueError:
            # Listed frame not present in this sequence/view.
            return
        start = max(0, image_index - distance)
        stop = min(len(seq_files), image_index + distance + 2)
        # Also bound by frame number, in case the sequence has gaps.
        start_num = image_num - distance
        stop_num = image_num + distance + 2
        for i in range(start, stop):
            filename = seq_files[i]
            num = frame_name_to_num(filename)
            if num < start_num or num >= stop_num:
                continue
            to_move.append((os.path.join(seq_dir_abs, filename), os.path.join(target_dir_abs, filename)))

    for filename in os.listdir(exclude_lists_dir):
        exclude_list_path = os.path.join(exclude_lists_dir, filename)
        with open(exclude_list_path) as f:
            for line in f:
                line = line.rstrip('\n')
                if line.split(' ')[0].endswith('_10'):
                    # List entries use backslash-separated Windows-style paths.
                    splits = line.split(' ')[-1].split('\\')
                    image = splits[-1]
                    seq_str = splits[0]
                    day_name, seq_name = seq_str.split('_drive_')
                    seq_name = seq_name.split('_')[0] + '_extract'
                    seq_str = day_name + '_drive_' + seq_name
                    exclude_from_seq(day_name, seq_str, image, 'image_02')
                    exclude_from_seq(day_name, seq_str, image, 'image_03')
    if remove:
        print('Collected {} files. Deleting...'.format(len(to_move)))
    else:
        print('Collected {} files. Moving...'.format(len(to_move)))
    for i, data in enumerate(to_move):
        try:
            src, dst = data
            print('{} / {}: {}'.format(i, len(to_move) - 1, src))
            if remove:
                os.remove(src)
            else:
                os.rename(src, dst)
        except OSError:
            # Best-effort: skip files that vanished or cannot be moved.
            # (Previously a bare `except:` that also hid programming errors.)
            pass
    return len(to_move)
def proc(filename):
    """Compute PSNR between a target/prediction image pair.

    ``filename`` is a (target_path, prediction_path) tuple.
    """
    tar_path, prd_path = filename
    target_image = utils.load_img(tar_path)
    predicted_image = utils.load_img(prd_path)
    return utils.calculate_psnr(target_image, predicted_image)
class FullyConnectedLayer(Module):
    """Dense -> activation -> LayerNorm -> dropout block.

    The activation is selected by ``config.hidden_act`` and the LayerNorm
    epsilon by ``config.layer_norm_eps``.
    """

    def __init__(self, config, input_dim, output_dim, dropout_prob):
        super(FullyConnectedLayer, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dropout_prob = dropout_prob
        self.dense = Linear(self.input_dim, self.output_dim)
        self.layer_norm = LayerNorm(self.output_dim, eps=config.layer_norm_eps)
        self.activation_func = ACT2FN[config.hidden_act]
        self.dropout = Dropout(self.dropout_prob)

    def forward(self, inputs):
        # Project, activate, normalise, then apply dropout.
        hidden = self.activation_func(self.dense(inputs))
        return self.dropout(self.layer_norm(hidden))
class ScanLengthResplit(torch.utils.data.Dataset):
    """SCAN dataset re-split by output-sequence length.

    The raw data, vocabularies and length stats are cached at CLASS level
    (shared across instances) and persisted to ``cache_dir``.
    """
    # Class-level cache: loaded once, shared by all instances.
    in_sentences = []
    out_sentences = []
    index_table = {}
    # NOTE(review): the URL literal below is truncated in this copy of the
    # file (unterminated string) — restore the original download URL.
    URL = '

    def _load_dataset(self, cache_dir: str):
        """Populate the class-level caches, downloading/parsing on first use
        and restoring from the torch cache file afterwards."""
        if ScanLengthResplit.in_sentences:
            # Already loaded by a previous instance.
            return
        os.makedirs(cache_dir, exist_ok=True)
        cache_file = os.path.join(cache_dir, 'scan.pth')
        if (not os.path.isfile(cache_file)):
            fn = os.path.join(cache_dir, os.path.split(self.URL)[(- 1)])
            print('Downloading', self.URL)
            download(self.URL, fn, ignore_if_exists=True)
            with open(fn) as f:
                # Each line has the form "IN: <input> OUT: <output>".
                for line in f:
                    line = line.split('OUT:')
                    line[0] = line[0].replace('IN:', '')
                    line = [l.strip() for l in line]
                    ScanLengthResplit.in_sentences.append(line[0])
                    ScanLengthResplit.out_sentences.append(line[1])
            print('Constructing vocabularies')
            ScanLengthResplit.in_vocabulary = WordVocabulary(self.in_sentences)
            ScanLengthResplit.out_vocabulary = WordVocabulary(self.out_sentences)
            # Replace raw strings with token-id sequences.
            ScanLengthResplit.in_sentences = [ScanLengthResplit.in_vocabulary(s) for s in ScanLengthResplit.in_sentences]
            ScanLengthResplit.out_sentences = [ScanLengthResplit.out_vocabulary(s) for s in ScanLengthResplit.out_sentences]
            ScanLengthResplit.max_in_len = max((len(l) for l in ScanLengthResplit.in_sentences))
            ScanLengthResplit.max_out_len = max((len(l) for l in ScanLengthResplit.out_sentences))
            print('Done.')
            torch.save({'in_sentences': ScanLengthResplit.in_sentences, 'out_sentences': ScanLengthResplit.out_sentences, 'in_voc': ScanLengthResplit.in_vocabulary.state_dict(), 'out_voc': ScanLengthResplit.out_vocabulary.state_dict(), 'max_in_len': ScanLengthResplit.max_in_len, 'max_out_len': ScanLengthResplit.max_out_len}, cache_file)
        else:
            # Restore everything from the cached file.
            data = torch.load(cache_file)
            ScanLengthResplit.in_vocabulary = WordVocabulary(None)
            ScanLengthResplit.out_vocabulary = WordVocabulary(None)
            ScanLengthResplit.in_vocabulary.load_state_dict(data['in_voc'])
            ScanLengthResplit.out_vocabulary.load_state_dict(data['out_voc'])
            ScanLengthResplit.in_sentences = data['in_sentences']
            ScanLengthResplit.out_sentences = data['out_sentences']
            ScanLengthResplit.max_in_len = data['max_in_len']
            ScanLengthResplit.max_out_len = data['max_out_len']

    def __init__(self, dset: str, len_range: Tuple[(int, int)], train_proprtion: float=0.9, cache_dir: str='./cache/scan_resplit'):
        """Select the examples whose output length lies in ``len_range``, then
        split them deterministically (seed 1234) into train/test subsets."""
        super().__init__()
        self.cache_dir = cache_dir
        self._load_dataset(cache_dir)
        self.len_range = len_range
        assert (dset in ['train', 'test', 'all'])
        self.my_indices = [i for (i, o) in enumerate(self.out_sentences) if (len_range[0] <= len(o) <= len_range[1])]
        if (dset != 'all'):
            seed = np.random.RandomState(1234)
            test_indices = set(seed.choice(len(self.my_indices), int((len(self.my_indices) * (1 - train_proprtion))), replace=False).tolist())
            # XOR keeps the complement of test_indices for 'train' and the
            # selected indices for 'test'.
            self.my_indices = [i for (ii, i) in enumerate(self.my_indices) if ((ii in test_indices) ^ (dset == 'train'))]
        self.this_max_out_len = max((len(self.out_sentences[i]) for i in self.my_indices))
        self.this_min_out_len = min((len(self.out_sentences[i]) for i in self.my_indices))

    def __len__(self) -> int:
        return len(self.my_indices)

    def __getitem__(self, item: int) -> Dict[(str, Any)]:
        index = self.my_indices[item]
        in_seq = ScanLengthResplit.in_sentences[index]
        out_seq = ScanLengthResplit.out_sentences[index]
        return {'in': np.asarray(in_seq, np.int16), 'out': np.asarray(out_seq, np.int16), 'in_len': len(in_seq), 'out_len': len(out_seq)}

    def get_output_size(self):
        return len(self.out_vocabulary)

    def get_input_size(self):
        return len(self.in_vocabulary)

    def start_test(self) -> TextSequenceTestState:
        # Decoders map token-id sequences back to space-joined strings.
        return TextSequenceTestState((lambda x: ' '.join(self.in_vocabulary(x))), (lambda x: ' '.join(self.out_vocabulary(x))))

    def __str__(self):
        return f'ScanLengthResplit(range=[{self.this_min_out_len}, {self.this_max_out_len}], len={len(self)})'
    __repr__ = __str__
def test_max_three_scalars():
    """max over three scalar variables: the value is the largest input and
    the (sub)gradient is one-hot on the maximising variable."""
    with goos.OptimizationPlan() as plan:
        x = goos.Variable(2)
        y = goos.Variable(3)
        w = goos.Variable(1)
        z = goos.max(x, y, w)
        assert (z.get() == 3)
        # Gradient flows only to y, the argmax input.
        assert (z.get_grad([x])[0] == 0)
        assert (z.get_grad([y])[0] == 1)
        assert (z.get_grad([w])[0] == 0)
# NOTE(review): "(scope='session')" looks like a pytest fixture decorator that
# lost its '@pytest.fixture(' prefix — confirm against the original conftest.
(scope='session')
def atomic_data_fname(tardis_ref_path):
    """Session fixture: path to the Kurucz/CHIANTI H+He atomic data file.

    Aborts the whole pytest run early when the file is missing.
    """
    atomic_data_fname = ((tardis_ref_path / 'atom_data') / 'kurucz_cd23_chianti_H_He.h5')
    atom_data_missing_str = f'{atomic_data_fname} atomic datafiles does not seem to exist'
    if (not atomic_data_fname.exists()):
        pytest.exit(atom_data_missing_str)
    return atomic_data_fname
def prepare_batch_inputs_qfvs(data, config, eval=False):
    """Unpack one QFVS batch and build per-concept model inputs (and, when
    training, targets) on CUDA.

    Returns (inputs_1, inputs_2, inputs_oracle, targets_1, targets_2,
    targets_oracle, mask_GT) in training mode, or (inputs_1, inputs_2,
    inputs_oracle, mask_GT) in eval mode.
    """
    # Batch dimension is 1; index [0] strips it from each field.
    if (not eval):
        (features, mask, seg_len, concept1_GT, concept2_GT, mask_GT, oracle_summary_GT, src_txt_1, src_txt_2, src_txt_mask_1, src_txt_mask_2, saliency_pos_labels_1, saliency_pos_labels_2, saliency_pos_labels_oracle) = (data['features'][0], data['mask_GT'][0], data['seg_len'][0], data['concept1_GT'][0], data['concept2_GT'][0], data['mask_GT'][0], data['oracle_summary'][0], data['tokens_pad1'][0], data['tokens_pad2'][0], data['tokens_pad1'][1], data['tokens_pad2'][1], data['saliency_pos_labels_1'][0], data['saliency_pos_labels_2'][0], data['saliency_pos_labels_oracle'][0])
    else:
        (features, mask, seg_len, src_txt_1, src_txt_2, src_txt_mask_1, src_txt_mask_2) = (data['features'][0], data['mask_GT'][0], data['seg_len'][0], data['tokens_pad1'][0], data['tokens_pad2'][0], data['tokens_pad1'][1], data['tokens_pad2'][1])
    mask_GT = mask.to('cuda').reshape(1, (- 1)).bool()
    seq = features.to('cuda').squeeze(0)
    mask = mask.to('cuda').squeeze(0)
    num_seg = seq.shape[0]
    ctx_l = seq.shape[1]
    # Temporal endpoint features (start/end fractions) appended per position.
    tef_st = (torch.arange(0, ctx_l, 1.0) / ctx_l)
    tef_ed = (tef_st + (1.0 / ctx_l))
    tef = torch.stack([tef_st, tef_ed], dim=1).to('cuda')
    tef = tef.squeeze(0).repeat(seq.shape[0], 1, 1)
    seq = torch.cat([seq, tef], dim=(- 1))
    # Broadcast the text queries over all segments.
    src_txt_1 = src_txt_1.to(torch.float32).to('cuda').repeat(num_seg, 1, 1)
    src_txt_2 = src_txt_2.to(torch.float32).to('cuda').repeat(num_seg, 1, 1)
    src_txt_mask_1 = src_txt_mask_1.to('cuda').repeat(num_seg, 1)
    src_txt_mask_2 = src_txt_mask_2.to('cuda').repeat(num_seg, 1)
    # The "oracle" query concatenates both concepts' tokens.
    src_txt_oracle = torch.cat((src_txt_1, src_txt_2), dim=1).to('cuda')
    src_txt_mask_oracle = torch.cat((src_txt_mask_1, src_txt_mask_2), dim=1).to('cuda')
    model_inputs_1 = dict(src_vid=seq, src_vid_mask=mask, src_txt=src_txt_1, src_txt_mask=src_txt_mask_1)
    model_inputs_2 = dict(src_vid=seq, src_vid_mask=mask, src_txt=src_txt_2, src_txt_mask=src_txt_mask_2)
    model_inputs_oracle = dict(src_vid=seq, src_vid_mask=mask, src_txt=src_txt_oracle, src_txt_mask=src_txt_mask_oracle)
    if (not eval):
        targets_1 = dict(saliency_scores=concept1_GT.to('cuda'), saliency_pos_labels=saliency_pos_labels_1.to('cuda'))
        targets_2 = dict(saliency_scores=concept2_GT.to('cuda'), saliency_pos_labels=saliency_pos_labels_2.to('cuda'))
        targets_oracle = dict(saliency_scores=oracle_summary_GT.to('cuda'), saliency_pos_labels=saliency_pos_labels_oracle.to('cuda'))
        targets_1['timestamp_mask'] = mask
        targets_1['timestamp_window'] = concept1_GT.to('cuda')
        targets_2['timestamp_mask'] = mask
        targets_2['timestamp_window'] = concept2_GT.to('cuda')
        targets_oracle['timestamp_mask'] = mask
        targets_oracle['timestamp_window'] = oracle_summary_GT.to('cuda')
        return (model_inputs_1, model_inputs_2, model_inputs_oracle, targets_1, targets_2, targets_oracle, mask_GT)
    else:
        return (model_inputs_1, model_inputs_2, model_inputs_oracle, mask_GT)
def get_info(path):
    """Return an Info(length, sample_rate, channels) record for an audio
    file, handling both new- and old-style torchaudio.info results."""
    info = torchaudio.info(path)
    if not hasattr(info, 'num_frames'):
        # Legacy torchaudio returns a (signal_info, encoding_info) pair where
        # `length` counts samples across all channels.
        siginfo = info[0]
        return Info(siginfo.length // siginfo.channels, siginfo.rate, siginfo.channels)
    return Info(info.num_frames, info.sample_rate, info.num_channels)
# NOTE(review): `_numpy_output()` looks like a decorator that lost its leading
# '@' — confirm against the original test module.
_numpy_output()
def test_transpose_axes2(A: dace.float32[(10, 5, 3, 2)]):
    # NOTE(review): `axes` has length 3 for a 4-D array, which numpy rejects —
    # confirm whether this test intentionally exercises invalid-axes handling.
    return np.transpose(A, axes=[3, 0, 2])
def low_memory_matrix_op(func, x, y, x_split_axis, y_split_axis, x_num_splits, y_num_splits, verbose=False, aligned=True):
    """Apply a pairwise block operation over splits of ``x`` and ``y`` and
    assemble the block results into one matrix.

    ``x`` is split into ``x_num_splits`` chunks along ``x_split_axis`` (rows
    of the block grid) and ``y`` into ``y_num_splits`` chunks along
    ``y_split_axis`` (columns). Each block is ``func(part_x, part_y,
    aligned)``; rows are concatenated along axis 1, then stacked along axis 0.
    With ``verbose=True``, progress is printed in place.
    """
    if verbose:
        import sys
        import time
        printed = False
        st = time.time()
        last_time = time.time()
    x_parts = np.array_split(x, x_num_splits, axis=x_split_axis)
    y_parts = np.array_split(y, y_num_splits, axis=y_split_axis)
    rows = []
    for i, part_x in enumerate(x_parts):
        row_blocks = []
        for j, part_y in enumerate(y_parts):
            row_blocks.append(func(part_x, part_y, aligned))
            if verbose:
                if printed:
                    # Rewind one line so the progress report updates in place.
                    sys.stdout.write('\x1b[F\x1b[K')
                else:
                    printed = True
                print('Matrix part ({}, {}) / ({}, {}), +{:.2f}s, total {:.2f}s'.format(i + 1, j + 1, x_num_splits, y_num_splits, time.time() - last_time, time.time() - st))
                last_time = time.time()
        rows.append(np.concatenate(row_blocks, axis=1))
    return np.concatenate(rows, axis=0)
def register_codecs():
    """Register draccus encode/decode handlers for jmp.Policy and timedelta."""

    def _dtype_name(dtype):
        # Unwrap wrapper objects until something exposes a dtype name.
        if hasattr(dtype, 'name'):
            return dtype.name
        if hasattr(dtype, 'dtype'):
            return _dtype_name(dtype.dtype)

    def policy_encode(policy: jmp.Policy):
        encoded = 'compute={},params={},output={}'.format(
            _dtype_name(policy.compute_dtype),
            _dtype_name(policy.param_dtype),
            _dtype_name(policy.output_dtype),
        )
        # Round-trip sanity check: the encoded string must decode back.
        assert (jmp.get_policy(encoded) == policy)
        return encoded

    draccus.decode.register(jmp.Policy, (lambda policy_str: jmp.get_policy(policy_str)))
    draccus.encode.register(jmp.Policy, policy_encode)
    draccus.decode.register(timedelta, parse_timedelta)
    draccus.encode.register(timedelta, encode_timedelta)
def get_modifier(mention):
    """Collect the lowercased modifier tokens of a mention.

    Modifiers are tokens outside the head span that are neither determiners
    nor possessive/preposition POS tags ('POS', 'IN').
    """
    # Re-express the head span relative to the mention start.
    head_span = spans.Span(
        (mention.attributes['head_span'].begin - mention.span.begin),
        (mention.attributes['head_span'].end - mention.span.begin),
    )
    determiners = ['the', 'this', 'that', 'those', 'these', 'a', 'an']
    modifiers = set()
    for index, (token, pos) in enumerate(zip(mention.attributes['tokens'], mention.attributes['pos'])):
        outside_head = (index < head_span.begin) or (index > head_span.end)
        if (token.lower() not in determiners) and (pos not in ['POS', 'IN']) and outside_head:
            modifiers.add(token.lower())
    return modifiers
def update_processor_add_transformer(resources, lang, current_processors, processor, transformer):
    """Swap a processor's charlm-based model for its transformer variant.

    Mutates ``current_processors`` in place when the transformer model exists
    in ``resources``; otherwise leaves it unchanged and prints a warning.
    """
    if processor not in current_processors:
        return
    base = current_processors[processor]
    candidate = base.replace('_charlm', '_' + transformer).replace('_nocharlm', '_' + transformer)
    if candidate not in resources[lang][processor]:
        print(('WARNING: wanted to use %s for %s accurate %s, but that model does not exist' % (candidate, lang, processor)))
        return
    current_processors[processor] = candidate
def test_spec_format():
    """Horizons data loaded via sxs must match the raw SpEC-format HDF5 file
    quantity-for-quantity (allclose for *Mag datasets, exact otherwise)."""
    import h5py
    quantities = ['ArealMass', 'ChristodoulouMass', 'CoordCenterInertial', 'DimensionfulInertialSpin', 'DimensionfulInertialSpinMag', 'chiInertial', 'chiMagInertial']
    # Silence sxs's progress output during catalog/file loads.
    with contextlib.redirect_stdout(None):
        catalog = sxs.load('catalog')
    selected = catalog.select_files(file_name)
    selected_path = sxs.utilities.sxs_path_to_system_path(list(selected.values())[0]['truepath'])
    with contextlib.redirect_stdout(None):
        horizons = sxs.load(file_name)
    cached_path = (sxs.sxs_directory('cache') / selected_path)
    with h5py.File(cached_path, 'r') as f:
        # Compare every quantity for each horizon A, B and C.
        for horizon in 'ABC':
            for quantity in quantities:
                name = f'Ah{horizon}.dir/{quantity}.dat'
                (a, b) = (f[name], horizons[name])
                if ('Mag' in quantity):
                    # Magnitudes are recomputed, so allow tiny float error.
                    assert np.allclose(a, b, atol=1e-11, rtol=1e-11)
                else:
                    assert np.array_equal(a, b)
class AutoModelForQuestionAnswering():
    """Factory class mapping a config type to the matching question-answering
    model class; not instantiable directly."""

    def __init__(self):
        raise EnvironmentError('AutoModelForQuestionAnswering is designed to be instantiated using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or `AutoModelForQuestionAnswering.from_config(config)` methods.')
    # NOTE(review): the bare `_list_option_in_docstrings(...)` and
    # `_start_docstrings(...)` calls below look like decorators that lost
    # their leading '@', and `from_config`/`from_pretrained` take `cls`
    # without an @classmethod decorator — confirm against the original
    # transformers source.
    _list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING, use_model_types=False)
    def from_config(cls, config):
        # Instantiate from a config object (no pretrained weights).
        if (type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()):
            return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)](config)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()))))
    _list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING)
    _start_docstrings('Instantiate one of the model classes of the library---with a question answering head---from a pretrained model.', AUTO_MODEL_PRETRAINED_DOCSTRING)
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        # Resolve the config first (from kwargs or the checkpoint), then
        # dispatch to the mapped model class's own from_pretrained.
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
        if (type(config) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()):
            return MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()))))
def test_UnmaskedArray_NumpyArray():
    """Numba-compiled access into an UnmaskedArray-wrapped NumpyArray: length
    and element reads (with a None fallback) behave like the raw data."""
    v2a = ak.contents.unmaskedarray.UnmaskedArray(ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3])))
    def f(out, obj):
        out[0] = len(obj)
        # 999.0 sentinels would indicate a (wrong) None element.
        out[1] = (obj[1] if (obj[1] is not None) else 999.0)
        out[2] = (obj[3] if (obj[3] is not None) else 999.0)
    out = np.zeros(3, dtype=np.float64)
    f(out, ak.highlevel.Array(v2a))
    assert (out.tolist() == [4.0, 1.1, 3.3])
def register_Ns3Icmpv6NS_methods(root_module, cls):
    """Register constructors and methods of ns3::Icmpv6NS on the binding class."""
    # Constructors: copy, from-target-address, and default.
    cls.add_constructor([param('ns3::Icmpv6NS const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address', 'target')])
    cls.add_constructor([])
    # (name, return type, parameters, extra flags) — registered in this order.
    method_table = [
        ('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], {'is_virtual': True}),
        ('GetInstanceTypeId', 'ns3::TypeId', [], {'is_const': True, 'is_virtual': True}),
        ('GetIpv6Target', 'ns3::Ipv6Address', [], {'is_const': True}),
        ('GetReserved', 'uint32_t', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True, 'is_virtual': True}),
        ('GetTypeId', 'ns3::TypeId', [], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True, 'is_virtual': True}),
        ('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], {'is_const': True, 'is_virtual': True}),
        ('SetIpv6Target', 'void', [param('ns3::Ipv6Address', 'target')], {}),
        ('SetReserved', 'void', [param('uint32_t', 'reserved')], {}),
    ]
    for method_name, return_type, params, flags in method_table:
        cls.add_method(method_name, return_type, params, **flags)
    return
def generate_cpu_cuda_to_methods() -> Tuple[(str, str, str)]:
    """Generate source-code snippets for `.cpu()`, `.cuda()` and `.to()` methods.

    Returns a 3-tuple of strings; `tab`/`dtab` are module-level indentation
    strings used by the surrounding code generator, so the exact whitespace
    embedded in these f-strings is significant.
    """
    cpu = f'''
{tab}def cpu(self):
{dtab}return cpu(self)
'''
    # `cuda` and `to` are built as two-part lists and joined with a newline
    # plus double-tab indentation below.
    cuda = [f'{tab}def cuda(self, device=None):', f'''return cuda(self, device=device)
''']
    to = [f'{tab}def to(self, *args, **kwargs):', 'return to(self, *args, **kwargs)']
    return (cpu, f'''
{dtab}'''.join(cuda), f'''
{dtab}'''.join(to))
def resize_img(raw_img):
    """Resize a PIL-style image to a fixed 240px width, preserving aspect ratio."""
    width, height = raw_img.size
    factor = 240 / width
    target = (int(width * factor), int(height * factor))
    return raw_img.resize(target)
def plotly_plot(df, extra_df=None):
    """Build a plotly time-series figure with one line trace per column of `df`
    and, optionally, per column of `extra_df` (trace names suffixed '_extra').
    Colors cycle through the Dark24 qualitative palette across all traces.
    """
    palette = plotly.colors.qualitative.Dark24
    sources = [(df, '')]
    if extra_df is not None:
        sources.append((extra_df, '_extra'))
    traces = []
    trace_idx = 0
    for frame, suffix in sources:
        for i in range(frame.shape[1]):
            col = frame.columns[i]
            series = frame[[col]]
            line_color = palette[trace_idx % len(palette)]
            traces.append(go.Scatter(
                name=f'{col}{suffix}',
                x=series.index,
                y=series.values.flatten().astype(float),
                mode='lines',
                line=dict(color=line_color),
            ))
            trace_idx += 1
    range_buttons = [
        dict(count=7, label='1w', step='day', stepmode='backward'),
        dict(count=1, label='1m', step='month', stepmode='backward'),
        dict(count=6, label='6m', step='month', stepmode='backward'),
        dict(count=1, label='1y', step='year', stepmode='backward'),
        dict(step='all'),
    ]
    layout = dict(showlegend=True, xaxis=dict(title='Time', type='date', rangeselector=dict(buttons=list(range_buttons))))
    fig = make_subplots(figure=go.Figure(layout=layout))
    fig.update_yaxes(title_text='Timeseries')
    for trace in traces:
        fig.add_trace(trace)
    return fig
def _compute_lwork(routine, *args, **kwargs):
    """Run a LAPACK workspace-size query `routine` and validate its result.

    The routine's last return value is an info flag; nonzero raises ValueError.
    Returns one checked work size when the routine returns (size, info), or a
    tuple of checked sizes otherwise.
    """
    dtype = getattr(routine, 'dtype', None)
    int_dtype = getattr(routine, 'int_dtype', None)
    result = routine(*args, **kwargs)
    info = result[-1]
    if info != 0:
        raise ValueError('Internal work array size computation failed: %d' % (info,))
    if len(result) == 2:
        return _check_work_float(result[0].real, dtype, int_dtype)
    return tuple(_check_work_float(value.real, dtype, int_dtype) for value in result[:-1])
def load_histology_shard(shard_num, collaborator_count, categorical=False, channels_last=False, **kwargs):
    """Load one collaborator's shard of the 150x150 RGB histology dataset.

    Returns (input_shape, num_classes, X_train, y_train, X_valid, y_valid);
    labels are one-hot encoded when `categorical` is True.
    """
    img_rows, img_cols = 150, 150
    num_classes = 8
    (X_train, y_train), (X_valid, y_valid) = _load_raw_datashards(shard_num, collaborator_count)
    # Place the channel axis according to the requested data format.
    if channels_last:
        input_shape = (img_rows, img_cols, 3)
    else:
        input_shape = (3, img_rows, img_cols)
    X_train = X_train.reshape(X_train.shape[0], *input_shape)
    X_valid = X_valid.reshape(X_valid.shape[0], *input_shape)
    logger.info(f'Histology > X_train Shape : {X_train.shape}')
    logger.info(f'Histology > y_train Shape : {y_train.shape}')
    logger.info(f'Histology > Train Samples : {X_train.shape[0]}')
    logger.info(f'Histology > Valid Samples : {X_valid.shape[0]}')
    if categorical:
        y_train = one_hot(y_train, num_classes)
        y_valid = one_hot(y_valid, num_classes)
    return (input_shape, num_classes, X_train, y_train, X_valid, y_valid)
class Dataset(object):
    """Abstract base for TFRecord-backed datasets with named subsets."""
    __metaclass__ = ABCMeta

    def __init__(self, name, subset):
        # Fail loudly on unknown subsets, showing the valid options.
        assert subset in self.available_subsets(), self.available_subsets()
        self.name = name
        self.subset = subset

    def num_classes(self):
        """Number of classes; subclasses provide the value."""
        pass

    def num_examples_per_epoch(self):
        """Examples per epoch; subclasses provide the value."""
        pass

    def download_message(self):
        """Instructions shown when data files are missing; subclasses provide it."""
        pass

    def available_subsets(self):
        return ['train', 'validation']

    def data_files(self):
        """Glob the TFRecord shards for this subset; exits the process if none exist."""
        pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)
        matches = tf.gfile.Glob(pattern)
        if not matches:
            print('No files found for dataset %s/%s at %s' % (self.name, self.subset, FLAGS.data_dir))
            self.download_message()
            exit(-1)
        return matches

    def reader(self):
        return tf.TFRecordReader()
def draw_bbox(img, bboxes, c=(255, 0, 255)):
    """Draw rectangles and integer labels on `img` in place.

    Each bbox is indexed as [x1, y1, x2, y2, label, color_index]; the
    rectangle color comes from COLORS[bbox[5]] and the text drawn at the box
    center is int(bbox[4]). The `c` parameter is unused by this code.
    """
    for box in bboxes:
        color = COLORS[int(box[5])]
        top_left = (int(box[0]), int(box[1]))
        bottom_right = (int(box[2]), int(box[3]))
        cv2.rectangle(img, top_left, bottom_right, color, 2, lineType=cv2.LINE_AA)
        center_x = (box[0] + box[2]) / 2
        center_y = (box[1] + box[3]) / 2
        label = '{}'.format(int(box[4]))
        cv2.putText(img, label, (int(center_x), int(center_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=1, lineType=cv2.LINE_AA)
class SelectAlternatives(object):
    """Resolve multi-candidate gold annotations to a single candidate by
    matching against system output at span, then document, then collection
    level, falling back to the first candidate."""

    def __init__(self, system, gold, fields='eid'):
        # `system` / `gold` may be file paths or already-parsed document lists.
        self.system = system
        self.gold = gold
        # '*' means "compare all candidate fields"; otherwise a comma list of
        # attribute names to compare.
        self.fields = (fields.split(',') if (fields != '*') else '*')

    def _get_key(self, candidate):
        # Comparison key for a candidate: either the full attribute dict
        # (fields == '*') or the tuple of the selected attributes.
        if (self.fields == '*'):
            return (candidate.eid, candidate.__dict__)
        return tuple((getattr(candidate, field, None) for field in self.fields))

    def __call__(self):
        """Return the gold annotations (one per line) with each multi-candidate
        annotation reduced to the single best-matching candidate."""
        system = self.system
        if (not isinstance(system, list)):
            system = Reader(utf8_open(system))
        gold = self.gold
        if (not isinstance(gold, list)):
            gold = Reader(utf8_open(gold))
        # Index the system's top candidate key at three granularities.
        by_span = {}
        by_doc = defaultdict(set)
        by_collection = set()
        for doc in system:
            for ann in doc.annotations:
                key = self._get_key(ann.candidates[0])
                by_span[ann] = key
                by_doc[ann.docid].add(key)
                by_collection.add(key)
        # Freeze the defaultdict so missing docids raise KeyError below.
        by_doc.default_factory = None
        out = []
        for doc in gold:
            for ann in doc.annotations:
                if (len(ann.candidates) <= 1):
                    out.append(str(ann))
                    continue
                keys = [self._get_key(cand) for cand in ann.candidates]
                try:
                    # NOTE(review): entries were stored under `ann` (above) but
                    # are looked up under `ann.span` here — confirm the keys of
                    # `by_span` actually align (e.g. via __hash__/__eq__).
                    matched = keys.index(by_span[ann.span])
                except (KeyError, IndexError):
                    # No span-level match: fall back to document-level keys.
                    try:
                        doc_keys = by_doc[ann.docid]
                    except KeyError:
                        doc_keys = set()
                    collection_match = None
                    for (i, key) in enumerate(keys):
                        if (key in doc_keys):
                            matched = i
                            break
                        if ((collection_match is None) and (key in by_collection)):
                            collection_match = i
                    else:
                        # for/else: no document-level hit — use the first
                        # collection-level match, or default to candidate 0.
                        if (collection_match is None):
                            matched = 0
                        else:
                            matched = collection_match
                ann.candidates = [ann.candidates[matched]]
                out.append(str(ann))
        return '\n'.join(out)

    def add_arguments(cls, p):
        # Takes `cls`: presumably decorated as a @classmethod upstream — the
        # decorator is not visible in this rendering.
        p.add_argument('-f', '--fields', default='eid', help='Comma-delimited list of fields to match candidates at the same span between system and gold. "*" will require match on all fields; default is "eid".')
        p.add_argument('-g', '--gold', required=True, help='Path to gold standard annotations')
        p.add_argument('system', metavar='FILE', help='Path to system annotations')
        p.set_defaults(cls=cls)
        return p
class XmodPreTrainedModel(metaclass=DummyObject):
    """Placeholder emitted when `torch` is not installed; instantiating it
    delegates to `requires_backends`, which reports the missing backend."""
    # Backends required by the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_defaultdict_config():
    """`lang_configs` may be a defaultdict; an explicit per-language entry
    overrides the tokenize-only default and enables dependency parsing."""
    tokenize_only = lambda: dict(processors='tokenize')
    lang_configs = defaultdict(tokenize_only)
    run_multilingual_pipeline(en_has_dependencies=False, fr_has_dependencies=False, lang_configs=lang_configs)
    lang_configs = defaultdict(tokenize_only)
    lang_configs['en'] = {'processors': 'tokenize,pos,lemma,depparse'}
    run_multilingual_pipeline(en_has_dependencies=True, fr_has_dependencies=False, lang_configs=lang_configs)
def test():
    """A jagged boolean mask selects elements within each sublist."""
    mask = ak.Array(
        ak.contents.ListOffsetArray(
            ak.index.Index64([0, 3, 5]),
            ak.contents.NumpyArray(
                np.array([True, False, False, True, True, False, False], dtype=np.bool_)
            ),
        )
    )
    values = ak.Array([[0, 1, 2], [3, 4]])
    selected = values[mask]
    assert selected.tolist() == [[0], [3, 4]]
def ud_scores(gold_conllu_file, system_conllu_file):
    """Evaluate a system CoNLL-U file against gold with the UD eval script.

    Raises UDError (with the original error chained) when either file
    cannot be parsed.
    """
    def _read(path):
        try:
            return ud_eval.load_conllu_file(path)
        except UDError as err:
            raise UDError('Could not read %s' % path) from err

    gold_ud = _read(gold_conllu_file)
    system_ud = _read(system_conllu_file)
    return ud_eval.evaluate(gold_ud, system_ud)
# NOTE(review): this looks like a stripped decorator (e.g. `@array_function_dispatch`)
# rendered as a bare call — confirm against the original source.
_function_dispatch(_fftn_dispatcher)
def ifft2(a, s=None, axes=((- 2), (- 1)), norm=None):
    """Compute the 2-D inverse FFT of `a` over the last two axes by default,
    delegating to the n-dimensional helper with `ifft` as the 1-D kernel."""
    return _raw_fftnd(a, s, axes, ifft, norm)
def test_aposteriori():
    """APosteriori DES fit/score smoke test with a fixed RNG seed."""
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()
    rng = np.random.RandomState(123456)
    selector = APosteriori(pool_classifiers, random_state=rng)
    selector.fit(X_dsel, y_dsel)
    # With this seed the accuracy is expected to be (numerically) zero.
    assert np.isclose(selector.score(X_test, y_test), 0.)
class GroupOp(Operation):
    """An operation that nests a list of sub-operations (dumped as a region).

    When there are multiple output types, the operand id fans out to
    'id#0', 'id#1', ... so each result gets a distinct name; dump_head
    re-collapses that to the 'id:count' form.
    """

    def __init__(self, opd_id: str, op_type: OperationType, ops: List[Operation], attrs: Attributes, input_types: List[Type], output_types: List[Type], loc_label: LocLabel) -> None:
        assert isinstance(opd_id, str)
        # ':' is reserved for the 'id:count' form produced by dump_head.
        assert (':' not in opd_id)
        if (len(output_types) == 1):
            opd_ids = [opd_id]
        else:
            opd_ids = [f'{opd_id}#{i}' for i in range(len(output_types))]
        super().__init__(opd_ids, op_type, input_types, output_types, loc_label, attrs)
        self.ops = ops

    def dump_head(self):
        """Render the opening line: 'id:count = op_type ({'."""
        prefix = self.opd_ids[0].split('#')[0]
        number = len(self.opd_ids)
        op_id_str = f'{prefix}:{number}'
        return f'{op_id_str} = {self.op_type.dump()} ({{'

    def dump_tail(self):
        """Render the closing '}) attrs : input_types -> output_types loc' line."""
        attrs_str = self._attrs.dump()
        input_types_str = Type.dump_type_list(self.input_types, force_list=True)
        output_types_str = Type.dump_type_list(self.output_types)
        loc_label_str = self.loc_label.dump()
        return f'}}) {attrs_str} : {input_types_str} -> {output_types_str} {loc_label_str}'

    def dump(self):
        """Full textual form: head line, SPACE-indented sub-ops, tail line."""
        head = self.dump_head()
        tail = self.dump_tail()
        ops_str = '\n'.join([i.dump() for i in self.ops])
        ops_str = textwrap.indent(ops_str, SPACE)
        return f'''{head}
{ops_str}
{tail}'''
class ConvReLU3d(nnq.Conv3d):
    """Quantized Conv3d fused with ReLU, dispatching to the fused
    `quantized.conv3d_relu` op. Float source module: nn.intrinsic.ConvReLU3d."""
    _FLOAT_MODULE = torch.nn.intrinsic.ConvReLU3d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
        assert padding_mode != 'reflect', 'Conv3d does not support reflection padding'
        super(ConvReLU3d, self).__init__(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias, padding_mode=padding_mode)

    def forward(self, input):
        if len(input.shape) != 5:
            raise ValueError('Input shape must be `(N, C, D, H, W)`!')
        if self.padding_mode != 'zeros':
            # Non-zero padding modes are applied explicitly before the fused op.
            pad_amounts = _reverse_repeat_padding(self.padding)
            input = F.pad(input, pad_amounts, mode=self.padding_mode)
        return torch.ops.quantized.conv3d_relu(input, self._packed_params, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedConvReLU3d'

    def from_float(cls, mod):
        # Fold batch-norm stats into the conv weights when converting a
        # QAT ConvBnReLU3d, then defer to the base conversion.
        if type(mod) == torch.nn.intrinsic.qat.ConvBnReLU3d:
            mod.weight, mod.bias = fuse_conv_bn_weights(
                mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
                mod.bn.eps, mod.bn.weight, mod.bn.bias)
        return super(ConvReLU3d, cls).from_float(mod)
def SVD_perSlice(G_times, directed=True, num_eigen=6, top=True, max_size=500):
    """Truncated SVD of each snapshot's Laplacian in a sequence of graphs.

    Each graph is padded with isolated placeholder nodes (non-positive ids)
    up to `max_size` so all slices share dimensions. Returns a pair
    (singular-value arrays per slice, per-slice row of U at the largest
    singular value).

    NOTE(review): `u[max_index]` takes a ROW of `u`, not the singular-vector
    column `u[:, max_index]` — confirm this is intended.
    """
    singular_values_per_slice = []
    activity_vecs = []
    for slice_idx, G in enumerate(G_times):
        # Pad with isolated dummy nodes (empty range when already large enough).
        for pad in range(len(G), max_size):
            G.add_node(-1 * pad)
        L = nx.directed_laplacian_matrix(G) if directed else nx.laplacian_matrix(G)
        L = L.asfptype()
        which = 'LM' if top else 'SM'
        u, s, vh = svds(L, k=num_eigen, which=which)
        max_index = list(s).index(max(list(s)))
        activity_vecs.append(np.asarray(u[max_index]))
        singular_values_per_slice.append(np.asarray(s))
        print('processing ' + str(slice_idx), end='\r')
    return (singular_values_per_slice, activity_vecs)
def original_fid(F):
    """Evaluate (F^2 + e^2) / (F^2 + 2F(1-F)/3 + 5e^2) where e = (1-F)/3.

    Maps F=1 -> 1 and F=0 -> 0.2.
    """
    err = (1 - F) / 3
    numerator = F ** 2 + err ** 2
    denominator = F ** 2 + 2 * F * (1 - F) / 3 + 5 * err ** 2
    return numerator / denominator
class GenerationMsgType(Enum):
    """Message types exchanged during the generation protocol (values are
    auto-assigned; only identity matters)."""
    # Start a negotiation.
    NEGOTIATE = auto()
    # Acknowledge a NEGOTIATE message.
    NEGOTIATE_ACK = auto()
    # Carry a measurement result (presumably; inferred from the name).
    MEAS_RES = auto()
class BaseDataset(Dataset):
    """Generic annotation-backed dataset.

    Loads the 'annotations' list from every JSON file in `ann_paths`, stamps
    each record with a string 'instance_id', and holds the visual/text
    processors applied by subclasses.

    Args:
        vis_processor: callable applied to visual inputs (may be None).
        text_processor: callable applied to text inputs (may be None).
        vis_root: root directory of the visual data.
        ann_paths: iterable of JSON annotation file paths.
    """

    def __init__(self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=()):
        # Default changed from the mutable `[]` to the immutable `()`:
        # iteration behavior is identical and the shared-mutable-default
        # pitfall is avoided.
        self.vis_root = vis_root
        self.annotation = []
        for ann_path in ann_paths:
            # Context manager closes the handle promptly; the previous
            # `json.load(open(...))` leaked it.
            with open(ann_path, 'r') as ann_file:
                self.annotation.extend(json.load(ann_file)['annotations'])
        self.vis_processor = vis_processor
        self.text_processor = text_processor
        self._add_instance_ids()

    def __len__(self):
        return len(self.annotation)

    def collater(self, samples):
        # Default collation; subclasses may override.
        return default_collate(samples)

    def set_processors(self, vis_processor, text_processor):
        self.vis_processor = vis_processor
        self.text_processor = text_processor

    def _add_instance_ids(self, key='instance_id'):
        # Stamp each annotation with its (string) index within this dataset.
        for idx, ann in enumerate(self.annotation):
            ann[key] = str(idx)
def _eval_func_get_epoch(self: LayerBase, **_kwargs) -> tf.Tensor:
    """Return the current training epoch as a scalar int32 tensor,
    fetched lazily from the root network's run options via a py_func."""
    run_opts = self.network.get_root_network().get_run_opts()

    def _fetch_epoch() -> int:
        return run_opts['epoch']

    epoch, = tf_compat.v1.py_func(_fetch_epoch, [], [tf.int32], stateful=True)
    assert isinstance(epoch, tf.Tensor)
    epoch.set_shape(())
    return epoch
def splint(xa, ya, y2a, n, x):
    """Cubic-spline interpolation at `x` (Numerical Recipes' `splint`).

    `xa` are the n knot positions, `ya` the values and `y2a` the second
    derivatives from the spline setup. Degenerate (equal) bracketing knots
    print a warning and return the 1e309 (inf) sentinel.
    """
    lo, hi = 0, n - 1
    # Bisect to find the interval [xa[lo], xa[hi]] bracketing x.
    while hi - lo > 1:
        mid = (hi + lo) >> 1
        if xa[mid] > x:
            hi = mid
        else:
            lo = mid
    h = xa[hi] - xa[lo]
    if h == 0:
        print('Bad xa input to routine splint')
        return 1e309
    a = (xa[hi] - x) / h
    b = (x - xa[lo]) / h
    cubic = ((a ** 3 - a) * y2a[lo] + (b ** 3 - b) * y2a[hi]) * (h ** 2) / 6.0
    return a * ya[lo] + b * ya[hi] + cubic
def convert_to_string(data):
    """Recursively decode UTF-8 bytes into str.

    Lists are converted element-wise; any other value passes through unchanged.
    """
    if isinstance(data, list):
        return [convert_to_string(item) for item in data]
    if isinstance(data, bytes):
        return data.decode('utf-8')
    return data
def merge_csvs(ins, out):
    """Append every line from the input files (in sorted path order) to `out`.

    Returns the number of lines written. Note `out` is opened in append
    mode, so repeated calls accumulate.
    """
    written = 0
    with open(out, 'a+') as sink:
        for path in sorted(ins):
            with open(path, 'r') as source:
                for line in source:
                    sink.write(line)
                    written += 1
    return written
def test_propagate_strict():
    """Simplification should collapse all nested SDFGs into a single one."""
    simplified = propagate_strict.to_sdfg(simplify=True)
    assert len(list(simplified.all_sdfgs_recursive())) == 1
    unsimplified = propagate_strict.to_sdfg(simplify=False)
    assert len(list(unsimplified.all_sdfgs_recursive())) > 1
class CudaError(RuntimeError):
    """RuntimeError carrying a CUDA error code and its decoded message."""

    def __init__(self, code):
        # cudaGetErrorString returns bytes; decode for a readable message.
        message = cudart().cudaGetErrorString(code).decode('utf-8')
        super(CudaError, self).__init__('{0} ({1})'.format(message, code))
class AlproBaseDataset(Dataset):
    """Base dataset for ALPRO-style video/text samples.

    Frames come either from an LMDB database (img_db_type='lmdb') or from raw
    video files; `num_frm` frames are sampled per `frm_sampling_strategy`,
    then resized and padded to `max_img_size`. Subclasses implement
    __getitem__.
    """

    def __init__(self, datalist, tokenizer, img_lmdb_dir, img_db_type='lmdb', fps=3, num_frm=3, frm_sampling_strategy='rand', max_img_size=(- 1), max_txt_len=20):
        self.fps = fps
        self.num_frm = num_frm
        self.frm_sampling_strategy = frm_sampling_strategy
        self.datalist = datalist
        self.tokenizer = tokenizer
        self.max_txt_len = max_txt_len
        self.max_img_size = max_img_size
        self.img_resize = ImageResize(max_img_size, 'bilinear')
        self.img_pad = ImagePad(max_img_size, max_img_size)
        self.img_db_type = img_db_type
        assert (img_db_type in ['lmdb', 'rawvideo']), "Invalid type for img_db_type, expected {'lmdb', 'rawvideo'}, found {}.".format(img_db_type)
        if (self.img_db_type == 'lmdb'):
            # Read-only LMDB environment; a single buffered transaction is
            # reused for all lookups.
            self.env = lmdb.open(img_lmdb_dir, readonly=True, create=False)
            self.txn = self.env.begin(buffers=True)
        else:
            self.img_db_dir = img_lmdb_dir

    def __len__(self):
        return len(self.datalist)

    def __getitem__(self, index):
        # Sample assembly is dataset-specific; subclasses must override.
        raise NotImplementedError

    def _load_img(self, img_id):
        """Load one image from LMDB, convert to a float tensor, resize and pad."""
        raw_img = load_decompress_img_from_lmdb_value(self.txn.get(str(img_id).encode('utf-8')))
        image_np = np.array(raw_img, dtype=np.uint8)
        raw_img_tensor = image_to_tensor(image_np, keepdim=False).float()
        resized_img = self.img_resize(raw_img_tensor)
        transformed_img = self.img_pad(resized_img)
        return transformed_img

    def _is_extreme_aspect_ratio(cls, tensor, max_ratio=5.0):
        # True when height/width exceeds max_ratio in either direction.
        # NOTE(review): first parameter is `cls` but no @classmethod decorator
        # is visible in this rendering — confirm upstream.
        (h, w) = tensor.shape[(- 2):]
        return (((h / float(w)) > max_ratio) or ((h / float(w)) < (1 / max_ratio)))

    def _load_video(self, video_id, num_clips=None, clip_idx=None, safeguard_duration=False, video_max_pts=None):
        """Decode `num_frm` frames for `video_id` from the LMDB blob.

        Returns (padded frames, video_max_pts), or (None, None) when decoding
        fails or the aspect ratio is extreme.
        """
        assert ((num_clips is None) == (clip_idx is None)), 'Both None, or both not None'
        io_stream = io.BytesIO(self.txn.get(str(video_id).encode('utf-8')))
        (raw_sampled_frms, video_max_pts) = extract_frames_from_video_binary(io_stream, target_fps=self.fps, num_frames=self.num_frm, multi_thread_decode=False, sampling_strategy=self.frm_sampling_strategy, num_clips=num_clips, clip_idx=clip_idx, safeguard_duration=safeguard_duration, video_max_pts=video_max_pts)
        if (raw_sampled_frms is None):
            return (None, None)
        elif self._is_extreme_aspect_ratio(raw_sampled_frms, max_ratio=5.0):
            print(f'Found extreme aspect ratio for video id {video_id}. Skip it')
            return (None, None)
        raw_sampled_frms = raw_sampled_frms.float()
        resized_frms = self.img_resize(raw_sampled_frms)
        padded_frms = self.img_pad(resized_frms)
        return (padded_frms, video_max_pts)

    def _load_video_from_path_decord(self, video_path, height=None, width=None, start_time=None, end_time=None, fps=(- 1)):
        """Decode frames straight from a video file with decord.

        Returns an (N, C, H, W) tensor of sampled frames, or None when any
        decoding error occurs (the exception is swallowed).
        """
        try:
            if ((not height) or (not width)):
                vr = VideoReader(video_path)
            else:
                vr = VideoReader(video_path, width=width, height=height)
            vlen = len(vr)
            if (start_time or end_time):
                # Time window requires a valid fps to convert seconds to indices.
                assert (fps > 0), 'must provide video fps if specifying start and end time.'
                start_idx = min(int((start_time * fps)), vlen)
                end_idx = min(int((end_time * fps)), vlen)
            else:
                (start_idx, end_idx) = (0, vlen)
            if (self.frm_sampling_strategy == 'uniform'):
                frame_indices = np.arange(start_idx, end_idx, (vlen / self.num_frm), dtype=int)
            elif (self.frm_sampling_strategy == 'nlvl_uniform'):
                frame_indices = np.arange(start_idx, end_idx, (vlen / self.num_frm)).astype(int)
            elif (self.frm_sampling_strategy == 'nlvl_rand'):
                # Uniform anchors, then each index jittered within its stride.
                frame_indices = np.arange(start_idx, end_idx, (vlen / self.num_frm)).astype(int)
                strides = ([(frame_indices[i] - frame_indices[(i - 1)]) for i in range(1, len(frame_indices))] + [(vlen - frame_indices[(- 1)])])
                pertube = np.array([np.random.randint(0, stride) for stride in strides])
                frame_indices = (frame_indices + pertube)
            elif (self.frm_sampling_strategy == 'rand'):
                frame_indices = sorted(random.sample(range(vlen), self.num_frm))
            elif (self.frm_sampling_strategy == 'headtail'):
                # Half the frames from the first half of the video, half from the second.
                frame_indices_head = sorted(random.sample(range((vlen // 2)), (self.num_frm // 2)))
                frame_indices_tail = sorted(random.sample(range((vlen // 2), vlen), (self.num_frm // 2)))
                frame_indices = (frame_indices_head + frame_indices_tail)
            else:
                raise NotImplementedError('Invalid sampling strategy {} '.format(self.frm_sampling_strategy))
            raw_sample_frms = vr.get_batch(frame_indices)
        except Exception as e:
            # NOTE(review): all errors (including the NotImplementedError above)
            # are silently converted into a None return — confirm intended.
            return None
        # (N, H, W, C) -> (N, C, H, W)
        raw_sample_frms = raw_sample_frms.permute(0, 3, 1, 2)
        return raw_sample_frms
()
('--batch_size', type=int, default=4000)
_experiment
def trpo_cubecrash(ctxt=None, seed=1, batch_size=4000):
    """Train TRPO with a categorical CNN policy on CubeCrash-v0.

    Args:
        ctxt: experiment context passed to LocalTFRunner.
        seed: RNG seed for reproducibility.
        batch_size: environment steps collected per training iteration.
    """
    set_seed(seed)
    with LocalTFRunner(ctxt) as runner:
        env = GarageEnv(normalize(gym.make('CubeCrash-v0')))
        policy = CategoricalCNNPolicy(env_spec=env.spec, filters=((32, (8, 8)), (64, (4, 4))), strides=(4, 2), padding='VALID', hidden_sizes=(32, 32))
        # The baseline mirrors the policy's CNN architecture.
        baseline = GaussianCNNBaseline(env_spec=env.spec, regressor_args=dict(filters=((32, (8, 8)), (64, (4, 4))), strides=(4, 2), padding='VALID', hidden_sizes=(32, 32), use_trust_region=True))
        algo = TRPO(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, policy_ent_coeff=0.0, flatten_input=False)
        runner.setup(algo, env)
        runner.train(n_epochs=100, batch_size=batch_size)
def multiple_databases():
    """Fixture-style generator: create three single-table databases under
    DB_PATH, yield a MultipleDatabases handle over them, then remove the
    directory tree on teardown."""
    os.makedirs(DB_PATH)
    for suffix in (1, 2, 3):
        SingleDatabase(db_path=DB_PATH, db_name=f'{DB_NAME}_{suffix}', tables={TABLE_NAME: TABLE_DATAFRAME})
    yield MultipleDatabases(DB_PATH)
    shutil.rmtree(DB_PATH)
def maxp(cg, priority=3, background_knowledge=None):
    """Orient unshielded colliders in a causal graph using the Max-P rule.

    For each unshielded triple (x, y, z), the maximum CI-test p-value over
    conditioning sets containing y is compared with the maximum over sets
    excluding y; when the latter wins, x -> y <- z is a collider candidate.
    `priority` controls how conflicts with existing edges are handled:
    0 overwrites unconditionally, 1 overwrites with endpoint-aware logic,
    2 orients only when neither y->x nor y->z is already fully directed,
    3/4 defer orientation and process triples ranked by p-value
    (ascending / descending). Returns a deep copy of `cg` with the
    orientations applied; `cg` itself is untouched.
    """
    assert (priority in [0, 1, 2, 3, 4])
    cg_new = deepcopy(cg)
    UC_dict = {}
    # i < k deduplicates symmetric triples.
    UT = [(i, j, k) for (i, j, k) in cg_new.find_unshielded_triples() if (i < k)]
    for (x, y, z) in UT:
        # Skip triples whose collider orientation would violate background knowledge.
        if ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[x], cg_new.G.nodes[y]) or background_knowledge.is_forbidden(cg_new.G.nodes[z], cg_new.G.nodes[y]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[x]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[z]))):
            continue
        cond_with_y = cg_new.find_cond_sets_with_mid(x, z, y)
        cond_without_y = cg_new.find_cond_sets_without_mid(x, z, y)
        max_p_contain_y = max([cg_new.ci_test(x, z, S) for S in cond_with_y])
        max_p_not_contain_y = max([cg_new.ci_test(x, z, S) for S in cond_without_y])
        if (max_p_not_contain_y > max_p_contain_y):
            # Evidence for a collider x -> y <- z.
            if (priority == 0):
                # Remove any existing edges between x,y and z,y, then add arrows into y.
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                if (edge1 is not None):
                    cg_new.G.remove_edge(edge1)
                edge2 = cg_new.G.get_edge(cg_new.G.nodes[y], cg_new.G.nodes[x])
                if (edge2 is not None):
                    cg_new.G.remove_edge(edge2)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                edge3 = cg_new.G.get_edge(cg_new.G.nodes[y], cg_new.G.nodes[z])
                if (edge3 is not None):
                    cg_new.G.remove_edge(edge3)
                edge4 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                if (edge4 is not None):
                    cg_new.G.remove_edge(edge4)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
            elif (priority == 1):
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                # NOTE(review): this branch tests `edge1 is None` yet calls
                # remove_edge(edge1) inside — the sibling priority-0/2 branches
                # test `is not None`; confirm the condition is as intended.
                if (edge1 is None):
                    if ((cg_new.G.graph[(x, y)] == Endpoint.TAIL.value) and (cg_new.G.graph[(y, x)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge1)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                    elif ((cg_new.G.graph[(x, y)] == Endpoint.ARROW.value) and (cg_new.G.graph[(y, x)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge1)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.ARROW, Endpoint.ARROW))
                else:
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                edge2 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                # NOTE(review): same `is None` concern as edge1 above.
                if (edge2 is None):
                    if ((cg_new.G.graph[(z, y)] == Endpoint.TAIL.value) and (cg_new.G.graph[(y, z)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge2)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                    elif ((cg_new.G.graph[(z, y)] == Endpoint.ARROW.value) and (cg_new.G.graph[(y, z)] == Endpoint.TAIL.value)):
                        cg_new.G.remove_edge(edge2)
                        cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.ARROW, Endpoint.ARROW))
                else:
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
            elif (priority == 2):
                # Orient only when no existing fully-directed edge out of y conflicts.
                if ((not cg_new.is_fully_directed(y, x)) and (not cg_new.is_fully_directed(y, z))):
                    edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                    if (edge1 is not None):
                        cg_new.G.remove_edge(edge1)
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                    edge2 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                    if (edge2 is not None):
                        cg_new.G.remove_edge(edge2)
                    cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
            elif (priority == 3):
                # Defer: rank by the p-value of the "y included" tests.
                UC_dict[(x, y, z)] = max_p_contain_y
            elif (priority == 4):
                # Defer: rank by the p-value of the "y excluded" tests.
                UC_dict[(x, y, z)] = max_p_not_contain_y
    if (priority in [0, 1, 2]):
        return cg_new
    else:
        # Priority 3 processes triples in ascending p-value order, 4 descending.
        if (priority == 3):
            UC_dict = sort_dict_ascending(UC_dict)
        else:
            UC_dict = sort_dict_ascending(UC_dict, True)
        for (x, y, z) in UC_dict.keys():
            # Re-check background knowledge before each deferred orientation.
            if ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg_new.G.nodes[x], cg_new.G.nodes[y]) or background_knowledge.is_forbidden(cg_new.G.nodes[z], cg_new.G.nodes[y]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[x]) or background_knowledge.is_required(cg_new.G.nodes[y], cg_new.G.nodes[z]))):
                continue
            if ((not cg_new.is_fully_directed(y, x)) and (not cg_new.is_fully_directed(y, z))):
                edge1 = cg_new.G.get_edge(cg_new.G.nodes[x], cg_new.G.nodes[y])
                if (edge1 is not None):
                    cg_new.G.remove_edge(edge1)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[x], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
                edge2 = cg_new.G.get_edge(cg_new.G.nodes[z], cg_new.G.nodes[y])
                if (edge2 is not None):
                    cg_new.G.remove_edge(edge2)
                cg_new.G.add_edge(Edge(cg_new.G.nodes[z], cg_new.G.nodes[y], Endpoint.TAIL, Endpoint.ARROW))
        return cg_new
def default_regression_model(num_anchors, pyramid_feature_size=256, regression_feature_size=256, name='regression_submodel'):
    """Build the box-regression submodel applied to each pyramid level.

    Four shared 3x3 conv blocks are followed by a 300-dim projection, the
    custom 65-dim MyLayer_w2v embedding layer, and a final conv producing
    4 regression values per anchor, reshaped to (-1, 4).
    """
    conv_opts = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'kernel_initializer': keras.initializers.normal(mean=0.0, stddev=0.01, seed=None),
        'bias_initializer': 'zeros',
    }
    inputs = keras.layers.Input(shape=(None, None, pyramid_feature_size))
    outputs = inputs
    for i in range(4):
        outputs = keras.layers.Conv2D(filters=regression_feature_size, activation='relu', name='pyramid_regression_{}'.format(i), **conv_opts)(outputs)
    outputs = keras.layers.Conv2D(300, name='pyramid_regression_shafin1', **conv_opts)(outputs)
    outputs = keras.layers.Reshape((-1, 300), name='pyramid_regression_shafin2')(outputs)
    outputs = MyLayer_w2v.MyLayer(output_dim=65)(outputs)
    outputs = keras.layers.Reshape((-1, 1, 65))(outputs)
    outputs = keras.layers.Conv2D(num_anchors * 4, name='pyramid_regression', **conv_opts)(outputs)
    outputs = keras.layers.Reshape((-1, 4), name='pyramid_regression_reshape')(outputs)
    return keras.models.Model(inputs=inputs, outputs=outputs, name=name)
.parametrize('reference', [0.0, [0.0], [[0.0]]])
def test_pareto_hypervolume_indicator_raises_for_reference_with_invalid_shape(reference: SequenceN[float]) -> None:
    """hypervolume_indicator must reject reference tensors with an invalid
    shape (the parametrized cases: scalar, rank-1 single value, rank-2)."""
    pareto = Pareto(tf.constant([[(- 1.0), (- 0.6)], [(- 0.8), (- 0.7)], [(- 0.6), (- 1.1)]]))
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        pareto.hypervolume_indicator(tf.constant(reference))
def masked_loss(loss_fn, pred, data, mask):
    """Element-wise loss between `pred` and `data` (broadcast to pred's shape),
    zeroed wherever `mask` is zero. `loss_fn` must accept reduction='none'."""
    target = data.expand_as(pred)
    per_element = loss_fn(pred, target, reduction='none')
    return per_element * mask
class LoggingBackend(GenericBackend):
    """Proxy backend that wraps callable attributes of an underlying backend
    via _make_wrapper (e.g. for logging/doctest generation), caching the
    wrapped bound methods on the instance."""

    def __init__(self, backend, printing=True, doctest=None, test_method=None, base_ring=None):
        self._backend = backend
        self._printing = printing
        self._doctest = doctest
        self._test_method = test_method
        self._base_ring = base_ring

    def __getattr__(self, attr):
        # Only reached for attributes not found on self: delegate to the
        # wrapped backend; callables get wrapped and cached via setattr so
        # subsequent lookups bypass __getattr__.
        delegate = getattr(self._backend, attr)
        if not callable(delegate):
            return delegate
        import types
        wrapped = types.MethodType(_make_wrapper(self._backend, attr), self)
        setattr(self, attr, wrapped)
        return wrapped

    def base_ring(self):
        # An explicit override takes precedence over the backend's ring.
        if self._base_ring is not None:
            return self._base_ring
        return self._backend.base_ring()
class SymmetricGraphPreProcessingLayer(Layer):
    """Keras layer that symmetrizes an adjacency matrix, forces unit
    self-loops, and applies symmetric normalization D^{-1/2} A D^{-1/2}."""

    def __init__(self, num_of_nodes, **kwargs):
        # Square output: one row/column per node.
        self.output_dims = (num_of_nodes, num_of_nodes)
        super().__init__(**kwargs)

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, adj):
        adj_T = tf.transpose(adj)
        # Element-wise symmetrization: where adj_T > adj the entry is replaced
        # by adj_T, otherwise kept — i.e. max(adj, adj^T) per element.
        adj = ((adj + tf.multiply(adj_T, tf.where((adj_T > adj), tf.ones_like(adj), tf.zeros_like(adj)))) - tf.multiply(adj, tf.where((adj_T > adj), tf.ones_like(adj), tf.zeros_like(adj))))
        # Overwrite the diagonal with exact ones (self-loops).
        adj = (adj + tf.linalg.diag((tf.ones(adj.shape[0]) - tf.diag_part(adj))))
        # Symmetric degree normalization: D^{-1/2} A D^{-1/2}.
        rowsum = tf.reduce_sum(adj, 1)
        d_mat_inv_sqrt = tf.diag(tf.rsqrt(rowsum))
        adj_normalized = tf.matmul(tf.matmul(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
        return adj_normalized
def test_process_predictions_zeros(example_diversity_ones_zeros):
    """Comparing the all-zeros predictor with itself yields only N00/N11 mass."""
    y, y_pred_ones, y_pred_zeros = example_diversity_ones_zeros
    n00, n10, n01, n11 = _process_predictions(y, y_pred_zeros, y_pred_zeros)
    assert (n00 == 6.0 / 15.0) and (n11 == 9.0 / 15.0) and (n01 == 0.0) and (n10 == 0.0)
def debug(env, obs, agent_info):
    """Plot the agent's imagined trajectory next to the real rollout.

    Lazily creates two side-by-side axes (module-level ax1/ax2) on first use
    and redraws them on every call. Becomes a warning-only no-op when
    matplotlib is unavailable.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('could not import matplotlib')
        return  # bug fix: previously fell through and crashed on undefined `plt`
    global ax1
    global ax2
    if ax1 is None:
        _, (ax1, ax2) = plt.subplots(1, 2)
    subgoal_seq = agent_info['subgoal_seq']
    planned_action_seq = agent_info['planned_action_seq']
    real_obs_seq = env.true_states(obs, planned_action_seq)
    # Left panel: the trajectory the planner imagined.
    ax1.clear()
    env.plot_trajectory(ax1, np.array(subgoal_seq), np.array(planned_action_seq), goal=env._target_position)
    ax1.set_title('imagined')
    # Right panel: the trajectory actually realized in the environment.
    ax2.clear()
    env.plot_trajectory(ax2, np.array(real_obs_seq), np.array(planned_action_seq), goal=env._target_position)
    ax2.set_title('real')
    plt.draw()
    plt.pause(0.001)
def resize_output(t, height, width, channels):
    """Bilinearly resize tensor `t` to (height, width).

    `channels` is accepted for signature compatibility but unused here.
    """
    return tf.image.resize_bilinear(t, [height, width])
def test_1d_1d_different_dtypes_stride_trick():
    """from_buffers must reject non-contiguous (stride-tricked) buffers."""
    data = np.array([101], dtype=np.int64)
    # Zero-stride view: 40 logical elements all aliasing the single int64.
    array = np.lib.stride_tricks.as_strided(data, (40,), strides=(0,))
    container = {'node0-data': array}
    form = '\n {\n "class": "NumpyArray",\n "primitive": "int32",\n "form_key": "node0"\n }\n '
    with pytest.raises(ValueError, match='contiguous'):
        ak.from_buffers(form, array.size, container, highlevel=False)
class AssertNoJIT():
    """Context manager asserting that the TorchScript JIT is disabled.

    PyTorch treats the environment variable PYTORCH_JIT='0' as "JIT off".
    """

    def __enter__(self):
        import os
        # Environment values are strings: the previous check
        # `assert not os.environ.get('PYTORCH_JIT', 1)` was truthy for ANY
        # set value (including '0'), so it could never pass with the JIT
        # explicitly disabled. Compare against '0' instead; the unset
        # default ('1') still means enabled.
        enabled = os.environ.get('PYTORCH_JIT', '1') != '0'
        assert not enabled

    def __exit__(self, *args, **kwargs):
        pass
def add_lsh_self_attention_layer(d, input, output, inside_rec_layer=True, past_only=None, time_axis=None, *, num_heads=8, num_rounds=1, key_dim=64, value_dim=64, dropout=0.0, num_hashes, chunk_size, chunks_before=None, chunks_after=None, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0), mask_current=True, small_mask_value=float((- (10 ** 5))), share_key_query=True, normalize_keys=None, mask_different_hashes=True, allow_duplicate_attention=False, chunk_alignment, shuffle_kv=False, debug_print=False):
    """Add an LSH self-attention block to the RETURNN layer-dict `d`.

    Builds the Q/K/V projection layers under names derived from `output`
    (e.g. output + '_query'), then delegates the actual hashing/chunked
    attention to `generic_add_lsh_attention_layer`, and finally exposes the
    result as `output + '_att'`. With `share_key_query`, the key is the
    (optionally normalized) query; inside a recurrent layer, Q/K/V are
    accumulated over time with 'cum_concat'.
    """
    # Inside a rec layer only the past is visible, hence past_only defaults on.
    if (past_only is None):
        past_only = inside_rec_layer
    if (time_axis is None):
        time_axis = ('stag:extern_data:classes' if inside_rec_layer else 'stag:extern_data:data')
    assert time_axis.startswith('stag:')
    assert ((not inside_rec_layer) or past_only)
    if (normalize_keys is None):
        normalize_keys = share_key_query
    if share_key_query:
        # Single projection produces query+value; key is derived from query.
        d[(output + '_qv0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (num_heads * (key_dim + value_dim)), 'forward_weights_init': ff_init}
        d[(output + '_qv_unnamed')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, (key_dim + value_dim)), 'from': [(output + '_qv0')]}
        d[(output + '_qv')] = {'class': 'name_axis', 'axis': 'static:-2', 'description': 'att-heads', 'from': [(output + '_qv_unnamed')]}
        d[(output + '_qv_split')] = {'class': 'split', 'axis': 'F', 'size_splits': (key_dim, value_dim), 'from': [(output + '_qv')]}
        d[(output + '_query')] = {'class': 'copy', 'from': [(output + '_qv_split/0')]}
        if normalize_keys:
            d[(output + '_key')] = {'class': 'eval', 'eval': normalize_eval, 'from': [(output + '_query')]}
        else:
            d[(output + '_key')] = {'class': 'copy', 'from': [(output + '_query')]}
        d[(output + '_value')] = {'class': 'copy', 'from': [(output + '_qv_split/1')]}
    else:
        # Separate projections for query, key and value.
        d[(output + '_qkv0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (num_heads * ((2 * key_dim) + value_dim)), 'forward_weights_init': ff_init}
        d[(output + '_qkv_unnamed')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, ((2 * key_dim) + value_dim)), 'from': [(output + '_qkv0')]}
        d[(output + '_qkv')] = {'class': 'name_axis', 'axis': 'static:-2', 'description': 'att-heads', 'from': [(output + '_qkv_unnamed')]}
        d[(output + '_qkv_split')] = {'class': 'split', 'axis': 'F', 'size_splits': (key_dim, key_dim, value_dim), 'from': [(output + '_qkv')]}
        d[(output + '_query')] = {'class': 'copy', 'from': [(output + '_qkv_split/0')]}
        if normalize_keys:
            d[(output + '_key')] = {'class': 'eval', 'eval': normalize_eval, 'from': [(output + '_qkv_split/1')]}
        else:
            d[(output + '_key')] = {'class': 'copy', 'from': [(output + '_qkv_split/1')]}
        d[(output + '_value')] = {'class': 'copy', 'from': [(output + '_qkv_split/2')]}
    if inside_rec_layer:
        # Accumulate Q/K/V over decoding steps; attention then runs over the
        # accumulated 'rec-history' axis.
        (queries_input, keys_input, values_input) = ((output + '_query_accum'), (output + '_key_accum'), (output + '_value_accum'))
        for qkv in ('query', 'key', 'value'):
            d[(output + ('_%s_accum' % qkv))] = {'class': 'cum_concat', 'from': [(output + ('_%s' % qkv))], 'axis': time_axis}
        time_axis_ = 'stag:rec-history'
    else:
        (queries_input, keys_input, values_input) = ((output + '_query'), (output + '_key'), (output + '_value'))
        time_axis_ = time_axis
    generic_add_lsh_attention_layer(d, queries_input=queries_input, keys_input=keys_input, values_input=values_input, output=output, query_time_axis=time_axis_, key_time_axis=time_axis_, num_heads=num_heads, num_rounds=num_rounds, key_dim=key_dim, value_dim=value_dim, dropout=dropout, num_hashes=num_hashes, query_chunk_size=chunk_size, key_chunk_size=chunk_size, key_chunks_before=chunks_before, key_chunks_after=chunks_after, hash_init=ff_init, small_mask_value=small_mask_value, past_only=past_only, mask_current=mask_current, mask_different_hashes=mask_different_hashes, allow_duplicate_attention=allow_duplicate_attention, chunk_alignment=chunk_alignment, shuffle_kv=shuffle_kv, debug_print=debug_print)
    if inside_rec_layer:
        # Inside the rec loop, pick out the current step from the accumulated output.
        d[(output + '_att')] = {'class': 'gather', 'from': [(output + '_att_all')], 'position': ':i', 'axis': time_axis_}
    else:
        d[(output + '_att')] = {'class': 'copy', 'from': [(output + '_att_all')]}
def get_norm(norm, out_channels, num_gn_groups=32):
    """Resolve a normalization spec (name or factory) into a module instance.

    Args:
        norm: either a string key ('BN', 'SyncBN', 'FrozenBN', 'GN',
            'nnSyncBN', 'naiveSyncBN'), an empty string (meaning no norm),
            or a callable taking the channel count.
        out_channels: number of channels the norm layer operates on.
        num_gn_groups: group count, used only by the 'GN' variant.

    Returns:
        An instantiated norm module, or None for the empty-string spec.
    """
    if isinstance(norm, str):
        if not norm:
            return None
        # 'SyncBN' falls back to the hand-rolled implementation on old torch,
        # where nn.SyncBatchNorm had known issues (torch <= 1.5).
        factories = {
            'BN': BatchNorm2d,
            'SyncBN': NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
            'FrozenBN': FrozenBatchNorm2d,
            'GN': lambda channels: nn.GroupNorm(num_gn_groups, channels),
            'nnSyncBN': nn.SyncBatchNorm,
            'naiveSyncBN': NaiveSyncBatchNorm,
        }
        norm = factories[norm]
    return norm(out_channels)
def test_case146():
    """Batch-upsert entities (subdata145) and expect 204 No Content."""
    endpoint = brokerIp + '/ngsi-ld/v1/entityOperations/upsert'
    request_headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/ld+json',
        # NOTE(review): this Link header value looks malformed (unbalanced
        # quotes around rel=) -- confirm against the broker's expected format.
        'Link': '<{{link}}>; rel=" type="application/ld+json"',
    }
    response = requests.post(endpoint, data=json.dumps(ld_data.subdata145), headers=request_headers)
    print(response.content)
    print(response.status_code)
    assert response.status_code == 204
def get_all_data(pairs, n_objs):
    """Fetch the first n_objs source/reference sentences of WMT19 via sacrebleu.

    Args:
        pairs: iterable of (src_lang, tgt_lang) tuples, e.g. [('en', 'de')].
        n_objs: number of sentence pairs to keep per language pair.

    Returns:
        dict keyed by 'src-tgt' with {'src': [...], 'tgt': [...]} line lists.
    """
    def _echo_lines(pair, side):
        # sacrebleu prints the requested side of the test set to stdout.
        cmd = f'sacrebleu -t wmt19 -l {pair} --echo {side}'.split()
        return subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()

    text = {}
    for src, tgt in pairs:
        pair = f'{src}-{tgt}'
        text[pair] = {
            'src': _echo_lines(pair, 'src')[:n_objs],
            'tgt': _echo_lines(pair, 'ref')[:n_objs],
        }
    return text
def divide_int_str_attributes(files, attrs):
    """Partition ``attrs`` into (string-typed, integer-typed) attribute lists.

    For each attribute, every CSV in ``files`` whose header contains it is
    probed: the attribute is integer-typed when its value on the first data
    row is '0' or parses as a non-zero int, and string-typed when parsing
    fails. The attribute 'n' is always pre-registered as integer-typed.

    Args:
        files: iterable of CSV file paths (comma-separated, header on line 1).
        attrs: iterable of attribute (column) names to classify.

    Returns:
        (str_attr, int_attr): two duplicate-free lists of attribute names.
    """
    str_attr, int_attr = [], []
    for a in attrs:
        if a == 'n' and a not in int_attr:
            int_attr.append(a)
        for path in files:
            with open(path, 'r') as f:
                columns = f.readline()[:(- 1)].split(',')
                if a not in columns:
                    continue
                ind = columns.index(a)
                # BUG FIX: the original called f.readline() twice here, so the
                # "== '0'" test and the int() parse looked at DIFFERENT rows.
                # Read the first data row once and inspect that single value.
                value = f.readline()[:(- 1)].split(',')[ind]
                try:
                    # '0' is explicitly integer-typed; otherwise any value
                    # parsing to a non-zero int counts as integer-typed.
                    if value == '0' or int(value):
                        if a not in int_attr:
                            int_attr.append(a)
                except (ValueError, IndexError):
                    # Narrowed from a bare except: int() failing (or a short
                    # data row) marks the attribute as string-typed.
                    if a not in str_attr:
                        str_attr.append(a)
    return (str_attr, int_attr)
def validate(opt, val_loader, model):
    """Run retrieval evaluation on the validation set and log all metrics.

    Computes image-to-text (r1/r5/r10/medr/meanr) and text-to-image
    (r1i/r5i/r10i/medri/meanri) recall metrics, logs them to tensorboard,
    and returns the summed recall score used for model selection.
    """
    (img_embs, cap_embs) = encode_data(model, val_loader, opt.log_step, logging.info)
    # Image-to-text retrieval metrics.
    (r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, measure=opt.measure)
    logging.info(('Image to text: %.1f, %.1f, %.1f, %.1f, %.1f' % (r1, r5, r10, medr, meanr)))
    # Text-to-image retrieval metrics.
    # BUG FIX: the original unpacked the last element into `meanr`, clobbering
    # the image-to-text mean rank before it was logged; use a distinct name.
    (r1i, r5i, r10i, medri, meanri) = t2i(img_embs, cap_embs, measure=opt.measure)
    logging.info(('Text to image: %.1f, %.1f, %.1f, %.1f, %.1f' % (r1i, r5i, r10i, medri, meanri)))
    # Sum of r@1 and r@5 in both directions: the model-selection criterion.
    currscore = (((r1 + r5) + r1i) + r5i)
    for tag, value in (
        ('r1', r1), ('r5', r5), ('r10', r10), ('medr', medr), ('meanr', meanr),
        ('r1i', r1i), ('r5i', r5i), ('r10i', r10i), ('medri', medri),
        # BUG FIX: was logged under the duplicate key 'meanr' in the original,
        # so the text-to-image mean rank never appeared under its own key.
        ('meanri', meanri),
        ('rsum', currscore),
    ):
        tb_logger.log_value(tag, value, step=model.Eiters)
    return currscore
# NOTE(review): the original line read `(ignore_result=True)`, which is not
# valid Python -- almost certainly a celery task decorator whose attribute
# part was lost during extraction. Restored as `@app.task(...)` since `app`
# is the celery application used below; confirm against the original file.
@app.task(ignore_result=True)
def execute_user_task():
    """Fan out one crawl task per stored seed user id."""
    seeds = SeedidsOper.get_seed_ids()
    if seeds:
        for seed in seeds:
            # Route each uid to the dedicated user-info crawler queue.
            app.send_task('tasks.user.crawl_person_infos', args=(seed.uid,), queue='user_crawler', routing_key='for_user_info')
def parse_args():
    """Parse CLI arguments, seed RNGs, and build the problem configuration.

    Returns:
        (problem, config): the resolved problem class and its merged
        Container config (defaults <- CLI args <- dryrun <- --override).
    """
    parser = argparse.ArgumentParser()
    for name, help_text in (
        ('upstream', 'The upstream name. E.g. wav2vec2'),
        ('problem', 'The problem module. E.g. s3prl.problem.SuperbSID'),
        ('dataset_root', 'The dataset root of your problem.'),
        ('save_to', 'The directory to save checkpoint'),
    ):
        parser.add_argument(name, help=help_text)
    parser.add_argument('--feature_selection', default='hidden_states')
    parser.add_argument('--n_jobs', type=int, default=6)
    parser.add_argument('--override', default=None, help='Override the default_config of the problem module. E.g. --override ValidSampler.batch_size=4,,TestSampler.batch_size=4')
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('--dryrun', action='store_true')
    parser.add_argument('--seed', type=int, default=1337)
    args = parser.parse_args()

    fix_random_seeds(args.seed)
    problem = qualname_to_cls(args.problem)

    # Start from the problem's defaults, then layer CLI values on top.
    config = Container(deepcopy(problem.default_config))
    for key, value in vars(args).items():
        if key != 'override':
            config[key] = value
    if args.dryrun:
        config.override(DRYRUN_CONFIG)
    # '--override' is applied last so it wins over everything else.
    if isinstance(args.override, str) and args.override:
        config.override(parse_override(args.override))
    return (problem, config)
class MSMT17(BaseImageDataset):
    """MSMT17 person re-identification dataset (V2 layout).

    Expects under ``root``::

        msmt17/MSMT17_V2/mask_train_v2/
        msmt17/MSMT17_V2/mask_test_v2/
        msmt17/MSMT17_V2/list_{train,val,query,gallery}.txt
    """

    dataset_dir = 'msmt17'

    def __init__(self, root='/home/haoluo/data', verbose=True, **kwargs):
        super(MSMT17, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        base = self.dataset_dir
        self.train_dir = osp.join(base, 'MSMT17_V2/mask_train_v2')
        self.test_dir = osp.join(base, 'MSMT17_V2/mask_test_v2')
        self.list_train_path = osp.join(base, 'MSMT17_V2/list_train.txt')
        self.list_val_path = osp.join(base, 'MSMT17_V2/list_val.txt')
        self.list_query_path = osp.join(base, 'MSMT17_V2/list_query.txt')
        self.list_gallery_path = osp.join(base, 'MSMT17_V2/list_gallery.txt')
        self._check_before_run()

        train = self._process_dir(self.train_dir, self.list_train_path)
        # Query and gallery images both live in the test split.
        query = self._process_dir(self.test_dir, self.list_query_path)
        gallery = self._process_dir(self.test_dir, self.list_gallery_path)
        if verbose:
            print('=> MSMT17 loaded')
            self.print_dataset_statistics(train, query, gallery)

        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)

    def _check_before_run(self):
        """Raise RuntimeError if any required directory is missing."""
        for path in (self.dataset_dir, self.train_dir, self.test_dir):
            if not osp.exists(path):
                raise RuntimeError("'{}' is not available".format(path))

    def _process_dir(self, dir_path, list_path):
        """Parse a list file into [(img_path, pid, camid), ...] tuples."""
        with open(list_path, 'r') as txt:
            lines = txt.readlines()
        dataset = []
        pid_container = set()
        for img_info in lines:
            img_path, pid = img_info.split(' ')
            pid = int(pid)
            # Camera id is encoded as the third '_'-separated filename token
            # -- presumably per the MSMT17 naming scheme; verify on real data.
            camid = int(img_path.split('_')[2])
            dataset.append((osp.join(dir_path, img_path), pid, camid))
            pid_container.add(pid)
        # Person ids are expected to be 0-based and contiguous.
        for idx, pid in enumerate(pid_container):
            assert idx == pid, 'See code comment for explanation'
        return dataset
class AttnConnector(nn.Module):
    """Additive attention over keys that mixes contents into an RNN state.

    Given one query per batch element, computes additive (Bahdanau-style)
    attention scores against the keys, gates them with a sigmoid (NOT a
    softmax -- the weights do not sum to 1), pools the contents with the
    gated weights, and projects [pooled, query] into an RNN initial state.
    """

    def __init__(self, rnn_cell, query_size, key_size, content_size, output_size, attn_size):
        """
        Args:
            rnn_cell: 'lstm' for an (h, c) state pair; anything else (e.g.
                'gru') yields a single state tensor.
            query_size, key_size, content_size: input feature sizes.
            output_size: RNN hidden size to project to.
            attn_size: dimensionality of the additive-attention space.
        """
        super(AttnConnector, self).__init__()
        self.query_embed = nn.Linear(query_size, attn_size)
        self.key_embed = nn.Linear(key_size, attn_size)
        self.attn_w = nn.Linear(attn_size, 1)
        if rnn_cell == 'lstm':
            # The LSTM state is a pair, so h and c get separate projections.
            self.project_h = nn.Linear(content_size + query_size, output_size)
            self.project_c = nn.Linear(content_size + query_size, output_size)
        else:
            self.project = nn.Linear(content_size + query_size, output_size)
        self.rnn_cell = rnn_cell
        self.query_size = query_size
        self.key_size = key_size
        self.content_size = content_size
        self.output_size = output_size

    def forward(self, queries, keys, contents):
        """
        Args:
            queries: (batch, query_size)
            keys: (batch, num_key, key_size)
            contents: (batch, num_key, content_size)

        Returns:
            A (1, batch, output_size) tensor, or an (h, c) pair of such
            tensors when rnn_cell == 'lstm'.
        """
        batch_size = keys.size(0)
        num_key = keys.size(1)
        query_embeded = self.query_embed(queries)
        key_embeded = self.key_embed(keys)
        tiled_query = query_embeded.unsqueeze(1).repeat(1, num_key, 1)
        # FIX: F.tanh / F.sigmoid are deprecated; torch.tanh / torch.sigmoid
        # are the supported, numerically identical replacements.
        fc1 = torch.tanh(tiled_query + key_embeded)
        attn = self.attn_w(fc1).squeeze(-1)
        attn = torch.sigmoid(attn.view(-1, num_key)).view(batch_size, -1, num_key)
        # Weighted sum of contents: (B, 1, K) x (B, K, C) -> (B, C).
        mix = torch.bmm(attn, contents).squeeze(1)
        out = torch.cat([mix, queries], dim=1)
        if self.rnn_cell == 'lstm':
            h = self.project_h(out).unsqueeze(0)
            c = self.project_c(out).unsqueeze(0)
            new_s = (h, c)
        else:
            new_s = self.project(out).unsqueeze(0)
        return new_s
# NOTE(review): `_native_function` below appears to be a mangled decorator
# (likely `@with_native_function` from the PyTorch codegen) whose '@' and
# prefix were lost in extraction -- confirm against the original file.
_native_function
def compute_declaration_yaml(f: NativeFunction) -> object:
    """Build the Declarations.yaml entry (an OrderedDict) for one native op."""
    (returns, name_to_field_name) = compute_returns_yaml(f)
    # Name sets used to tag each argument's role (kwarg-only / out) in YAML.
    kwarg_only_set = set((a.name for a in f.func.kwarg_only_arguments))
    out_arg_set = set((a.name for a in f.func.out_arguments))
    cpp_args = cpp.arguments(f.func)
    # Arguments in C++ signature order.
    arguments = [compute_cpp_argument_yaml(cpp_a, schema_order=False, kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name) for cpp_a in cpp_args]
    # Arguments in JIT-schema order (may differ from C++ order).
    schema_order_jit_arguments = list(f.func.schema_order_arguments())
    schema_order_arguments = [compute_argument_yaml(a, schema_order=True, kwarg_only_set=kwarg_only_set, out_arg_set=out_arg_set, name_to_field_name=name_to_field_name) for a in schema_order_jit_arguments]
    cpp_schema_order_types = [cpp.argument(a).type for a in schema_order_jit_arguments]
    cpp_returns = cpp.returns_type(f.func.returns)
    # Render the full C++ signature string in schema order.
    schema_order_cpp_signature = f"{cpp_returns} ({', '.join(cpp_schema_order_types)})"
    # Factory = takes TensorOptions and is not a Tensor method.
    is_factory_method = (any((isinstance(a.argument, TensorOptionsArguments) for a in cpp_args)) and (Variant.method not in f.variants))
    # Key order below is significant: downstream consumers of
    # Declarations.yaml rely on this exact field ordering.
    return OrderedDict([('name', cpp.name(f.func)), ('operator_name', str(f.func.name.name)), ('overload_name', str(f.func.name.overload_name)), ('use_c10_dispatcher', f.use_c10_dispatcher.name), ('manual_kernel_registration', f.manual_kernel_registration), ('category_override', (f.category_override if (f.category_override is not None) else '')), ('matches_jit_signature', True), ('schema_string', f'aten::{f.func}'), ('arguments', arguments), ('schema_order_cpp_signature', schema_order_cpp_signature), ('schema_order_arguments', schema_order_arguments), ('method_of', compute_method_of_yaml(f.variants)), ('mode', 'native'), ('python_module', ('' if (f.python_module is None) else f.python_module)), ('returns', returns), ('inplace', f.func.name.name.inplace), ('is_factory_method', is_factory_method), ('abstract', (f.dispatch is not None)), ('device_guard', f.device_guard), ('with_gil', False), ('deprecated', False), ('has_math_kernel', ((f.dispatch is not None) and ('Math' in f.dispatch)))])
class ResNetDecoder(Generator):
    """Decoder that maps a bottleneck feature map back to image space.

    Applies ``res_blocks`` residual blocks, then ``n_upsample`` x2 upsampling
    stages (learned Gaussian transposed conv or plain upsample + conv), and a
    final 7x7 conv with tanh output.
    """

    def __init__(self, in_channels, out_channels, n_channels=64, res_blocks=4, n_upsample=2, normalization=nn.InstanceNorm2d, activation=None, bias=True, gaussian_upsample=True):
        super(ResNetDecoder, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.n_channels = n_channels
        self.res_blocks = res_blocks
        self.n_upsample = n_upsample
        self.normalization = normalization
        # Default to ReLU when no activation is supplied.
        self.activation = nn.ReLU() if activation is None else activation
        self.bias = bias
        self.gaussian_upsample = gaussian_upsample
        assert in_channels == (n_channels * (2 ** n_upsample)), '`n_channels * 2 ** n_upsample` must equal `in_channels`'
        self.net = self.build_net(self.in_channels, self.out_channels, self.n_channels, self.res_blocks, self.n_upsample, self.normalization, self.activation, self.bias, self.gaussian_upsample)
        self.init_weights()

    def build_net(self, in_channels, out_channels, n_channels, res_blocks, n_upsample, normalization, activation, bias, gaussian_upsample):
        """Assemble the decoder as a single nn.Sequential."""
        layers = [
            ResNetBlock(in_channels, normalization=normalization, activation=activation, bias=bias, kernel_size=3, padding=1, pad_type='reflect')
            for _ in range(res_blocks)
        ]
        for stage in range(n_upsample):
            width = 2 ** (n_upsample - stage) * n_channels
            if gaussian_upsample:
                # Learned (transposed) Gaussian upsampling.
                layers.append(GaussianConv2d(width, kernel_size=4, stride=2, deconv=True))
            else:
                layers.append(nn.Upsample(scale_factor=2))
            # Halve the channel count after each upsampling stage.
            layers.append(ConvBlock(width, width // 2, normalization=normalization, activation=activation, kernel_size=3, padding=1, pad_type='reflect', bias=bias))
        # Final projection to image space with tanh range [-1, 1].
        layers.append(ConvBlock(n_channels, out_channels, normalization=None, activation=torch.nn.Tanh(), kernel_size=7, padding=3, pad_type='reflect'))
        return nn.Sequential(*layers)
def load_fields_from_vocab(vocab, data_type='text'):
    """Rebuild dataset fields from a saved (name, vocab) collection.

    Args:
        vocab: iterable of (field_name, Vocab) pairs.
        data_type: dataset modality forwarded to get_fields.

    Returns:
        dict of fields with their vocabularies attached.
    """
    vocab = dict(vocab)
    n_src_features = len(collect_features(vocab, 'src'))
    n_tgt_features = len(collect_features(vocab, 'tgt'))
    fields = get_fields(data_type, n_src_features, n_tgt_features)
    for name, field_vocab in vocab.items():
        # Unknown tokens map to index 0 instead of raising KeyError.
        field_vocab.stoi = defaultdict(lambda: 0, field_vocab.stoi)
        fields[name].vocab = field_vocab
    return fields
# NOTE(review): the original began with a bare `.parametrize(...)` -- the
# `@pytest.mark` prefix was evidently lost during extraction; restored here.
@pytest.mark.parametrize('action_dist, estimated_rewards_by_reg_model, description', invalid_input_of_create_estimator_inputs)
def test_meta_create_estimator_inputs_using_invalid_input_data(action_dist, estimated_rewards_by_reg_model, description: str, synthetic_bandit_feedback: BanditFeedback) -> None:
    """Every OPE entry point must reject invalid inputs with the same ValueError."""
    ope_ = OffPolicyEvaluation(bandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw])
    # Entry points that take only the action distribution / reward model.
    for method in (
        ope_._create_estimator_inputs,
        ope_.estimate_policy_values,
        ope_.estimate_intervals,
        ope_.summarize_off_policy_estimates,
    ):
        with pytest.raises(ValueError, match=f'{description}*'):
            _ = method(action_dist=action_dist, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model)
    # Entry points that additionally require the ground-truth policy value.
    for method in (
        ope_.evaluate_performance_of_estimators,
        ope_.summarize_estimators_comparison,
    ):
        with pytest.raises(ValueError, match=f'{description}*'):
            _ = method(ground_truth_policy_value=0.1, action_dist=action_dist, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model)
def load_tf_weights_in_bert_generation(*args, **kwargs):
    """Placeholder that raises unless the torch backend is installed."""
    requires_backends(load_tf_weights_in_bert_generation, ['torch'])
def tensor_size_bytes(tensor):
    """Return the GPU memory footprint of ``tensor`` in bytes.

    Non-CUDA tensors and None contribute 0 -- only device memory is counted.
    """
    if tensor is not None and tensor.is_cuda:
        return tensor.numel() * tensor.element_size()
    return 0
class AverageMeter(object):
    """Track the latest value and running average of a scalar."""

    def __init__(self):
        # Delegate to reset() so the zeroing logic lives in one place.
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # weighted sum of values
        self.count = 0  # total weight seen

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def _observe(state: State, player_id: Array) -> Array:
    """Build the observation vector for ``player_id``.

    The board is always visible; the playable-dice count vector is only
    revealed to the player whose turn it is (zeros for the opponent).
    """
    board: Array = state._board
    dice_counts: Array = _to_playable_dice_count(state._playable_dice)

    def _own_view():
        return jnp.concatenate((board, dice_counts), axis=None)

    def _hidden_view():
        # Opponent sees a zeroed dice vector instead of the real counts.
        return jnp.concatenate((board, jnp.zeros(6, dtype=jnp.int32)), axis=None)

    return jax.lax.cond(player_id == state.current_player, _own_view, _hidden_view)
def traverse_dir(root_dir, extension=('mid', 'MID', 'midi'), amount=None, str_=None, is_pure=False, verbose=False, is_sort=False, is_ext=True):
    """Recursively collect file paths under ``root_dir``.

    Args:
        root_dir: directory to walk.
        extension: suffix (or tuple of suffixes) a file must end with.
        amount: stop after collecting this many files (None = unlimited).
        str_: if given, keep only filenames containing this substring.
        is_pure: return paths relative to root_dir instead of full paths.
        verbose: print progress and a final summary.
        is_sort: sort the resulting list before returning.
        is_ext: when False, strip the file extension from each path.

    Returns:
        List of file paths (possibly relative, stripped, and/or sorted).
    """
    if verbose:
        print('[*] Scanning...')
    collected = []
    count = 0
    for dirpath, _, filenames in os.walk(root_dir):
        for filename in filenames:
            if not filename.endswith(extension):
                continue
            if amount is not None and count == amount:
                break
            if str_ is not None and str_ not in filename:
                continue
            full_path = os.path.join(dirpath, filename)
            # Drop the "<root_dir>/" prefix when a pure (relative) path is wanted.
            path = full_path[len(root_dir) + 1:] if is_pure else full_path
            if not is_ext:
                ext = path.split('.')[-1]
                path = path[:-(len(ext) + 1)]
            if verbose:
                print(path)
            collected.append(path)
            count += 1
    if verbose:
        print('Total: %d files' % len(collected))
        print('Done!!!')
    if is_sort:
        collected.sort()
    return collected
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register the ns3::CallbackImplBase constructors and methods on ``cls``."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # Pure-virtual interface every callback implementation must provide.
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    # One GetCppTypeid specialization per callback argument type
    # (registration order preserved from the generated bindings).
    for template_arg in (
        u'ns3::ObjectBase*',
        u'void',
        u'ns3::Ptr<ns3::Socket> ',
        u'bool',
        u'ns3::Address const&',
        u'unsigned int',
        u'ns3::Ptr<ns3::NetDevice> ',
        u'ns3::Ptr<ns3::Packet const> ',
        u'unsigned short',
        u'ns3::NetDevice::PacketType',
    ):
        cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[template_arg])
    return
class ImageNetDataNP():
    """ImageNet test split loaded from pre-exported .npy arrays."""

    def __init__(self, folder_path):
        """Load imagenet_test_data.npy / imagenet_test_labels.npy from ``folder_path``."""
        self.test_data = np.load(os.path.join(folder_path, 'imagenet_test_data.npy'))
        self.test_labels = np.load(os.path.join(folder_path, 'imagenet_test_labels.npy'))
def read_fasta_sequence(numeric, fasta_file):
    """Read the next FASTA record from an open file object.

    Designed for sequential calls: each call consumes the '>' that starts the
    FOLLOWING record, so the next call picks up where this one left off.

    Args:
        numeric: when 0, spaces are removed and the sequence is upper-cased
            (letter sequences); otherwise the sequence keeps its spacing
            (numeric sequences).
        fasta_file: file-like object positioned at a record boundary.

    Returns:
        [id, sequence]; ['', ''] at end of file. Exits the process when the
        header line contains no words.
    """
    first_char = fasta_file.read(1)
    if first_char == '':
        return ['', '']
    # The previous call usually consumed this record's '>'; when one is still
    # present it is a delimiter, not part of the header text.
    line = '' if first_char == '>' else first_char
    line += fasta_file.readline()
    words = line.split()
    if not words:
        sys.stderr.write('No words in header line (%s)\n' % line)
        sys.exit(1)
    seq_id = words[0]

    # Accumulate raw sequence text until the next record header or EOF.
    chunks = []
    first_char = fasta_file.read(1)
    while first_char not in ('>', ''):
        if first_char != '\n':
            chunks.append(first_char + fasta_file.readline())
        first_char = fasta_file.read(1)
    sequence = ''.join(chunks)

    # Drop embedded newlines.
    sequence = sequence.replace('\n', '')
    if numeric == 0:
        # Letter sequence: remove spaces and normalize case.
        sequence = sequence.replace(' ', '').upper()
    return [seq_id, sequence]
def infer(env, agent, **kwargs):
    """Roll out one episode of ``agent`` in ``env`` and display the result.

    Accumulates per-step rewards until the env reports done, then renders the
    final state. Pass verbose=True to also print the final info dict.
    """
    obs = env.reset()
    done = False
    total_reward_weights = 0
    while not done:
        action, _ = agent.predict(obs)
        obs, rewards, done, info = env.step(action)
        total_reward_weights += rewards
        if done:
            break
    show_state(env, info[0]['iterations'], info[0]['changes'], total_reward_weights)
    if kwargs.get('verbose', False):
        print(info[0])
def get_task_configuration(config) -> List:
    """Resolve the TaskConfiguration subclass matching config's task mode.

    The mode string is "<task> <sub_task>" when a sub_task is set, otherwise
    just "<task>".

    Raises:
        ValueError: when zero or more than one configuration matches.
    """
    if hasattr(config, 'sub_task'):
        mode = '{} {}'.format(config.task, config.sub_task)
    else:
        mode = config.task
    matches = [cfg_cls() for cfg_cls in TaskConfiguration.__subclasses__() if cfg_cls.mode() == mode]
    if len(matches) > 1:
        raise ValueError('Multiple configurations for {}'.format(mode))
    if not matches:
        raise ValueError('No configuration available for {}'.format(mode))
    return matches[0].tasks(config)
def main():
    """Encode text files to BPE in parallel, filtering lines as requested.

    Reads line-tuples from --inputs (or stdin), BPE-encodes them with a
    worker pool, writes results to --outputs (or stdout), and reports
    filtering statistics on stderr.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--encoder-json', help='path to encoder.json')
    parser.add_argument('--vocab-bpe', type=str, help='path to vocab.bpe')
    parser.add_argument('--inputs', nargs='+', default=['-'], help='input files to filter/encode')
    parser.add_argument('--outputs', nargs='+', default=['-'], help='path to save encoded outputs')
    parser.add_argument('--keep-empty', action='store_true', help='keep empty lines')
    parser.add_argument('--workers', type=int, default=20)
    args = parser.parse_args()
    assert len(args.inputs) == len(args.outputs), 'number of input and output paths should match'

    with contextlib.ExitStack() as stack:
        # '-' selects the standard streams; anything else is opened as UTF-8.
        input_handles = [
            stack.enter_context(open(path, 'r', encoding='utf-8')) if path != '-' else sys.stdin
            for path in args.inputs
        ]
        output_handles = [
            stack.enter_context(open(path, 'w', encoding='utf-8')) if path != '-' else sys.stdout
            for path in args.outputs
        ]

        encoder = MultiprocessingEncoder(args)
        pool = Pool(args.workers, initializer=encoder.initializer)
        # Lines are zipped across all inputs so parallel files stay aligned.
        encoded_lines = pool.imap(encoder.encode_lines, zip(*input_handles), 100)

        stats = Counter()
        for i, (filt, enc_lines) in enumerate(encoded_lines, start=1):
            if filt == 'PASS':
                for enc_line, handle in zip(enc_lines, output_handles):
                    print(enc_line, file=handle)
            else:
                stats['num_filtered_' + filt] += 1
            if i % 10000 == 0:
                print('processed {} lines'.format(i), file=sys.stderr)
        for k, v in stats.most_common():
            print('[{}] filtered {} lines'.format(k, v), file=sys.stderr)
def confidence_interval(data: ArrayLike, func: Callable[([ArrayLike], NDArray)]=np.mean, size: int=1000, ci: int=95, seed: Optional[int]=None) -> float:
    """Half-width of a bootstrap confidence interval for func(data).

    Args:
        data: sample to resample from.
        func: statistic computed on each bootstrap replicate.
        size: number of bootstrap replicates.
        ci: confidence level in percent (e.g. 95).
        seed: RNG seed forwarded to the bootstrap.

    Returns:
        Half the distance between the lower and upper percentile bounds.
    """
    replicates = bootstrap(data, func=func, n_boot=size, seed=seed)
    percentiles = (50 - ci / 2, 50 + ci / 2)
    # nanpercentile ignores NaN replicates (e.g. from degenerate resamples).
    lower, upper = np.nanpercentile(replicates, percentiles)
    return (upper - lower) / 2
def small_bn_opp_resnet(image, test=False, w_bias=False, channel_last=False, name='bn-graph-ref', dims=2):
    """Small BN-first residual network (nnabla) producing 10-class logits.

    Args:
        image: input variable; scaled by 1/255 internally.
        test: use running BN statistics instead of batch statistics.
        w_bias: unused here -- presumably kept for signature compatibility
            with sibling network builders; verify before removing.
        channel_last: channel-last (NHWC) layout when True.
        name: graph name, unused inside the function body.
        dims: spatial dimensionality; pooling is applied only when dims > 1.

    Returns:
        Logits variable of shape (batch, 10).
    """
    kernel = (3,) * dims
    pool_kernel = (2,) * dims
    pad = (1,) * dims

    h = image
    h /= 255.0
    axes = get_channel_axes(h, channel_last, dims)
    # BN before the first convolution (the "opposite"-ordering variant).
    h = PF.batch_normalization(h, axes=axes, batch_stat=(not test), name='first-bn')
    h = PF.convolution(h, 16, kernel=kernel, pad=pad, channel_last=channel_last, with_bias=True, name='first-conv')
    h = F.relu(h)
    if dims > 1:
        h = F.max_pooling(h, pool_kernel, channel_last=channel_last)
    for block_name in ('cb1', 'cb2', 'cb3', 'cb4'):
        h = bn_opp_resblock(h, maps=16, test=test, channel_last=channel_last, name=block_name, dims=dims)
    if dims > 1:
        h = F.average_pooling(h, pool_kernel, channel_last=channel_last)
    pred = PF.affine(h, 10, name='fc')
    return pred
# NOTE(review): the three lines below are web-page boilerplate (dataset-viewer
# "Subsets and Splits" text) accidentally captured during extraction; they are
# not code. Preserved here as comments so the file remains parseable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.