code stringlengths 101 5.91M |
|---|
# NOTE(review): the bare `.gpu` line and the juxtaposed expressions below look
# like extraction damage — probably `@pytest.mark.gpu`, a `@dace.program`
# decorator on `tester`, and `@`-composed storage/schedule annotations.
# Confirm against the original dace test suite before running.
.gpu
def test_gpu_access_on_host_tasklet():
    # A GPU-global array written from a CPU-scheduled map should make SDFG
    # validation fail with InvalidSDFGEdgeError.
    def tester(a: (dace.float64[20] dace.StorageType.GPU_Global)):
        for i in (dace.map[0:20] dace.ScheduleType.CPU_Multicore):
            a[i] = 1
    with pytest.raises(InvalidSDFGEdgeError):
        tester.to_sdfg(validate=True)
def get_deepfashion_img_class_name(filename, mode):
    """Return the class name parsed from a DeepFashion image *filename*.

    Thin wrapper around the shared filename parser.
    """
    return deepfashion_name_parse(filename, mode)
def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator():
    """Unpickling an estimator whose module is not sklearn must not emit the
    version-mismatch warning."""
    iris = datasets.load_iris()
    fitted = TreeNoVersion().fit(iris.data, iris.target)
    payload = pickle.dumps(fitted)
    try:
        original_module = TreeNoVersion.__module__
        # Pretend the class comes from a third-party package.
        TreeNoVersion.__module__ = 'notsklearn'
        assert_no_warnings(pickle.loads, payload)
    finally:
        # Always restore the real module name for other tests.
        TreeNoVersion.__module__ = original_module
class EvalLMConfig(FairseqDataclass):
    """Command-line options for language-model evaluation (see each field's
    metadata help string for user-facing documentation)."""
    # Per-word log-probability dump to stdout.
    output_word_probs: bool = field(default=False, metadata={'help': 'if set, outputs words and their predicted log probabilities to standard output'})
    # Aggregate per-word statistics (counts, average probability, ...).
    output_word_stats: bool = field(default=False, metadata={'help': 'if set, outputs word statistics such as word count, average probability, etc'})
    # Minimum left-context guaranteed for every scored token.
    context_window: int = field(default=0, metadata={'help': 'ensures that every evaluated token has access to a context of at least this size, if possible'})
    # Default sys.maxsize effectively disables softmax batching.
    softmax_batch: int = field(default=sys.maxsize, metadata={'help': 'if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory'})
def dma_gather_base(context, reg: DMA_gather_reg):
    """Decode a DMA-gather register file into (results, attr, operands)
    tensor descriptors.

    opd0 is the source tensor, opd1 the u32 index tensor, res0 the
    destination; ``attr`` carries the fill constant.  NOTE(review):
    semantics inferred from register-field names — confirm against the DMA
    hardware spec.
    """
    # 64-bit lane mask is split across two 32-bit register fields.
    lane_mask = ((reg.localmem_mask_h32 * (2 ** 32)) + reg.localmem_mask_l32)
    (c, h, w) = (reg[f'src_{d}size'] for d in 'chw')
    d_h = reg.dst_hsize
    if reg.nchw_copy:
        # NCHW copy mode keeps the source height on the destination.
        d_h = h
    # NOTE(review): `stride` is computed but never used below (each descriptor
    # carries its own explicit stride tuple) — possibly dead code.
    stride = (((c * h) * w), (h * w), w, 1)
    opd0 = dict(address=dma_addr(reg.src_start_addr_h8, reg.src_start_addr_l32), dtype=DType(reg.src_data_format), shape=(1, c, h, w), stride=(0, reg.src_cstride, reg.src_hstride, 1), layout=Layout.stride)
    res0 = dict(address=dma_addr(reg.dst_start_addr_h8, reg.dst_start_addr_l32), dtype=DType(reg.src_data_format), shape=(1, max(c, reg.index_csize), d_h, w), stride=(0, reg.dst_cstride, reg.dst_hstride, 1), layout=Layout.DMAstride(lane_mask))
    opd1 = dict(address=dma_addr(reg.index_start_addr_h8, reg.index_start_addr_l32), dtype=DType.ui32, shape=(1, reg.index_csize, d_h, 1), stride=(0, reg.index_cstride, reg.index_hstride, 1), layout=Layout.stride)
    # Fill value used by the gather (read as a constant from `constant_value`).
    const = get_value(context, address=reg.constant_value, dtype=DType(reg.src_data_format), is_const=True).data
    attr = dict(const=const)
    operands = [get_value(context, **x) for x in (opd0, opd1)]
    results = [get_value(context, **res0)]
    return (results, attr, operands)
def evaluate_grasp() -> None:
    """Fit and evaluate the grasp-affordance benchmark for the 'v-cond'
    backbone on GPU when available, else CPU."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    backbone, preprocess = load('v-cond', device=device)
    # Number of latents matches the target map resolution after upsampling.
    output_resolution = 80
    upsample_stages = 4
    n_latents = int((output_resolution ** 2) / (4 ** upsample_stages))
    map_extractor_fn = instantiate_extractor(backbone, n_latents=n_latents)
    harness = vet.GraspAffordanceHarness('v-cond', backbone, preprocess, map_extractor_fn)
    harness.fit()
    harness.test()
class RunExpander(ABC):
    """Abstract base for objects that turn one RunSpec into several variants."""
    # Identifier for this expander (expected to be set by subclasses).
    name: str
    def expand(self, run_spec: RunSpec) -> List[RunSpec]:
        """Return the expanded run specs derived from *run_spec*.

        NOTE(review): not marked @abstractmethod, so the base silently
        returns None; subclasses are expected to override.
        """
        pass
def test_init_with_env_updates(policy, envs):
    """Sampling with one VecWorker driving N envs must match N ordinary
    workers created with the same seed."""
    task_sampler = EnvPoolSampler(envs)
    envs = task_sampler.sample(N_TRAJ)
    # Reference sampler: one worker per trajectory.
    true_workers = WorkerFactory(seed=100, n_workers=N_TRAJ, max_path_length=MAX_PATH_LENGTH)
    true_sampler = LocalSampler.from_worker_factory(true_workers, policy, envs)
    # Sampler under test: a single vectorized worker running all N envs.
    vec_workers = WorkerFactory(seed=100, n_workers=1, worker_class=VecWorker, worker_args=dict(n_envs=N_TRAJ), max_path_length=MAX_PATH_LENGTH)
    vec_sampler = LocalSampler.from_worker_factory(vec_workers, [policy], [envs])
    n_samples = 100
    true_trajs = true_sampler.obtain_samples(0, n_samples, None)
    vec_trajs = vec_sampler.obtain_samples(0, n_samples, None)
    assert (vec_trajs.lengths.sum() >= n_samples)
    assert_trajs_eq(true_trajs, vec_trajs)
    true_sampler.shutdown_worker()
    vec_sampler.shutdown_worker()
def hack_trainer_type_to_gap_aware(args, stage_depth=None):
    """Switch the configured trainer to its gap-aware variant for stages
    selected by ``args.gap_aware['policy']``.

    Returns True when the trainer type was switched for this stage, False
    otherwise (including when no ``gap_aware`` config exists at all).
    """
    def apply_hack():
        # Mutates the config in place; the trainer factory reads this string.
        args.trainer['type'] += '_gap_aware'
    if not hasattr(args, 'gap_aware'):
        return False
    if stage_depth is None:
        # Infer staleness from rank: the last rank runs the depth-0 stage.
        zero_staleness = (args.local_rank == (args.world_size - 1))
        one_staleness = (args.local_rank == (args.world_size - 2))
    else:
        zero_staleness = (stage_depth == 0)
        one_staleness = (stage_depth == 1)
    warnings.warn('Assuming no grad accumulation and no staleness limit...')
    policy = args.gap_aware['policy']
    if policy == 'almost_last_partition':
        should_hack = one_staleness
    elif policy == 'all_except_last':
        should_hack = not zero_staleness
    elif policy == 'all_except_last_two':
        should_hack = not (zero_staleness or one_staleness)
    else:
        raise ValueError(f"Unknown policy for GA {args.gap_aware['policy']}. supported policies are {SUPPORTED_GAP_AWARE_POLICIES}")
    if should_hack:
        apply_hack()
        return True
    return False
def is_value_tok(t):
    """Return True when token *t* looks like a literal value: it must not
    start with a letter and must not normalize to the 'null' literal."""
    return (not t[0].isalpha()) and (process_literal(t) != 'null')
# NOTE(review): the leading `.parametrize` lines look like stripped
# `@pytest.mark.parametrize` decorators lost in extraction — confirm upstream.
.parametrize('score', [AbsoluteConformityScore(), GammaConformityScore(), ResidualNormalisedScore()])
.parametrize('alpha', [[0.3], [0.5, 0.4]])
def test_intervals_shape_with_every_score(score: ConformityScore, alpha: Any) -> None:
    """Prediction intervals must have shape (n_samples, 2, n_alpha) for every
    conformity score."""
    mapie_reg = MapieRegressor(method='base', cv='split', conformity_score=score)
    # Duplicate the toy data so the split CV has enough samples.
    X = np.concatenate((X_toy, X_toy))
    y = np.concatenate((y_toy, y_toy))
    mapie_reg = mapie_reg.fit(X, y)
    (y_pred, intervals) = mapie_reg.predict(X, alpha=alpha)
    n_samples = X.shape[0]
    assert (y_pred.shape[0] == n_samples)
    assert (intervals.shape == (n_samples, 2, len(alpha)))
def _get_lines(graph_parse, is_variable, a_key, b_key):
    """Return {(a_key, b_key): line} when the line graph has that edge,
    otherwise an empty dict.

    *is_variable* selects the 'variable' edge attribute over 'instance'.
    """
    assert isinstance(graph_parse, GraphParse)
    if not graph_parse.line_graph.has_edge(a_key, b_key):
        return {}
    attr = 'variable' if is_variable else 'instance'
    return {(a_key, b_key): graph_parse.line_graph[a_key][b_key][attr]}
class DebertaTokenizerFast(metaclass=DummyObject):
    """Import-time placeholder used when the 'tokenizers' backend is absent."""
    _backends = ['tokenizers']
    def __init__(self, *args, **kwargs):
        # Raises a helpful error telling the user to install `tokenizers`.
        requires_backends(self, ['tokenizers'])
class SGACellularBasis(CellularBasis):
    """A cellular basis of a symmetric group algebra (SGA)."""
    def __init__(self, SGA):
        # self._to_sga (provided by the concrete subclass) converts basis
        # elements to/from the ambient SGA.
        CellularBasis.__init__(self, SGA, self._to_sga)
    def _repr_(self):
        return (self._name + ' basis of {}'.format(self._algebra))
    # NOTE(review): the bare `_method` lines below look like stripped
    # decorators (probably Sage's @cached_method) lost in extraction —
    # confirm upstream.
    _method
    def one_basis(self):
        """Index of the basis element for 1: the single-row partition of n
        with its unique standard tableau, as (la, col, col)."""
        la = _Partitions([self._algebra.n])
        col = la.standard_tableaux()[0]
        return (la, col, col)
    _method
    def one(self):
        # Multiplicative identity of the algebra in this basis.
        return self.monomial(self.one_basis())
class iMAMLMetaLearner(GradBasedMetaLearner):
    """Implicit MAML (iMAML) meta-learner.

    Meta-gradients are obtained by approximately solving the regularized
    inner problem (I + H/lambda) x = g_test with a few iterative steps,
    instead of backpropagating through the inner adaptation loop.
    NOTE(review): the default ``name`` says 'FOMAMLMetaLearner' — looks
    copy-pasted from the FOMAML class; confirm the intended default.
    """
    def __init__(self, model, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), lambda_reg=1.0, n_iters_optimizer=5, name='FOMAMLMetaLearner'):
        # NOTE(review): the default `optimizer` is a mutable/stateful default
        # argument shared by all instances constructed without one.
        self.model = model
        self.optimizer = optimizer
        self.lambda_reg = lambda_reg
        self.n_iters_optimizer = n_iters_optimizer
        # Non-trainable anchor copies of the initial parameters, used by the
        # proximal regularizer below.
        model.imaml_reg_initial_params = [tf.Variable(tf.zeros(v.shape), dtype=v.dtype.base_dtype, trainable=False, name='iMAML_regularizer_initial_parameters') for v in model.trainable_variables]
        def imaml_regularizer():
            # (lambda/2) * ||theta - theta_0||^2 — iMAML's proximal term.
            reg = tf.add_n([tf.reduce_sum(tf.square((model.trainable_variables[i] - model.imaml_reg_initial_params[i]))) for i in range(len(model.trainable_variables))])
            return ((0.5 * self.lambda_reg) * reg)
        self.model.add_loss(imaml_regularizer)
    def _gradients_for_task_CG(self, t):
        """Approximate the meta-gradient for task *t* by iteratively solving
        (I + H/lambda) x = g_test, where H is the train-loss Hessian."""
        (test_x, test_y) = t.get_test_set()
        (train_x, train_y) = t.get_train_set()
        def Av(v):
            # Matrix-vector product (I + (1/lambda) H) v via Hessian-vector products.
            Hv = Hvp(self.model, train_x, train_y, v)
            return [(v[i] + ((1.0 / self.lambda_reg) * Hv[i].numpy())) for i in range(len(v))]
        test_gradients = [g.numpy() for g in grads_on_batch(self.model, test_x, test_y)]
        x0 = [np.zeros(var.shape, dtype=np.float32) for var in self.current_initial_parameters]
        b = deepcopy(test_gradients)
        debug = False
        x = steepest_descent(Av, b, x0, self.n_iters_optimizer, debug=debug)
        if debug:
            # Debug path deliberately kills the process after printing.
            print(list(zip(x[(- 1)], test_gradients[(- 1)], Av(x)[(- 1)])))
            print('*****\n')
            import sys
            sys.exit()
        return x
    def initialize(self):
        # Snapshot the current weights as the meta-parameters theta_0.
        self.current_initial_parameters = [v.numpy() for v in self.model.trainable_variables]
    def task_begin(self, task=None, **kwargs):
        """Reset model weights and the regularizer anchor to theta_0 before
        adapting to a new task."""
        super().task_begin(task=task)
        for i in range(len(self.current_initial_parameters)):
            self.model.trainable_variables[i].assign(self.current_initial_parameters[i])
            self.model.imaml_reg_initial_params[i].assign(self.current_initial_parameters[i])
    def task_end(self, task=None, **kwargs):
        """Return this task's meta-gradient contribution."""
        assert (task is not None), "FOMAML needs a `task' argument on .task_end to compute the data it needs."
        if isinstance(task, TaskAsSequenceOfTasks):
            # For a sequence of tasks only the final task defines the meta-gradient.
            ret_grads = self._gradients_for_task_CG(task.get_task_by_index((- 1)))
        else:
            ret_grads = self._gradients_for_task_CG(task)
        return ret_grads
    def update(self, list_of_final_gradients, **kwargs):
        """Average per-task meta-gradients and take one optimizer step on
        theta_0; the model weights are restored to theta_0 first."""
        avg_final_grads = []
        for grads in zip(*list_of_final_gradients):
            avg_final_grads.append(np.mean(grads, axis=0))
        for i in range(len(self.current_initial_parameters)):
            self.model.trainable_variables[i].assign(self.current_initial_parameters[i])
        self.optimizer.apply_gradients(zip(avg_final_grads, self.model.trainable_variables))
        self.current_initial_parameters = [v.numpy() for v in self.model.trainable_variables]
class ConceptNetGenerationIteratorTrainer(base_train.AtomicGenerationIteratorTrainer):
    """ConceptNet generation trainer.

    Tracks the best (lowest) dev 'total_micro' loss across epochs and uses it
    to decide when a checkpoint should be written.
    """
    def set_evaluator(self, opt, model, data_loader):
        self.evaluator = evaluate.make_evaluator(opt, model, data_loader)
    def set_generator(self, opt, model, data_loader):
        self.generator = gen.make_generator(opt, model, data_loader)
    def batch(self, opt, *args):
        """Run one generation batch; return (token_loss, nums, reset)."""
        outputs = batch_utils.batch_atomic_generate(opt, *args)
        return (outputs['loss'], outputs['nums'], outputs['reset'])
    def update_top_score(self, opt):
        """Record the current epoch as best when its tracked loss improved.

        Fix: removed the duplicated assignment
        (`self.top_score = self.top_score = {...}`) from the original.
        """
        print(self.top_score)
        tracked_scores = self.get_tracked_score()
        if (self.top_score is None):
            # First epoch seen: it is the best by definition.
            self.top_score = {'epoch': {}, 'score': {}}
            self.top_score['epoch']['total_micro'] = self.opt.train.dynamic.epoch
            self.top_score['score']['total_micro'] = tracked_scores['total_micro']
        elif (tracked_scores['total_micro'] < self.top_score['score']['total_micro']):
            # Lower loss is better.
            self.top_score['epoch']['total_micro'] = self.opt.train.dynamic.epoch
            self.top_score['score']['total_micro'] = tracked_scores['total_micro']
        print(self.top_score)
    def get_tracked_score(self):
        """Return the dev 'total_micro' loss for the current epoch."""
        return {'total_micro': self.losses['dev']['total_micro'][self.opt.train.dynamic.epoch]}
    def decide_to_save(self):
        """Return True when a checkpoint should be written this epoch."""
        to_save = (cfg.save and (not cfg.toy))
        curr_epoch = self.opt.train.dynamic.epoch
        to_save = (to_save or cfg.test_save)
        print(cfg.save_strategy)
        if (cfg.save_strategy == 'best'):
            # Under the 'best' strategy, only save when this epoch is the best.
            if (self.top_score['epoch']['total_micro'] != curr_epoch):
                to_save = False
        return to_save
def p_list_maker(s):
    """Parse a list display whose '[' is the current token of scanner *s*:
    an empty list, a list comprehension, or a plain (possibly multi-item)
    list literal.  Returns the corresponding expression node."""
    pos = s.position()
    s.next()
    if (s.sy == ']'):
        # Empty list: [].
        s.expect(']')
        return ExprNodes.ListNode(pos, args=[])
    expr = p_test_or_starred_expr(s)
    if (s.sy in ('for', 'async')):
        # List comprehension; '*expr' is not allowed as the element here.
        if expr.is_starred:
            s.error('iterable unpacking cannot be used in comprehension')
        append = ExprNodes.ComprehensionAppendNode(pos, expr=expr)
        loop = p_comp_for(s, append)
        s.expect(']')
        # Comprehensions get their own scope from Python 3 semantics onward.
        return ExprNodes.ComprehensionNode(pos, loop=loop, append=append, type=Builtin.list_type, has_local_scope=(s.context.language_level >= 3))
    # Plain list literal: collect the remaining comma-separated items.
    if (s.sy == ','):
        s.next()
        exprs = p_test_or_starred_expr_list(s, expr)
    else:
        exprs = [expr]
    s.expect(']')
    return ExprNodes.ListNode(pos, args=exprs)
class FeatureHookNet(nn.ModuleDict):
    """Wrap *model* so forward() returns intermediate features captured via
    forward hooks at the layers selected by ``out_indices``."""
    def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False, feature_concat=False, flatten_sequential=False, default_hook_type='forward'):
        super(FeatureHookNet, self).__init__()
        # Hooks are incompatible with TorchScript.
        assert (not torch.jit.is_scripting())
        self.feature_info = _get_feature_info(model, out_indices)
        self.out_as_dict = out_as_dict
        layers = OrderedDict()
        hooks = []
        if no_rewrite:
            # Keep the model intact (minus its classifier) and hook it whole.
            assert (not flatten_sequential)
            if hasattr(model, 'reset_classifier'):
                model.reset_classifier(0)
            layers['body'] = model
            hooks.extend(self.feature_info.get_dicts())
        else:
            # Rebuild the model as a flat(ter) sequence of modules, attaching
            # hooks to the requested submodules and truncating everything
            # after the last hooked one.
            modules = _module_list(model, flatten_sequential=flatten_sequential)
            remaining = {f['module']: (f['hook_type'] if ('hook_type' in f) else default_hook_type) for f in self.feature_info.get_dicts()}
            for (new_name, old_name, module) in modules:
                layers[new_name] = module
                for (fn, fm) in module.named_modules(prefix=old_name):
                    if (fn in remaining):
                        hooks.append(dict(module=fn, hook_type=remaining[fn]))
                        del remaining[fn]
                if (not remaining):
                    # All requested layers found; drop the tail of the model.
                    break
            assert (not remaining), f'Return layers ({remaining}) are not present in model'
        self.update(layers)
        self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)
    def forward(self, x):
        for (name, module) in self.items():
            x = module(x)
        # Hook storage is keyed by device so data-parallel replicas don't clash.
        out = self.hooks.get_output(x.device)
        return (out if self.out_as_dict else list(out.values()))
def _cos_n_atan(x, y, n):
    """Return cos(n*atan(y/x)) expanded recursively via the angle-addition
    identity; mutually recursive with _sin_n_atan."""
    # NOTE(review): `n.is_integer` suggests n is a SymPy/Sage number; for a
    # plain Python float this is a bound method and always truthy — confirm.
    assert n.is_integer
    if (n < 0):
        # cosine is even: cos(-t) = cos(t).
        return _cos_n_atan(x, y, ((- 1) * n))
    if (n == 0):
        return 1
    else:
        r2 = ((x * x) + (y * y))
        r = sqrt(r2)
        # cos(n*t) = cos(t)cos((n-1)t) - sin(t)sin((n-1)t), with cos(t) = x/r
        # and sin(t) = y/r.  NOTE: recursion cost grows exponentially in n.
        return (((x * _cos_n_atan(x, y, (n - 1))) / r) - ((y * _sin_n_atan(x, y, (n - 1))) / r))
class TFWav2Vec2PreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when the TensorFlow backend is absent."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises a helpful error telling the user to install TensorFlow.
        requires_backends(self, ['tf'])
def get_file_handle(path_, r_w_a):
    """Open *path_* with mode *r_w_a* ('r', 'w', 'a', ...) and return the handle.

    On failure, prints a message and terminates the process with a non-zero
    exit status.  The caller is responsible for closing the handle.
    """
    import sys  # local import keeps this snippet self-contained
    try:
        fhand = open(path_, r_w_a)
    except OSError:
        # Fix: was a bare `except` (also swallowed KeyboardInterrupt and
        # programming errors) followed by `exit()`, which exits with status 0
        # even though opening failed.
        print('Cannot open file {}'.format(path_))
        sys.exit(1)
    return fhand
def densenet161(pretrained=False, **kwargs):
    """Construct a DenseNet-161 model, optionally loading pretrained weights.

    Extra keyword arguments are forwarded to the DenseNet constructor.
    """
    model = DenseNet(num_init_features=96, growth_rate=48, block_config=(6, 12, 36, 24), **kwargs)
    if pretrained:
        # Older checkpoints store dense-layer sub-keys as e.g. 'norm.1.weight';
        # the current module naming is 'norm1.weight'.  Rewrite the keys
        # (dropping the inner '.') before loading the state dict.
        pattern = re.compile('^(.*denselayer\\d+\\.(?:norm|relu|conv))\\.((?:[12])\\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model_zoo.load_url(model_urls['densenet161'])
        # Iterate over a copy of the keys since we mutate the dict.
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = (res.group(1) + res.group(2))
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
    return model
class FantasizerModelStack(PredictJointModelStack, PredictYModelStack, ModelStack[FantasizerModelType]):
    """Model stack combining joint- and Y-prediction behavior for fantasizer
    models; all behavior comes from the mixin bases."""
    pass
# NOTE(review): the bare `_test()` line below looks like a stripped decorator
# (e.g. dace's `@fpga_test()`) lost in extraction — confirm upstream.
_test()
def test_reduce_sum_all_axis():
    """Sum-reduce a 4x4 array over both axes using the FPGA partial-reduction
    library expansion and compare against numpy."""
    A = np.random.rand(4, 4).astype(np.float32)
    B = np.random.rand(1).astype(np.float32)
    sdfg = create_reduce_sdfg('lambda a,b: a+b', (0, 1), 'reduction_sum_all_axis', A, B, dace.float32)
    from dace.libraries.standard import Reduce
    # Force the implementation under test before expanding library nodes.
    Reduce.default_implementation = 'FPGAPartialReduction'
    sdfg.expand_library_nodes()
    sdfg(A=A, B=B)
    assert np.allclose(B, np.sum(A, axis=(0, 1)))
    return sdfg
# NOTE(review): the bare `()` below appears to be the residue of a stripped
# class decorator (likely a dataclass decorator) lost in extraction — confirm
# upstream; kept as-is since it is a harmless no-op expression.
()
class IQNQFunctionFactory(QFunctionFactory):
    """Factory for Implicit Quantile Network (IQN) Q-functions."""
    n_quantiles: int = 64
    n_greedy_quantiles: int = 32
    embed_size: int = 64
    def create_discrete(self, encoder: Encoder, hidden_size: int, action_size: int) -> Tuple[(DiscreteIQNQFunction, DiscreteIQNQFunctionForwarder)]:
        """Build a discrete-action IQN Q-function and its forwarder."""
        q_func = DiscreteIQNQFunction(encoder=encoder, hidden_size=hidden_size, action_size=action_size, n_quantiles=self.n_quantiles, n_greedy_quantiles=self.n_greedy_quantiles, embed_size=self.embed_size)
        forwarder = DiscreteIQNQFunctionForwarder(q_func, self.n_quantiles)
        return (q_func, forwarder)
    def create_continuous(self, encoder: EncoderWithAction, hidden_size: int) -> Tuple[(ContinuousIQNQFunction, ContinuousIQNQFunctionForwarder)]:
        """Build a continuous-action IQN Q-function and its forwarder."""
        q_func = ContinuousIQNQFunction(encoder=encoder, hidden_size=hidden_size, n_quantiles=self.n_quantiles, n_greedy_quantiles=self.n_greedy_quantiles, embed_size=self.embed_size)
        forwarder = ContinuousIQNQFunctionForwarder(q_func, self.n_greedy_quantiles)
        return (q_func, forwarder)
    @staticmethod
    def get_type() -> str:
        # Fix: was missing @staticmethod, so instance calls
        # (factory.get_type()) raised TypeError from the implicit self.
        return 'iqn'
def explain_pickle_string(pickle, in_current_sage=False, default_assumptions=False, eval=False, preparse=True, pedantic=False):
    """Translate a pickle byte string into equivalent Sage input code.

    When *eval* is true, the generated code is printed and its evaluated
    result returned; otherwise the code string itself is returned.  Raises
    ValueError when *eval* is combined with *default_assumptions*, since
    code generated under default assumptions is unsafe to run.
    NOTE: parameters ``pickle`` and ``eval`` shadow builtins but are part of
    the public signature.
    """
    sib = SageInputBuilder(preparse=preparse)
    pe = PickleExplainer(sib, in_current_sage=in_current_sage, default_assumptions=default_assumptions, pedantic=pedantic)
    v = pe.run_pickle(pickle)
    ans = sib.result(sib(v))
    if eval:
        if default_assumptions:
            raise ValueError('Not safe to evaluate code generated with default_assumptions')
        # Imported lazily to avoid a hard dependency when eval is unused.
        from sage.misc.sage_eval import sage_eval
        result = sage_eval(ans, preparse=preparse)
        print(ans)
        return result
    else:
        return ans
def sym_pp(W_list, funcs, var_names, threshold=0.01, n_double=0):
    """Symbolically propagate the input symbols through each weight matrix in
    *W_list*, applying the activations in *funcs* after every layer.

    Entries of *var_names* may be strings (converted to sympy Symbols) or
    ready-made symbolic objects.  Weights below *threshold* are pruned.
    """
    symbols = [v if not isinstance(v, str) else sym.Symbol(v) for v in var_names]
    expr = sym.Matrix(symbols).T
    for layer_weights in np.asarray(W_list):
        # Prune tiny weights, multiply through, then apply the activation.
        pruned = filter_mat(sym.Matrix(layer_weights), threshold=threshold)
        expr = apply_activation(expr * pruned, funcs, n_double=n_double)
    return expr
class STN3d(nn.Module):
    """Spatial transformer that regresses an (n x n) input-alignment matrix.

    The prediction is biased towards the identity so an untrained network
    behaves approximately like a no-op transform.
    """
    def __init__(self, n=4):
        super(STN3d, self).__init__()
        self.n = n
        # Shared per-point MLP implemented as 1x1 convolutions.
        self.conv1 = torch.nn.Conv1d(n, 64, 1, bias=False)
        self.conv2 = torch.nn.Conv1d(64, 128, 1, bias=False)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1, bias=False)
        # Regression head from the global feature down to n*n entries.
        self.fc1 = nn.Linear(1024, 512, bias=False)
        self.fc2 = nn.Linear(512, 256, bias=False)
        self.fc3 = nn.Linear(256, (n * n))
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)
        # Initialize every batch norm as an identity transform.
        for module in self.modules():
            if isinstance(module, nn.BatchNorm1d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
    def forward(self, x):
        batch = x.size()[0]
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.relu(self.bn3(self.conv3(out)))
        # Symmetric max-pool over the point dimension -> global feature.
        out = torch.max(out, 2, keepdim=True)[0]
        out = out.view(-1, 1024)
        out = F.relu(self.bn4(self.fc1(out)))
        out = F.relu(self.bn5(self.fc2(out)))
        out = self.fc3(out)
        # Bias the regressed matrix towards the identity.
        identity = np.eye(self.n).ravel().astype(np.float32)
        identity = torch.from_numpy(identity).view(1, (self.n * self.n)).repeat(batch, 1)
        out = out + identity.to(device=x.device)
        return out.view(-1, self.n, self.n)
class VAEBaseline(nn.Module):
    """Minimal fully-connected VAE for 28x28 images: 784 -> 400 -> latent
    -> 400 -> 784."""
    def __init__(self, latent_space_size=10):
        super(VAEBaseline, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, latent_space_size)  # mean head
        self.fc22 = nn.Linear(400, latent_space_size)  # log-variance head
        self.fc3 = nn.Linear(latent_space_size, 400)
        self.fc4 = nn.Linear(400, 784)
        print('Total model parameters {}'.format(self.count_parameters()))
    def encode(self, x):
        """Return (mu, logvar) of the approximate posterior for *x*."""
        hidden = F.relu(self.fc1(x))
        return (self.fc21(hidden), self.fc22(hidden))
    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) during training; return mu at eval time."""
        if not self.training:
            return mu
        std = torch.exp(0.5 * logvar)
        noise = torch.randn_like(std)
        return noise.mul(std).add_(mu)
    def decode(self, z):
        hidden = F.relu(self.fc3(z))
        return F.sigmoid(self.fc4(hidden))
    def forward(self, x):
        """Return (reconstruction, mu, logvar) for flattened input *x*."""
        mu, logvar = self.encode(x.view(-1, 784))
        latent = self.reparameterize(mu, logvar)
        return (self.decode(latent), mu, logvar)
    def count_parameters(self):
        # Number of trainable parameters only.
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
class TextPrint():
    """Small helper for drawing successive lines of text on a pygame screen,
    maintaining a cursor with indent support."""
    def __init__(self):
        self.reset()
        self.font = pygame.font.Font(None, 20)
    def printf(self, screen, textString):
        # Render at the current cursor position, then advance one line.
        rendered = self.font.render(textString, True, BLACK)
        screen.blit(rendered, [self.x, self.y])
        self.y += self.line_height
    def reset(self):
        # Cursor back to the top-left corner, 15px line height.
        self.x, self.y, self.line_height = 10, 10, 15
    def indent(self):
        self.x += 10
    def unindent(self):
        self.x -= 10
def get_available_detector_ids(detectors_path):
    """Return the names of all immediate subdirectories of *detectors_path*,
    one per installed detector."""
    entries = listdir(detectors_path)
    return [entry for entry in entries if isdir(join(detectors_path, entry))]
def test_cast_tensor_type():
    """cast_tensor_type must convert dtypes recursively through containers,
    pass scalars through unchanged, and (on CUDA) move tensors by device."""
    inputs = torch.rand(10)
    if torch.cuda.is_available():
        inputs = inputs.cuda()
    # dst_type=None is invalid.
    with pytest.raises(AssertionError):
        cast_tensor_type(inputs, src_type=None, dst_type=None)
    # Python scalars are returned unchanged.
    out = cast_tensor_type(10.0, dst_type=torch.half)
    assert ((out == 10.0) and isinstance(out, float))
    # Tensor dtype round-trip fp32 -> fp16 -> fp32.
    fp16_out = cast_tensor_type(inputs, dst_type=torch.half)
    assert (fp16_out.dtype == torch.half)
    fp32_out = cast_tensor_type(fp16_out, dst_type=torch.float32)
    assert (fp32_out.dtype == torch.float32)
    # Lists are cast element-wise, preserving the container type and length.
    list_input = [inputs, inputs]
    list_outs = cast_tensor_type(list_input, dst_type=torch.half)
    assert ((len(list_outs) == len(list_input)) and isinstance(list_outs, list))
    for out in list_outs:
        assert (out.dtype == torch.half)
    # Dicts are cast value-wise.
    dict_input = {'test1': inputs, 'test2': inputs}
    dict_outs = cast_tensor_type(dict_input, dst_type=torch.half)
    assert ((len(dict_outs) == len(dict_input)) and isinstance(dict_outs, dict))
    if torch.cuda.is_available():
        # dst_type may also be a device: tensors are moved instead of recast.
        cpu_device = torch.empty(0).device
        gpu_device = inputs.device
        cpu_out = cast_tensor_type(inputs, dst_type=cpu_device)
        assert (cpu_out.device == cpu_device)
        gpu_out = cast_tensor_type(inputs, dst_type=gpu_device)
        assert (gpu_out.device == gpu_device)
def main():
    """End-to-end PPO (RLHF) training entry point.

    Loads the policy model (optionally with a LoRA adapter) and a separate
    reward model, builds a prompt dataset from a hub dataset or local JSON
    files, then runs the PPO loop: generate responses, score them with the
    reward model, and step the policy, checkpointing every ``save_steps``.
    """
    parser = HfArgumentParser(ScriptArguments)
    args = parser.parse_args_into_dataclasses()[0]
    logger.info(f'Parse args: {args}')
    (config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
    # --- Tokenizer ---------------------------------------------------------
    if (args.model_type == 'bloom'):
        # Bloom only ships a fast tokenizer.
        args.use_fast_tokenizer = True
    tokenizer_kwargs = {'cache_dir': args.cache_dir, 'use_fast': args.use_fast_tokenizer, 'trust_remote_code': args.trust_remote_code}
    tokenizer_name_or_path = args.tokenizer_name_or_path
    if (not tokenizer_name_or_path):
        tokenizer_name_or_path = args.model_name_or_path
    tokenizer = tokenizer_class.from_pretrained(tokenizer_name_or_path, **tokenizer_kwargs)
    if (tokenizer.pad_token_id is None):
        # Fall back to token id 0 when the tokenizer defines no pad token.
        tokenizer.pad_token_id = 0
    # --- Policy model (full fine-tune or LoRA) -----------------------------
    peft_config = None
    if args.use_peft:
        logger.info('Fine-tuning method: LoRA(PEFT)')
        peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, target_modules=args.target_modules, inference_mode=False, r=args.lora_rank, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout)
    else:
        logger.info('Fine-tuning method: Full parameters training')
    torch_dtype = (args.torch_dtype if (args.torch_dtype in ['auto', None]) else getattr(torch, args.torch_dtype))
    world_size = int(os.environ.get('WORLD_SIZE', '1'))
    if (world_size > 1):
        # Distributed: pin each process's model to its local rank's device.
        args.device_map = {'': int(os.environ.get('LOCAL_RANK', '0'))}
    config = config_class.from_pretrained(args.model_name_or_path, torch_dtype=torch_dtype, trust_remote_code=args.trust_remote_code, cache_dir=args.cache_dir)
    model = AutoModelForCausalLMWithValueHead.from_pretrained(args.model_name_or_path, config=config, torch_dtype=torch_dtype, load_in_4bit=args.load_in_4bit, load_in_8bit=args.load_in_8bit, device_map=args.device_map, trust_remote_code=args.trust_remote_code, peft_config=(peft_config if args.use_peft else None))
    print_trainable_parameters(model)
    # --- Reward model ------------------------------------------------------
    default_device = ('cuda' if torch.cuda.is_available() else 'cpu')
    device = (args.reward_model_device if (args.reward_model_device is not None) else default_device)
    reward_config = config_class.from_pretrained(args.reward_model_name_or_path, torch_dtype=torch_dtype, trust_remote_code=args.trust_remote_code, cache_dir=args.cache_dir)
    reward_model = AutoModelForSequenceClassification.from_pretrained(args.reward_model_name_or_path, config=reward_config, load_in_8bit=args.load_in_8bit, trust_remote_code=args.trust_remote_code)
    reward_model.to(device)
    reward_tokenizer = AutoTokenizer.from_pretrained(args.reward_model_name_or_path, **tokenizer_kwargs)
    # --- Raw datasets: hub dataset or local JSON/JSONL files ---------------
    if (args.dataset_name is not None):
        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir)
        if ('validation' not in raw_datasets.keys()):
            # Carve a validation split out of the head of the train split.
            raw_datasets['validation'] = load_dataset(args.dataset_name, args.dataset_config_name, split=f'train[:{args.validation_split_percentage}%]', cache_dir=args.cache_dir)
            raw_datasets['train'] = load_dataset(args.dataset_name, args.dataset_config_name, split=f'train[{args.validation_split_percentage}%:]', cache_dir=args.cache_dir)
    else:
        data_files = {}
        if ((args.train_file_dir is not None) and os.path.exists(args.train_file_dir)):
            train_data_files = (glob(f'{args.train_file_dir}/**/*.json', recursive=True) + glob(f'{args.train_file_dir}/**/*.jsonl', recursive=True))
            logger.info(f"train files: {', '.join(train_data_files)}")
            data_files['train'] = train_data_files
        if ((args.validation_file_dir is not None) and os.path.exists(args.validation_file_dir)):
            eval_data_files = (glob(f'{args.validation_file_dir}/**/*.json', recursive=True) + glob(f'{args.validation_file_dir}/**/*.jsonl', recursive=True))
            logger.info(f"eval files: {', '.join(eval_data_files)}")
            data_files['validation'] = eval_data_files
        raw_datasets = load_dataset('json', data_files=data_files, cache_dir=args.cache_dir)
        if ('validation' not in raw_datasets.keys()):
            raw_datasets['validation'] = load_dataset('json', data_files=data_files, split=f'train[:{args.validation_split_percentage}%]', cache_dir=args.cache_dir)
            raw_datasets['train'] = load_dataset('json', data_files=data_files, split=f'train[{args.validation_split_percentage}%:]', cache_dir=args.cache_dir)
    logger.info(f'Raw datasets: {raw_datasets}')
    max_source_length = args.max_source_length
    max_target_length = args.max_target_length
    prompt_template = get_conv_template(args.template_name)
    def preprocess_function(examples):
        """Turn conversation records into (query text, tokenized input_ids)
        pairs using the configured prompt template."""
        new_examples = {'query': [], 'input_ids': []}
        roles = ['human', 'gpt']
        def get_prompt(examples):
            # Yield a templated prompt per well-formed conversation: roles
            # must alternate human/gpt and pair up into (human, gpt) turns.
            for (i, source) in enumerate(examples['conversations']):
                if (len(source) < 2):
                    continue
                data_role = source[0].get('from', '')
                if ((data_role not in roles) or (data_role != roles[0])):
                    # Drop a leading non-human turn and re-check length.
                    source = source[1:]
                if (len(source) < 2):
                    continue
                messages = []
                for (j, sentence) in enumerate(source):
                    data_role = sentence.get('from', '')
                    if (data_role not in roles):
                        logger.warning(f'unknown role: {data_role}, {i}. (ignored)')
                        break
                    if (data_role == roles[(j % 2)]):
                        messages.append(sentence['value'])
                if ((len(messages) < 2) or ((len(messages) % 2) != 0)):
                    continue
                history_messages = [[messages[k], messages[(k + 1)]] for k in range(0, len(messages), 2)]
                (yield prompt_template.get_prompt(history_messages))
        for prompt in get_prompt(examples):
            # NOTE(review): iterating `range(len(prompt) // 2)` walks string
            # positions, so every even character offset is tokenized as a
            # source — verify this slicing is intended upstream.
            for i in range((len(prompt) // 2)):
                source_txt = prompt[(2 * i)]
                tokenized_question = tokenizer(source_txt, truncation=True, max_length=max_source_length, padding='max_length', return_tensors='pt')
                new_examples['query'].append(source_txt)
                new_examples['input_ids'].append(tokenized_question['input_ids'])
        return new_examples
    # --- Build the tokenized PPO training dataset --------------------------
    train_dataset = None
    if args.do_train:
        if ('train' not in raw_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if ((args.max_train_samples is not None) and (args.max_train_samples > 0)):
            max_train_samples = min(len(train_dataset), args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        logger.debug(f'Example train_dataset[0]: {train_dataset[0]}')
        tokenized_dataset = train_dataset.shuffle().map(preprocess_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=train_dataset.column_names, load_from_cache_file=(not args.overwrite_cache), desc='Running tokenizer on dataset')
        train_dataset = tokenized_dataset.filter((lambda x: (len(x['input_ids']) > 0)))
        logger.debug(f'Num train_samples: {len(train_dataset)}')
    def collator(data):
        # Column-wise collation: list of dicts -> dict of lists.
        return dict(((key, [d[key] for d in data]) for key in data[0]))
    # --- PPO trainer -------------------------------------------------------
    output_dir = args.output_dir
    config = PPOConfig(steps=args.max_steps, model_name=args.model_name_or_path, learning_rate=args.learning_rate, log_with=args.report_to, batch_size=args.batch_size, mini_batch_size=args.mini_batch_size, gradient_accumulation_steps=args.gradient_accumulation_steps, optimize_cuda_cache=True, early_stopping=args.early_stopping, target_kl=args.target_kl, seed=args.seed, init_kl_coef=args.init_kl_coef, adap_kl_ctrl=args.adap_kl_ctrl, project_kwargs={'logging_dir': output_dir})
    set_seed(config.seed)
    trainer = PPOTrainer(config, model, ref_model=None, tokenizer=tokenizer, dataset=train_dataset, data_collator=collator)
    generation_kwargs = {'max_new_tokens': max_target_length, 'temperature': 1.0, 'repetition_penalty': 1.0, 'top_p': 1.0, 'do_sample': True}
    # --- PPO loop: generate, score with the reward model, step -------------
    if args.do_train:
        logger.info('*** Train ***')
        total_steps = config.total_ppo_epochs
        for (step, batch) in tqdm(enumerate(trainer.dataloader)):
            if (step >= total_steps):
                break
            question_tensors = batch['input_ids']
            question_tensors = [torch.LongTensor(i).to(device).squeeze(0) for i in question_tensors]
            responses = []
            response_tensors = []
            for q_tensor in question_tensors:
                response_tensor = trainer.generate(q_tensor, return_prompt=False, **generation_kwargs)
                r = tokenizer.batch_decode(response_tensor, skip_special_tokens=True)[0]
                responses.append(r)
                response_tensors.append(response_tensor.squeeze(0))
            batch['response'] = responses
            score_outputs = [get_reward_model_output(reward_model, reward_tokenizer, q, r, device) for (q, r) in zip(batch['query'], batch['response'])]
            rewards = calculate_rewards(score_outputs, args.reward_baseline)
            try:
                stats = trainer.step(question_tensors, response_tensors, rewards)
                trainer.log_stats(stats, batch, rewards)
                logger.debug(f'Step {step}/{total_steps}: reward score:{score_outputs}')
            except ValueError as e:
                # Logging failures must not abort training.
                logger.warning(f'Failed to log stats for step {step}, because of {e}')
            if (step and ((step % args.save_steps) == 0)):
                save_dir = os.path.join(output_dir, f'checkpoint-{step}')
                trainer.save_pretrained(save_dir)
        trainer.save_pretrained(output_dir)
def LoadEdgeListStr(tspec, *args):
    """Dispatch LoadEdgeListStr to the loader matching graph type *tspec*.

    Raises TypeError when *tspec* is not a supported graph type.
    """
    dispatch = (
        (PUNGraph, LoadEdgeListStr_PUNGraph),
        (PUndirNet, LoadEdgeListStr_PUndirNet),
        (PDirNet, LoadEdgeListStr_PDirNet),
        (PNGraph, LoadEdgeListStr_PNGraph),
        (PNEANet, LoadEdgeListStr_PNEANet),
        (PNGraphMP, LoadEdgeListStr_PNGraphMP),
        (PNEANetMP, LoadEdgeListStr_PNEANetMP),
    )
    for graph_type, loader in dispatch:
        if (tspec == graph_type):
            return loader(*args)
    raise TypeError('First argument has invalid type')
def train(model, device, loader, loss_fun, optimizer):
    """Run one training epoch over *loader* and return the mean batch loss."""
    model.train()
    running_loss = 0
    for step, (inputs, targets) in enumerate(tqdm(loader, desc='Iteration')):
        preds = model(inputs.to(device))
        # Single-output models are squeezed to match 1-D targets.
        if preds.shape[1] == 1:
            preds = preds.flatten()
        loss = loss_fun(preds, targets.to(device))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.detach().cpu().item()
    # `step` holds the index of the last batch seen.
    return running_loss / (step + 1)
def skip_if_checkpoint_not_accessible(path: str):
    """Return a decorator that skips the test when the checkpoint at *path*
    cannot be opened (missing file, auth failure, network error, ...)."""
    def try_load_path(path):
        # Best-effort probe: any exception at all means "not accessible".
        try:
            (fs, path_to_open) = _get_fs_and_plain_path(path)
            fs.open(path_to_open, 'rb')
        except Exception:
            return False
        else:
            return True
    # skipif(...) returns a decorator; applying it to the identity lambda
    # yields a ready-made decorator for the caller.
    return pytest.mark.skipif((not try_load_path(path)), reason='Checkpoint not accessible')((lambda x: x))
# NOTE(review): the `.parametrize` line looks like a stripped
# `@pytest.mark.parametrize` decorator lost in extraction — confirm upstream.
.parametrize('forest_cls', FORESTS)
def test_predict_sparse(make_whas500, forest_cls):
    """Predictions from a forest fit on dense data must match one fit on the
    same data in CSR sparse form."""
    seed = 42
    whas500 = make_whas500(to_numeric=True)
    (X, y) = (whas500.x, whas500.y)
    # Replace features with a sparse-friendly binomial design matrix.
    X = np.random.RandomState(seed).binomial(n=5, p=0.1, size=X.shape)
    (X_train, X_test, y_train, _) = train_test_split(X, y, random_state=seed)
    # Dense fit.
    forest = forest_cls(random_state=seed)
    forest.fit(X_train, y_train)
    y_pred = forest.predict(X_test)
    y_cum_h = forest.predict_cumulative_hazard_function(X_test)
    y_surv = forest.predict_survival_function(X_test)
    # Sparse (CSR) fit with identical seed.
    X_train_csr = sparse.csr_matrix(X_train)
    X_test_csr = sparse.csr_matrix(X_test)
    forest_csr = forest_cls(random_state=seed)
    forest_csr.fit(X_train_csr, y_train)
    y_pred_csr = forest_csr.predict(X_test_csr)
    y_cum_h_csr = forest_csr.predict_cumulative_hazard_function(X_test_csr)
    y_surv_csr = forest_csr.predict_survival_function(X_test_csr)
    assert (y_pred.shape[0] == X_test.shape[0])
    assert (y_pred_csr.shape[0] == X_test.shape[0])
    assert_array_equal(y_pred, y_pred_csr)
    assert_array_equal(y_cum_h_csr, y_cum_h)
    assert_array_equal(y_surv, y_surv_csr)
def test_prune_sample(workspace_factory):
    """Pruning requires a list of sample names; a bare string is rejected."""
    ws = workspace_factory()
    sample = ws.samples[1]
    # A plain string would be iterated per-character — invalid operation.
    with pytest.raises(pyhf.exceptions.InvalidWorkspaceOperation):
        ws.prune(samples=sample)
    new_ws = ws.prune(samples=[sample])
    assert (sample not in new_ws.samples)
class Plane3D(object):
    """An infinite plane in 3-D space defined by a point and a normal vector."""
    def __init__(self, point=Point3D(0, 0, 0), normal=Vector3D(0, 0, 1)):
        """Create a plane through *point* with non-zero *normal*.

        Raises NotImplementedError for wrong argument types and ValueError
        for a zero normal (both kept for backward compatibility).
        """
        if (not isinstance(point, Point3D)):
            raise NotImplementedError("Plane3D: invalid ``point'' argument")
        if (not isinstance(normal, Vector3D)):
            raise NotImplementedError("Plane3D: invalid ``normal'' argument")
        if (0 == normal.mag2):
            raise ValueError("Plane3D: ``normal'' must not be zero!")
        # Defensive copies so later mutation of the arguments can't move the plane.
        self.point = point.copy()
        self.normal = normal.copy()
    @classmethod
    def from_points(cls, point1, point2, point3):
        """Build the plane through three non-collinear points.

        Fix: restored the @classmethod decorator (``cls`` was unbound;
        matching the stripped-decorator damage seen elsewhere in this file).
        """
        v21 = (point2 - point1)
        v31 = (point3 - point1)
        return cls(point1, v21.cross(v31))
    @classmethod
    def from_line_and_point(cls, line, point):
        """Build the plane containing *line* and passing through *point*.

        Fix: restored the @classmethod decorator.
        """
        return cls(point, line.direction.cross((point - line.point)))
    def __contains__(self, obj):
        # Delegate membership: the object decides whether it lies on the plane.
        return obj.on_plane(self)
    def isparallel(self, other):
        """True when *other* (Line3D, Vector3D or Plane3D) is parallel to
        this plane; raises TypeError otherwise."""
        if isinstance(other, Line3D):
            return self.normal.isperpendicular(other.direction)
        elif isinstance(other, Vector3D):
            return self.normal.isperpendicular(other)
        elif isinstance(other, Plane3D):
            return self.normal.iscollinear(other.normal)
        else:
            raise TypeError("Plane3D.isparallel: invalid ``other'' argument")
    def __repr__(self):
        return '<Plane3D({0},{1})>'.format(self.point, self.normal)
    def __str__(self):
        return 'Plane3D({0},{1})'.format(self.point, self.normal)
    def copy(self):
        """Return an independent copy of this plane."""
        return Plane3D(self.point.copy(), self.normal.copy())
    def __eq__(self, other):
        # Planes coincide when the other's point lies on this plane and the
        # normals are collinear.
        return ((other.point in self) and self.normal.iscollinear(other.normal))
    def __ne__(self, other):
        return (not (self == other))
    def distance(self, other):
        """Shortest distance from this plane to a Point3D, Line3D or Plane3D
        (0 for intersecting objects)."""
        try:
            if isinstance(other, Point3D):
                if (other in self):
                    return 0
                # |(P - P0) . n| / |n|
                v = (other - self.point)
                return (abs((v * self.normal)) / self.normal.mag)
            elif self.isparallel(other):
                return self.distance(other.point)
            else:
                return 0
        except TypeError:
            # Fix: error message previously named Line3D instead of Plane3D.
            raise NotImplementedError(('Distance from Plane3D to %s is not defined' % other))
    def angle(self, other):
        """Angle between this plane's normal and a Line3D direction or
        another Plane3D's normal."""
        if isinstance(other, Line3D):
            return self.normal.angle(other.direction)
        elif isinstance(other, Plane3D):
            return self.normal.angle(other.normal)
        # Fix: error message previously named Line3D instead of Plane3D.
        raise NotImplementedError(('Angle between a Plane3D and a %s is not defined' % other))
    def intersect(self, other):
        """Intersection with a Point3D (the point or None), a Line3D
        (delegated to the line) or a Plane3D (a Line3D, or None if parallel)."""
        if isinstance(other, Point3D):
            return (other if (other in self) else None)
        elif isinstance(other, Line3D):
            return other.intersect(self)
        elif isinstance(other, Plane3D):
            if self.isparallel(other):
                return None
            # The intersection line has direction N1 x N2 and passes through
            # the point c1*N1 + c2*N2 solving N1.p = d1 and N2.p = d2.
            (N1, N2) = (self.normal, other.normal)
            (d1, d2) = ((N1 * self.point._vct), (N2 * other.point._vct))
            det = (((N1 * N1) * (N2 * N2)) - ((N1 * N2) ** 2))
            deti = (1.0 / det)
            # Fix: c1 used (N1.N1) where (N1.N2) belongs, and c2 was a
            # copy-paste duplicate of c1; both put the point off the true line.
            c1 = (((d1 * (N2 * N2)) - (d2 * (N1 * N2))) * deti)
            c2 = (((d2 * (N1 * N1)) - (d1 * (N1 * N2))) * deti)
            point = Point3D.fromvector(((c1 * N1) + (c2 * N2)))
            direction = N1.cross(N2)
            return Line3D(point, direction)
        raise NotImplementedError(('Intersection between a Plane3D and a %s is not defined' % other))
class BitPreActivationBottleneckLayer(nn.Module):
    """Pre-activation bottleneck block (BiT / ResNet-v2 style).

    Each conv is preceded by a GroupNorm+activation, and the shortcut
    projection (when present) consumes the pre-activated input.
    """
    def __init__(self, config, in_channels, out_channels=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, drop_path_rate=0.0, is_first_layer=False):
        super().__init__()
        # NOTE(review): first_dilation/dilation are accepted and normalized
        # but never used below -- confirm against the upstream block.
        first_dilation = (first_dilation or dilation)
        # Default to identity width; bottleneck width is rounded by make_div.
        out_channels = (out_channels or in_channels)
        mid_channels = make_div((out_channels * bottle_ratio))
        if is_first_layer:
            # First block of a stage projects the shortcut; preact=True means
            # the projection takes the pre-activated input.
            self.downsample = BitDownsampleConv(config, in_channels, out_channels, stride=stride, preact=True)
        else:
            self.downsample = None
        # 1x1 reduce -> 3x3 spatial (carries stride/groups) -> 1x1 expand,
        # each preceded by GroupNorm+activation.
        self.norm1 = BitGroupNormActivation(config, in_channels)
        self.conv1 = WeightStandardizedConv2d(in_channels, mid_channels, 1, eps=1e-08, padding=config.global_padding)
        self.norm2 = BitGroupNormActivation(config, num_channels=mid_channels)
        self.conv2 = WeightStandardizedConv2d(mid_channels, mid_channels, 3, stride=stride, groups=groups, eps=1e-08, padding=config.global_padding)
        self.norm3 = BitGroupNormActivation(config, mid_channels)
        self.conv3 = WeightStandardizedConv2d(mid_channels, out_channels, 1, eps=1e-08, padding=config.global_padding)
        # Stochastic depth; identity when the drop rate is zero.
        self.drop_path = (BitDropPath(drop_path_rate) if (drop_path_rate > 0) else nn.Identity())
    def forward(self, hidden_states):
        # The pre-activation is shared by the residual branch and (if
        # present) the shortcut projection.
        hidden_states_preact = self.norm1(hidden_states)
        shortcut = hidden_states
        if (self.downsample is not None):
            shortcut = self.downsample(hidden_states_preact)
        hidden_states = self.conv1(hidden_states_preact)
        hidden_states = self.conv2(self.norm2(hidden_states))
        hidden_states = self.conv3(self.norm3(hidden_states))
        hidden_states = self.drop_path(hidden_states)
        return (hidden_states + shortcut)
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    """Return the per-user cache directory for this application and platform.

    ``version`` (only together with ``appname``) is appended as a final path
    component; on Windows, ``opinion`` additionally appends 'Cache'.
    """
    if system == 'win32':
        # Windows: local (non-roaming) app-data, namespaced by author unless
        # appauthor is explicitly False.
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder('CSIDL_LOCAL_APPDATA'))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
            if opinion:
                path = os.path.join(path, 'Cache')
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: honour $XDG_CACHE_HOME, defaulting to ~/.cache.
        base = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def find_next_example(example_id):
    """Advance ``st.session_state['example_id']`` to the next example (cyclic)
    that has at least one code above the recall threshold.

    Relies on module-level ``total_examples``, ``recall_threshold`` and
    ``get_example_topic_codes``; shows a streamlit error when a full cycle
    finds nothing.
    """
    initial_example_id = example_id
    # BUG FIX: wrap immediately so starting from the last example does not
    # probe the out-of-range id ``total_examples``.
    example_id = ((example_id + 1) % total_examples)
    while (example_id != initial_example_id):
        all_codes = get_example_topic_codes(example_id)
        codes_found = sum(len(code_pr_infos) for (_, code_pr_infos) in all_codes)
        if (codes_found > 0):
            st.session_state['example_id'] = example_id
            return
        example_id = ((example_id + 1) % total_examples)
    st.error(f'No examples found at the specified recall threshold: {recall_threshold}.', icon='')
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """StyleGAN2 path-length regularizer.

    Returns (penalty, updated running mean (detached), per-sample lengths).
    """
    # Probe noise normalized by sqrt(H * W) so the Jacobian estimate does
    # not depend on image resolution.
    hw = fake_img.shape[2] * fake_img.shape[3]
    noise = torch.randn_like(fake_img) / math.sqrt(hw)
    projection = (fake_img * noise).sum()
    (grad,) = autograd.grad(outputs=projection, inputs=latents, create_graph=True)
    # L2 norm over the latent dimension, averaged over style layers.
    path_lengths = grad.pow(2).sum(2).mean(1).sqrt()
    # Exponential moving average of the observed path length.
    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
    path_penalty = (path_lengths - path_mean).pow(2).mean()
    return (path_penalty, path_mean.detach(), path_lengths)
def test_pdf_integration_staterror(backend):
    """staterror sigmas must equal sqrt(sum of squared uncerts) / summed nominals."""
    spec = {
        'channels': [
            {
                'name': 'firstchannel',
                'samples': [
                    {'name': 'mu', 'data': [10.0, 10.0], 'modifiers': [{'name': 'mu', 'type': 'normfactor', 'data': None}]},
                    {'name': 'bkg1', 'data': [50.0, 70.0], 'modifiers': [{'name': 'stat_firstchannel', 'type': 'staterror', 'data': [12.0, 12.0]}]},
                    {'name': 'bkg2', 'data': [30.0, 20.0], 'modifiers': [{'name': 'stat_firstchannel', 'type': 'staterror', 'data': [5.0, 5.0]}]},
                    {'name': 'bkg3', 'data': [20.0, 15.0], 'modifiers': []},
                ],
            }
        ]
    }
    pdf = pyhf.Model(spec)
    par_set = pdf.config.param_set('stat_firstchannel')
    (tensorlib, _) = backend
    uncerts = tensorlib.astensor([[12.0, 12.0], [5.0, 5.0]])
    nominal = tensorlib.astensor([[50.0, 70.0], [30.0, 20.0]])
    # Quadrature sum of per-sample uncertainties, bin by bin, over the
    # total nominal yield of the staterror-affected samples.
    quad = tensorlib.sqrt(tensorlib.sum(tensorlib.power(uncerts, 2), axis=0))
    totals = tensorlib.sum(nominal, axis=0)
    expected = tensorlib.tolist(tensorlib.divide(quad, totals))
    assert pytest.approx(tensorlib.tolist(par_set.sigmas)) == expected
class cifar10(CIFAR10):
    """CIFAR wrapper for class-incremental learning.

    Keeps only samples of the requested classes, remaps labels through a
    fixed shuffled list, and in train mode returns an extra augmented view
    per image (the transform is expected to return a 2-tuple of views).
    """
    def __init__(self, root, classes=range(10), train=True, transform=None, target_transform=None, download=True):
        super(cifar10, self).__init__(root, train=train, transform=transform, target_transform=target_transform, download=download)
        # Fixed seed: the same label permutation on every run/process.
        np.random.seed(1993)
        # NOTE(review): 100 entries are shuffled although CIFAR-10 has only
        # 10 classes (labels 0-9 are still found by cls_list.index) --
        # possibly inherited from a CIFAR-100 variant; confirm intent.
        cls_list = [i for i in range(100)]
        np.random.shuffle(cls_list)
        # Maps remapped label -> original label (see get_original_label).
        self.class_mapping = np.array(cls_list, copy=True)
        if self.train:
            train_data = []
            train_labels = []
            for i in range(len(self.data)):
                if (self.targets[i] in classes):
                    train_data.append(self.data[i])
                    # Remapped label = position of the true label in the
                    # shuffled list.
                    train_labels.append(cls_list.index(self.targets[i]))
            self.train_data = np.array(train_data)
            self.train_labels = train_labels
        else:
            test_data = []
            test_labels = []
            for i in range(len(self.data)):
                if (self.targets[i] in classes):
                    test_data.append(self.data[i])
                    test_labels.append(cls_list.index(self.targets[i]))
            self.test_data = np.array(test_data)
            self.test_labels = test_labels
    def __getitem__(self, index):
        """Return (img, img_aug, target) in train mode, (img, img, target) otherwise."""
        if self.train:
            (img, target) = (self.train_data[index], self.train_labels[index])
        else:
            (img, target) = (self.test_data[index], self.test_labels[index])
        img = Image.fromarray(img)
        if (self.transform is not None):
            # The transform must return a pair of views; only train mode
            # keeps the second (augmented) view.
            if self.train:
                (img, img_aug) = self.transform(img)
            else:
                (img, _) = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        if self.train:
            return (img, img_aug, target)
        else:
            return (img, img, target)
    def __len__(self):
        if self.train:
            return len(self.train_data)
        else:
            return len(self.test_data)
    def get_image_class(self, label):
        """All training images whose (remapped) label equals ``label``."""
        return self.train_data[(np.array(self.train_labels) == label)]
    def get_original_label(self, rnd_label):
        """Invert the shuffle: remapped label -> original dataset label."""
        return self.class_mapping[rnd_label]
    def append(self, images, labels):
        """Append additional images/labels to the training set in place."""
        self.train_data = np.concatenate((self.train_data, images), axis=0)
        self.train_labels = (self.train_labels + labels)
def iod(det_x, det_y, gt_x, gt_y):
    """Intersection-over-detection area of two polygons via rasterization."""
    if approx_area_of_intersection(det_x, det_y, gt_x, gt_y) > 1:
        # Canvas just large enough to contain both polygons.
        height = np.maximum(np.max(det_y), np.max(gt_y)) + 1
        width = np.maximum(np.max(det_x), np.max(gt_x)) + 1
        canvas = np.zeros((height, width))
        det_mask = np.zeros_like(canvas)
        gt_mask = np.zeros_like(canvas)
        rows, cols = polygon(det_y, det_x)
        det_mask[rows, cols] = 1
        rows, cols = polygon(gt_y, gt_x)
        gt_mask[rows, cols] = 1
        # Cells covered by both polygons sum to exactly 2.
        overlap = np.where(det_mask + gt_mask == 2, 1, 0)
        inter = np.round(np.sum(overlap), 2)
        det_area = np.round(np.sum(det_mask), 2)
        return inter / float(det_area + 1.0)
    else:
        return 0
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BigBird tokenizer.

    Sequence framing follows the sentencepiece convention used here:
    ``[CLS] A [SEP]`` for single sequences, ``[CLS] A [SEP] B [SEP]`` for pairs.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    # Token ids implicitly prepended to encoded sequences (none by default).
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', sep_token='[SEP]', mask_token='[MASK]', cls_token='[CLS]', **kwargs):
        # Wrap plain-string specials in AddedToken so stripping behaviour is
        # explicit; only the mask token keeps preceding whitespace
        # (lstrip=True) so '<text> [MASK]' round-trips.
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        # Rebuilding a slow tokenizer requires the sentencepiece vocab file.
        self.can_save_slow_tokenizer = (False if (not self.vocab_file) else True)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Frame one or two id sequences with [CLS]/[SEP] special tokens."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((cls + token_ids_0) + sep)
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 0/1 mask where 1 marks special-token positions."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            # In an already-formatted sequence only [SEP]/[CLS] count as special.
            return [(1 if (x in [self.sep_token_id, self.cls_token_id]) else 0) for x in token_ids_0]
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 over '[CLS] A [SEP]', 1 over 'B [SEP]' for pair inputs."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``.

        NOTE(review): returns None (not a tuple) when the directory check
        fails -- callers unpacking the result would break; confirm intended.
        """
        if (not self.can_save_slow_tokenizer):
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Avoid copying a file onto itself.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
class LlamaLoraKbitEngine(CausalLoraKbitEngine):
    """4-bit LoRA engine wrapping the LLaMA-7B base model."""
    config_name: str = 'llama_lora_kbit_engine'

    def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
        base_model = 'decapoda-research/llama-7b-hf'
        tokenizer = LlamaTokenizer.from_pretrained(base_model, add_bos_token=False)
        # LLaMA ships without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.pad_token_id = tokenizer.eos_token_id
        # NOTE(review): the weights_path argument is accepted but None is
        # always forwarded -- confirm this is intentional.
        super().__init__(model_name=base_model, weights_path=None, tokenizer=tokenizer, target_modules=['q_proj', 'v_proj'], load_4bit=True)
def register_Ns3ObjectPtrContainerChecker_methods(root_module, cls):
    """Register constructors and methods of ns3::ObjectPtrContainerChecker."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectPtrContainerChecker const &', 'arg0')])
    # Pure-virtual, const accessor for the contained item's TypeId.
    cls.add_method('GetItemTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
def rerank(args):
    """Run the n-best reranking pipeline and tune interpolation weights.

    For each selected shard: (re)generate and reprocess the n-best lists,
    score them with the backward and language models, then grid-search
    lenpen/weight combinations via match_target_hypo. Returns
    (best_lenpen, best_weight1, best_weight2, best_weight3, best_score).
    """
    # Normalize scalar hyperparameters to lists for uniform grid search.
    if (type(args.lenpen) is not list):
        args.lenpen = [args.lenpen]
    if (type(args.weight1) is not list):
        args.weight1 = [args.weight1]
    if (type(args.weight2) is not list):
        args.weight2 = [args.weight2]
    if (type(args.weight3) is not list):
        args.weight3 = [args.weight3]
    if args.all_shards:
        shard_ids = list(range(args.num_shards))
    else:
        shard_ids = [args.shard_id]
    for shard_id in shard_ids:
        # NOTE(review): only pre_gen is used below; the remaining directory
        # names are unpacked and ignored, and the scoring helpers are called
        # with ``args`` alone -- presumably they derive the shard internally;
        # confirm.
        (pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir) = rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac)
        rerank_generate.gen_and_reprocess_nbest(args)
        rerank_score_bw.score_bw(args)
        rerank_score_lm.score_lm(args)
        if (args.write_hypos is None):
            write_targets = (pre_gen + '/matched_targets')
            write_hypos = (pre_gen + '/matched_hypos')
        else:
            write_targets = ((args.write_hypos + '_targets') + args.gen_subset)
            write_hypos = ((args.write_hypos + '_hypos') + args.gen_subset)
        if args.all_shards:
            write_targets += '_all_shards'
            write_hypos += '_all_shards'
        (best_lenpen, best_weight1, best_weight2, best_weight3, best_score) = match_target_hypo(args, write_targets, write_hypos)
    return (best_lenpen, best_weight1, best_weight2, best_weight3, best_score)
class DataCollatorForLanguageModeling():
    """Import-time placeholder raising a helpful error when PyTorch is absent."""
    def __init__(self, *args, **kwargs):
        # Fails immediately: the real collator requires torch to be installed.
        requires_pytorch(self)
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class SimpleGaussianMLPModel(Model):
    """Deterministic stand-in for GaussianMLPModel used in unit tests."""
    def __init__(self, output_dim, name='SimpleGaussianMLPModel', *args, **kwargs):
        super().__init__(name)
        self.output_dim = output_dim
    def network_output_spec(self):
        """Names of the tensors produced by _build, in order."""
        return ['mean', 'log_std', 'std_param', 'dist']
    def _build(self, obs_input, name=None):
        # One trainable scalar broadcast to every (batch, output_dim) entry.
        return_var = tf.compat.v1.get_variable('return_var', (), initializer=tf.constant_initializer(0.5))
        batch_size = tf.shape(obs_input)[0]
        mean = tf.fill((batch_size, self.output_dim), return_var)
        # Fixed sigma of 0.5 expressed in log space; also reused as std_param.
        log_std = tf.fill((batch_size, self.output_dim), np.log(0.5))
        dist = DiagonalGaussian(self.output_dim)
        return (mean, log_std, log_std, dist)
# NOTE(review): '.datainstrument' is almost certainly a truncated
# '@pytest.mark.datainstrument' marker, and 'tester' below is used as a DaCe
# program ('.to_sdfg'), so it presumably lost a '@dace.program' decorator --
# restore both against the original test file before running.
.datainstrument
def test_restore():
    """Data saved via Save instrumentation must be replayed by Restore even
    after the caller clobbers the input array."""
    def tester(A: dace.float64[(20, 20)]):
        return (A + 5)
    sdfg = tester.to_sdfg(simplify=True)
    # First run: record array contents into the data report.
    _instrument(sdfg, dace.DataInstrumentationType.Save)
    A = np.random.rand(20, 20)
    acopy = np.copy(A)
    result = sdfg(A)
    assert np.allclose(result, (A + 5))
    dreport = sdfg.get_instrumented_data()
    # Second run: overwrite A; Restore mode must feed back the recorded
    # values and reproduce the original result.
    _instrument(sdfg, dace.DataInstrumentationType.Restore)
    A[:] = 5
    result = sdfg.call_with_instrumented_data(dreport, A)
    assert np.allclose(result, (acopy + 5))
def coco_evaluation(dataset, predictions, output_folder, box_only, iou_types, expected_results, expected_results_sigma_tol):
    """Dispatch COCO-style evaluation based on the concrete dataset type."""
    eval_args = dict(dataset=dataset, predictions=predictions, box_only=box_only, output_folder=output_folder, iou_types=iou_types, expected_results=expected_results, expected_results_sigma_tol=expected_results_sigma_tol)
    if isinstance(dataset, COCODataset):
        # Native COCO datasets go through the original pycocotools pathway.
        return do_orig_coco_evaluation(**eval_args)
    if isinstance(dataset, AbstractDataset):
        return do_wrapped_coco_evaluation(**eval_args)
    raise NotImplementedError(('Ground truth dataset is not a COCODataset, nor it is derived from AbstractDataset: type(dataset)=%s' % type(dataset)))
class RandomRotate(object):
    """Rotate an (image, mask) pair by the same random angle in [-degree, +degree)."""
    def __init__(self, degree):
        self.degree = degree
    def __call__(self, img, mask):
        # Uniform sample over [-degree, degree).
        angle = random.random() * 2 * self.degree - self.degree
        rotated_img = img.rotate(angle, Image.BILINEAR)
        # Nearest-neighbour keeps mask labels discrete.
        rotated_mask = mask.rotate(angle, Image.NEAREST)
        return (rotated_img, rotated_mask)
class extractSDAE(nn.Module):
    """Stacked autoencoder used to extract embeddings.

    Mirrored Linear encoder/decoder stacks with leaky-ReLU between layers;
    ``dim`` lists layer widths (e.g. [784, 500, 10]). forward() returns the
    bottleneck embedding together with the reconstruction.
    """

    def __init__(self, dim, slope=0.0):
        super(extractSDAE, self).__init__()
        self.in_dim = dim[0]
        self.nlayers = (len(dim) - 1)
        self.reluslope = slope
        (self.enc, self.dec) = ([], [])
        for i in range(self.nlayers):
            self.enc.append(nn.Linear(dim[i], dim[(i + 1)]))
            # Register each layer as an attribute so its parameters are
            # tracked by nn.Module despite living in plain lists.
            setattr(self, 'enc_{}'.format(i), self.enc[(- 1)])
            self.dec.append(nn.Linear(dim[(i + 1)], dim[i]))
            setattr(self, 'dec_{}'.format(i), self.dec[(- 1)])
        # Prefix encoders (first i layers) for stage-wise access.
        self.base = []
        for i in range(self.nlayers):
            self.base.append(nn.Sequential(*self.enc[:i]))
        for m in self.modules():
            if isinstance(m, nn.Linear):
                # BUG FIX: init.normal/init.constant are the deprecated (and
                # since removed) aliases; use the in-place variants. Also test
                # the bias itself for None -- the old ``m.bias.data is not
                # None`` was always true and would raise AttributeError for
                # bias-free layers instead of skipping them.
                init.normal_(m.weight, std=0.01)
                if (m.bias is not None):
                    init.constant_(m.bias, 0)

    def forward(self, x):
        """Return (embedding, reconstruction) for a batch flattened to in_dim."""
        inp = x.view((- 1), self.in_dim)
        encoded = inp
        for (i, encoder) in enumerate(self.enc):
            encoded = encoder(encoded)
            # No nonlinearity after the bottleneck layer.
            if (i < (self.nlayers - 1)):
                encoded = F.leaky_relu(encoded, negative_slope=self.reluslope)
        out = encoded
        for (i, decoder) in reversed(list(enumerate(self.dec))):
            out = decoder(out)
            # No nonlinearity after the outermost (reconstruction) layer.
            if i:
                out = F.leaky_relu(out, negative_slope=self.reluslope)
        return (encoded, out)
def train_intent_predictor(base_model: TypedModel, args, wandb, optimizer, scheduler, train_dataloader, dev_dataloader, epochs=10, gpus=[], max_grad_norm=1.0):
    """Train an intent predictor with per-epoch dev evaluation and early stopping.

    Saves the best checkpoint (selected by dev macro-F1) plus optimizer and
    scheduler state into args.output_dir and appends a per-epoch summary to
    train_log.txt. NOTE(review): the mutable default ``gpus=[]`` is shared
    across calls -- safe here only because it is never mutated.
    """
    # Wrap for multi-GPU; keep the unwrapped base_model for eval and saving.
    if (len(gpus) > 1):
        parallel_model = nn.DataParallel(base_model, device_ids=gpus).cuda()
    elif (len(gpus) == 1):
        parallel_model = base_model.cuda()
    else:
        parallel_model = base_model
    f_write = open(os.path.join(args.output_dir, 'train_log.txt'), 'w')
    step = 0
    # best_acc actually tracks the best macro-F1 (see the update below).
    (best_acc, patience) = (0, 0)
    for epoch_num in range(epochs):
        base_model.train()
        parallel_model.train()
        train_loss_tracker = []
        for (di, data) in enumerate(tqdm(train_dataloader)):
            # One-time shape/content dump of the first batch for debugging.
            if ((di == 0) and (epoch_num == 0)):
                print()
                for (k, v) in data.items():
                    if torch.is_tensor(v):
                        print('[INFO] ', k, v.size())
                    else:
                        print('[INFO] ', k, v[:5])
            loss = parallel_model(data)
            if (len(gpus) > 1):
                # DataParallel returns one loss per replica; average them.
                loss = loss.mean()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(parallel_model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            step += 1
            train_loss_tracker.append(loss.item())
            if (args.wandb and ((step % 10) == 0)):
                wandb.log({'avg_training_loss': np.mean(train_loss_tracker)})
        # End-of-epoch dev evaluation.
        (eval_loss, eval_prediction, eval_truth) = evaluate_module_index(base_model, dev_dataloader)
        acc = compute_accuracy(eval_prediction, eval_truth)
        f1 = f1_score(y_pred=eval_prediction, y_true=eval_truth, average='macro')
        train_loss = np.mean(train_loss_tracker)
        print_text = 'epoch {}, train loss: {}, dev loss: {:.4f}, dev acc: {:.4f}, dev f1-macro: {:.4f}'.format(epoch_num, train_loss, eval_loss, acc, f1)
        print(print_text)
        f_write.write((print_text + '\n'))
        if args.wandb:
            wandb.log({'eval_loss': eval_loss})
            wandb.log({'dev_acc': acc})
        if (f1 > best_acc):
            # New best macro-F1: persist model + optimizer/scheduler state.
            model_to_save = (base_model.module if hasattr(base_model, 'module') else base_model)
            print('Saving model checkpoint to %s', args.output_dir)
            torch.save(model_to_save.state_dict(), os.path.join(args.output_dir, 'pytorch.bin'))
            torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
            torch.save(optimizer.state_dict(), os.path.join(args.output_dir, 'optimizer.pt'))
            torch.save(scheduler.state_dict(), os.path.join(args.output_dir, 'scheduler.pt'))
            best_acc = f1
            patience = 0
        else:
            patience += 1
            print('[INFO] Patience {}/{}'.format(patience, args.patience))
        f_write.write('Best Dev Acc: {:.5f}\n'.format(best_acc))
        if (patience > args.patience):
            print('[INFO] Run out of patience...')
            break
    f_write.close()
def make_cnn(convs, padding, inpt, initializer=None):
    """Stack ReLU conv2d layers described by (num_outputs, kernel_size, stride) triples."""
    if initializer is None:
        # Orthogonal initialization with the ReLU gain sqrt(2).
        initializer = tf.orthogonal_initializer(np.sqrt(2.0))
    out = inpt
    with tf.variable_scope('convnet'):
        for conv_spec in convs:
            (num_outputs, kernel_size, stride) = conv_spec
            out = layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, padding=padding, activation_fn=tf.nn.relu, weights_initializer=initializer)
    return out
class Texture1D(object):
    """DSA wrapper around an immutable-storage GL_TEXTURE_1D object, with
    optional NV bindless-handle residency management.
    """

    def __init__(self, levels, internalformat, W):
        """Create the texture and allocate ``levels`` mip levels of width W."""
        self.__id = np.empty(1, dtype=np.uint32)
        glCreateTextures(GL_TEXTURE_1D, len(self.__id), self.__id)
        glTextureStorage1D(self.__id[0], levels, internalformat, W)
        # Bindless handle; populated by makeResident().
        self.__handle = None

    def setFilter(self, min_filter, max_filter):
        """Set minification and magnification filters."""
        glTextureParameteri(self.__id[0], GL_TEXTURE_MIN_FILTER, min_filter)
        glTextureParameteri(self.__id[0], GL_TEXTURE_MAG_FILTER, max_filter)

    def setWrap(self, wrap_s, wrap_t, wrap_r=None):
        """Set wrap modes; the R coordinate is only set when provided.

        NOTE(review): setting WRAP_T on a 1-D texture is unusual -- likely
        mirrored from a 2-D/3-D sibling class; confirm intent.
        """
        glTextureParameteri(self.__id[0], GL_TEXTURE_WRAP_S, wrap_s)
        glTextureParameteri(self.__id[0], GL_TEXTURE_WRAP_T, wrap_t)
        # BUG FIX (idiom): identity test against None instead of '!=', which
        # can misfire on objects overriding __eq__.
        if wrap_r is not None:
            glTextureParameteri(self.__id[0], GL_TEXTURE_WRAP_R, wrap_r)

    def subImage(self, level, xoffset, width, data_format, data_type, pixels):
        """Upload ``pixels`` into a sub-range of one mip level."""
        glTextureSubImage1D(self.__id[0], level, xoffset, width, data_format, data_type, pixels)

    def generate_mipmap(self):
        glGenerateTextureMipmap(self.__id[0])

    def makeResident(self):
        """Acquire the NV bindless handle, make it resident, and return it."""
        self.__handle = glGetTextureHandleNV(self.__id[0])
        glMakeTextureHandleResidentNV(self.__handle)
        return self.__handle

    def makeNonResident(self):
        # BUG FIX (idiom): identity test against None instead of '!='.
        if self.__handle is not None:
            glMakeTextureHandleNonResidentNV(self.__handle)

    def delete(self):
        """Release the GL texture object."""
        glDeleteTextures(1, self.__id)

    def handle(self):
        return self.__handle

    def id(self):
        return self.__id[0]
class GRUModel(Model):
    """GRU network model: one recurrent layer followed by a dense output layer.

    Exposes a full-sequence output and a single-step output sharing weights,
    plus the (optionally trainable) initial hidden state.
    """

    def __init__(self, output_dim, hidden_dim, name=None, hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), recurrent_nonlinearity=tf.nn.sigmoid, recurrent_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), hidden_state_init=tf.zeros_initializer(), hidden_state_init_trainable=False, layer_normalization=False):
        super().__init__(name)
        # Recurrent-cell configuration.
        self._hidden_dim = hidden_dim
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._recurrent_nonlinearity = recurrent_nonlinearity
        self._recurrent_w_init = recurrent_w_init
        self._hidden_state_init = hidden_state_init
        self._hidden_state_init_trainable = hidden_state_init_trainable
        self._layer_normalization = layer_normalization
        # Output-layer configuration.
        self._output_dim = output_dim
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        # Construct the keras layers (re-run on unpickling, see __setstate__).
        self._initialize()

    def _initialize(self):
        """(Re)create the GRU cell and the dense output layer."""
        self._gru_cell = tf.keras.layers.GRUCell(units=self._hidden_dim, activation=self._hidden_nonlinearity, kernel_initializer=self._hidden_w_init, bias_initializer=self._hidden_b_init, recurrent_activation=self._recurrent_nonlinearity, recurrent_initializer=self._recurrent_w_init, name='gru_layer')
        self._output_nonlinearity_layer = tf.keras.layers.Dense(units=self._output_dim, activation=self._output_nonlinearity, kernel_initializer=self._output_w_init, bias_initializer=self._output_b_init, name='output_layer')

    def network_input_spec(self):
        """Names of the network inputs, in order."""
        return ['full_input', 'step_input', 'step_hidden_input']

    def network_output_spec(self):
        """Names of the network outputs, in order."""
        return ['all_output', 'step_output', 'step_hidden', 'init_hidden']

    def _build(self, all_input_var, step_input_var, step_hidden_var, name=None):
        # ``name`` is required by the Model interface but unused here.
        del name
        return gru(name='gru', gru_cell=self._gru_cell, all_input_var=all_input_var, step_input_var=step_input_var, step_hidden_var=step_hidden_var, hidden_state_init=self._hidden_state_init, hidden_state_init_trainable=self._hidden_state_init_trainable, output_nonlinearity_layer=self._output_nonlinearity_layer)

    def __getstate__(self):
        """Drop unpicklable keras layers; __setstate__ rebuilds them."""
        state = super().__getstate__()
        for key in ('_gru_cell', '_output_nonlinearity_layer'):
            state.pop(key)
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        self._initialize()
def main(args):
    """Evaluate a segmentation checkpoint described by a config file.

    Builds the test dataset and model, loads the checkpoint, runs single- or
    multi-GPU testing, then computes and logs the evaluation metrics (rank 0
    only), optionally dumping them to a JSON file.
    """
    args = parse_args(args)
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if (args.options is not None):
        cfg.merge_from_dict(args.options)
    cfg = update_legacy_cfg(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.aug_test:
        # Multi-scale + horizontal-flip test-time augmentation.
        cfg.data.test.pipeline[1].img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
        cfg.data.test.pipeline[1].flip = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
    # Build the model in inference mode (no train_cfg).
    cfg.model.train_cfg = None
    model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    (pathDict, logger, json_file) = update_configs_with_eval_paths(cfg)
    # Strip common state-dict key prefixes so checkpoints load cleanly.
    checkpoint = load_checkpoint(model, pathDict['checkpoint_file_path'], map_location='cpu', revise_keys=[('^module\\.', ''), ('model.', '')])
    if ('CLASSES' in checkpoint.get('meta', {})):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        print('"CLASSES" not found in meta, use dataset.CLASSES instead')
        model.CLASSES = dataset.CLASSES
    if ('PALETTE' in checkpoint.get('meta', {})):
        model.PALETTE = checkpoint['meta']['PALETTE']
    else:
        print('"PALETTE" not found in meta, use dataset.PALETTE instead')
        model.PALETTE = dataset.PALETTE
    efficient_test = False
    if (args.eval_options is not None):
        efficient_test = args.eval_options.get('efficient_test', False)
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        eval_kwargs = {}
        eval_kwargs['eval_kwargs'] = cfg['evaluation']
        # NOTE(review): efficient_test is computed above but the single-GPU
        # path hard-codes efficient_test=False -- confirm this is intended.
        outputs = single_gpu_test(model, data_loader, show=False, out_dir=None, efficient_test=False, opacity=0.5, logger=logger, **eval_kwargs)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect, efficient_test)
    (rank, _) = get_dist_info()
    if (rank == 0):
        # Evaluate with every configured option except the logging interval.
        kwargs = {}
        kwargs['logger'] = logger
        ignore_key_list = ['interval']
        for key in cfg['evaluation'].keys():
            if (key not in ignore_key_list):
                kwargs[key] = cfg['evaluation'][key]
        metric = dataset.evaluate(outputs, **kwargs)
        logger.info(metric)
        metric_dict = dict(config=args.config, metric=metric)
    if ((json_file is not None) and (rank == 0)):
        mmcv.dump(metric_dict, json_file)
def get_stats(lab_fp, score_fp, pred_thresh=None):
    """Print instance counts, the majority-class baseline and per-label P/R."""
    pred_thresh = None if pred_thresh is None else float(pred_thresh)
    (lab_with_pred_l, lab_pred_d, tot_pred_d, tot_insts) = read_file_multi_lab(lab_fp, score_fp, pred_thresh)
    print('Number Instances:', tot_insts)
    # Majority class and its frequency give the trivial baseline.
    max_lab, max_c = max(collections.Counter(lab_with_pred_l).items(), key=lambda kv: kv[1])
    print('Max Label', max_c, max_lab)
    print('Baseline:', '%.3f' % (max_c / tot_insts))
    print('\nPrecision Recall:')
    # Labels ordered by support, most frequent first.
    by_support = sorted(lab_pred_d.items(), key=lambda kv: sum(kv[1].values()), reverse=True)
    for lab, pred_d in by_support:
        if lab == 'NONE':
            continue
        support = sum(pred_d.values())
        recall = pred_d[lab] / support
        precision = pred_d[lab] / tot_pred_d[lab] if tot_pred_d[lab] > 0 else 0
        print('\t'.join([str(support), '%.3f' % precision, '%.3f' % recall, lab]))
def get_num_layer_for_vit(var_name, num_max_layer):
    """Map a CLIP-ViT parameter name to a layer index for layer-wise LR decay."""
    # Stem parameters (patch embedding, first conv, pre-LN) sit at layer 0.
    if any(marker in var_name for marker in ('embedding', 'conv1', 'ln_pre')):
        return 0
    if 'resblocks' in var_name:
        # The block index is the 6th dotted field of the parameter name
        # (assumes this repo's naming layout -- field 5 is the number).
        block_idx = int(var_name.split('.')[5])
        return block_idx + 1
    if 'classifier' in var_name:
        return num_max_layer - 1
    # Everything else (e.g. final norm) goes one below the classifier.
    return num_max_layer - 2
# NOTE(review): the bare call below looks like a truncated decorator (the '@'
# was probably lost in extraction; dace registers these replacements with a
# decorator taking ('Intracomm', 'Irecv')) -- confirm against the original
# module before running.
_method('Intracomm', 'Irecv')
def _intracomm_irecv(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, icomm: 'Intracomm', buffer: str, src: Union[(str, sp.Expr, Number)], tag: Union[(str, sp.Expr, Number)]):
    """DaCe replacement for ``icomm.Irecv(buffer, src, tag)``.

    Returns the name of a transient opaque MPI_Request array created in the
    SDFG, which callers can later wait on.
    """
    from mpi4py import MPI
    (icomm_name, icomm_obj) = icomm
    # Only the world communicator can be lowered to the _irecv helper.
    if (icomm_obj != MPI.COMM_WORLD):
        raise ValueError('Only the mpi4py.MPI.COMM_WORLD Intracomm is supported in DaCe Python programs.')
    (req, _) = sdfg.add_array('irecv_req', [1], dace.dtypes.opaque('MPI_Request'), transient=True, find_new_name=True)
    _irecv(pv, sdfg, state, buffer, src, tag, req)
    return req
def write(self, filename):
    """Render this graph as an interactive vis.js HTML page at ``filename``.

    Builds node/edge dicts, substitutes them (as JSON) into the module-level
    ``_vis_template`` and writes the result to disk.
    """
    nodes = []
    edges = []
    options = {}
    # Graph inputs are drawn as squares.
    for (n, i) in enumerate(self.inputs()):
        nodes.append({'id': i.unique(), 'label': 'input {}'.format(n), 'shape': 'square'})
    existing = set()
    def add_edge(i_, n):
        # Collapse Select nodes onto their producer so edges skip them;
        # deduplicate via the ``existing`` set.
        i = (i_ if (i_.kind() != 'Select') else i_.input())
        if ((i, n) in existing):
            return
        existing.add((i, n))
        e = {'from': n.unique(), 'to': i.unique(), 'arrows': 'from'}
        if (i.stage() != n.stage()):
            # Highlight edges that cross stage boundaries.
            e['color'] = 'green'
        edges.append(e)
    # Per-kind counters give each node a unique 'kind_N' label.
    counts = {}
    offset = 0
    for n in self.nodes():
        # Skip dead nodes and Undefined placeholders.
        if ((len(n.uses()) == 0) or (n.kind() == 'Undefined')):
            continue
        ident = counts.get(n.kind(), 0)
        counts[n.kind()] = (ident + 1)
        # Pin the y coordinate so nodes appear in emission order.
        d = {'id': n.unique(), 'label': '{}_{}'.format(n.kind(), ident), 'y': offset, 'fixed': {'y': True}}
        if (n in self.outputs()):
            d['shape'] = 'triangle'
        for i in n.inputs():
            add_edge(i, n)
        nodes.append(d)
        offset += 30
    result = _vis_template.substitute(nodes=json.dumps(nodes), edges=json.dumps(edges), options=json.dumps(options), name=filename)
    with open(filename, 'w') as f:
        f.write(result)
def draw_rectangle():
    """Open a Tk window hosting a RectangleDrawer; Enter ends the session.

    Returns the drawn rectangles flattened into a single list.
    """
    root = Tk()
    root.title('Rectangle Drawer')
    drawer = RectangleDrawer(root)

    def close_on_enter(_event):
        root.quit()

    root.bind('<Return>', close_on_enter)
    root.mainloop()
    # Flatten the per-rectangle sequences into one flat list.
    flattened = []
    for rect in drawer.get_rectangles():
        flattened.extend(rect)
    return flattened
def test_transformation_pipeline_is_lossy(named_tensor):
    """A pipeline is lossy as soon as any member transformer is lossy."""
    lossless = Float32NumpyArrayToBytes()
    lossy = Float32NumpyArrayToBytes()
    lossy.lossy = True
    pipeline = TransformationPipeline([lossless, lossy])
    assert pipeline.is_lossy() is True
def p_template_definition(s):
    """Parse one template parameter; a trailing '= *' marks it optional.

    Returns (name, required).
    """
    name = p_ident(s)
    required = True
    if s.sy == '=':
        # Default marker: consume "= *" and flag the parameter as optional.
        s.expect('=')
        s.expect('*')
        required = False
    return (name, required)
def count_elements(level):
    """Return the (row, col) coordinates of every gold piece 'G' in the level,
    scanned in row-major order."""
    return [(row, col)
            for row in range(level.h)
            for col in range(level.w)
            if level[(row, col)] == 'G']
def convert_dbpointer_to_text_nmatch(vect, goal, belief):
    """Render the DB match counts encoded in ``vect`` as human-readable text.

    ``vect`` packs one 6-entry one-hot block per domain, in the order
    restaurant, hotel, attraction, train. Only domains present in ``goal``
    AND mentioned in ``belief`` (list of (domain, ...) entries) are rendered.
    Raises ValueError if a rendered block is not a valid one-hot vector.
    """
    # BUG FIX: this list was defined but the loop then indexed an (unrelated)
    # module-level ``domains`` name; index the local list instead.
    domain_in_pointer = ['restaurant', 'hotel', 'attraction', 'train']
    # Train counts are bucketed; slot k maps to the text below.
    train_bucket_text = ['=0', '<3', '<6', '<11', '<41', '>40']
    text = []
    for (idx, domain) in enumerate(domain_in_pointer):
        if (domain not in goal):
            continue
        # Skip domains never mentioned in the belief state.
        if not any((bs[0] == domain) for bs in belief):
            continue
        domain_vec = vect[(idx * 6):((idx * 6) + 6)]
        slot = _one_hot_slot(domain_vec)
        if (domain != 'train'):
            # Non-train domains report the count directly; slot 5 means ">=5".
            domain_match_text = ('>=5' if (slot >= 5) else '={}'.format(slot))
        else:
            domain_match_text = train_bucket_text[slot]
        text.append('{} match{}'.format(domain, domain_match_text))
    return ' , '.join(text)


def _one_hot_slot(domain_vec):
    """Return the index of the single 1 in a 6-entry one-hot vector."""
    for slot in range(6):
        expected = np.zeros(6)
        expected[slot] = 1
        if np.all(domain_vec == expected):
            return slot
    raise ValueError('invalid domain match')
class KeywordExtractor():
    """spaCy pipeline component that selects, de-duplicates and scores keyword candidates."""

    # Per-instance config starts from these defaults; overridden in __init__.
    defaults: Dict[(str, Any)] = {'candidate_selection': 'ngram'}

    def __init__(self, nlp: Language, **overrides):
        self.nlp = nlp
        self.cfg = {**self.defaults, **overrides}

    def __call__(self, doc: Doc) -> Doc:
        """Attach keyword candidates to *doc* and return it."""
        self.init_component()
        doc._.kw_candidates = self.candidate_selection(doc)
        return doc

    def init_component(self):
        """Idempotently register the custom Doc extensions this component uses."""
        if not Doc.has_extension('extract_keywords'):
            Doc.set_extension('extract_keywords', method=self.extract_keywords)
        if not Doc.has_extension('kw_candidates'):
            Doc.set_extension('kw_candidates', default=None)

    def render(self, doc: Doc, jupyter=None, **kw_kwargs):
        """Run extraction on *doc* and render the keywords via displaCy ('ent' style)."""
        doc = self(doc)
        keyword_spans = doc._.extract_keywords(**kw_kwargs)
        ents = [{'start': span.start_char, 'end': span.end_char, 'label': f'{round(score, 3)}'} for (span, score) in keyword_spans]
        ents.sort(key=lambda ent: ent['start'])
        examples = [{'text': doc.text, 'title': None, 'ents': ents}]
        return displacy.render(examples, style='ent', manual=True, jupyter=jupyter)

    def extract_keywords(self, doc: Doc, n=10, similarity_thresh=0.45):
        """Return up to *n* (surface-form span, score) pairs, skipping candidates
        too similar (> similarity_thresh) to an already-selected one."""
        picked = []
        for (candidate, weight) in self.candidate_weighting(doc):
            if (similarity_thresh > 0.0) and any((candidate.similarity(prev) > similarity_thresh) for (prev, _) in picked):
                continue  # redundant with an earlier pick
            picked.append((candidate, weight))
            if len(picked) >= n:
                break
        picked = picked[:min(n, len(picked))]
        return [(cand.surface_forms[0], score) for (cand, score) in picked]

    def candidate_weighting(self, doc: Doc) -> List[Tuple[(Candidate, Any)]]:
        """Baseline weighting: every candidate scores 1.0."""
        return [(candidate, 1.0) for candidate in doc._.kw_candidates]

    def candidate_selection(self, doc: Doc) -> Iterable[Candidate]:
        """Apply the strategy named by cfg['candidate_selection'] from the registry."""
        selector = registry.candidate_selection.get(self.cfg['candidate_selection'])
        return selector(doc)
class amsoftmax(nn.Module):
    """Additive-margin softmax (AM-Softmax / CosFace) loss head.

    Computes cosine similarity between L2-normalized embeddings and class
    weight columns, subtracts an additive margin on the target class,
    scales, and applies cross-entropy.
    """

    def __init__(self, input_size: int, output_size: int, margin: float=0.2, scale: float=30):
        """
        Args:
            input_size: embedding dimensionality.
            output_size: number of classes.
            margin: additive cosine margin applied to the target class.
            scale: logit scaling factor applied before cross-entropy.
        """
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.margin = margin
        self.scale = scale
        # Columns of W act as class centroids; they are normalized in forward().
        self.W = torch.nn.Parameter(torch.randn(input_size, output_size), requires_grad=True)
        self.ce = nn.CrossEntropyLoss()
        nn.init.xavier_normal_(self.W, gain=1)

    def input_size(self):
        """Embedding dimensionality expected by forward()."""
        return self._indim

    def output_size(self):
        """Number of classes."""
        return self._outdim

    def forward(self, x: torch.Tensor, label: torch.LongTensor):
        """Return (loss, scaled margin logits) for embeddings *x* and targets *label*."""
        assert (x.size()[0] == label.size()[0])
        # BUG FIX: the original compared against the bound method `self.input_size`
        # (no call), which can never equal an int, so this assert always failed.
        assert (x.size()[1] == self.input_size())
        # L2-normalize embeddings (rows) and weights (columns); clamp avoids div-by-zero.
        x_norm = torch.norm(x, p=2, dim=1, keepdim=True).clamp(min=1e-12)
        x_norm = torch.div(x, x_norm)
        w_norm = torch.norm(self.W, p=2, dim=0, keepdim=True).clamp(min=1e-12)
        w_norm = torch.div(self.W, w_norm)
        costh = torch.mm(x_norm, w_norm)
        # Margin tensor is built on CPU (scatter_ needs a CPU index here), then moved.
        label_view = label.view((- 1), 1)
        if label_view.is_cuda:
            label_view = label_view.cpu()
        delt_costh = torch.zeros(costh.size()).scatter_(1, label_view, self.margin)
        if x.is_cuda:
            delt_costh = delt_costh.cuda()
        costh_m = (costh - delt_costh)
        costh_m_s = (self.scale * costh_m)
        loss = self.ce(costh_m_s, label)
        return (loss, costh_m_s)
def main(args, init_distributed=False):
    """Top-level training entry point (fairseq-style): set up task, model and
    trainer, then run the epoch loop until the LR floor, max epoch/update, or
    early stopping is reached.

    Args:
        args: parsed argument namespace.
        init_distributed: when True, initialize the distributed backend and
            set args.distributed_rank for this worker.
    """
    utils.import_user_module(args)
    assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
    metrics.reset()
    # Bind this process to its assigned GPU before any CUDA allocation happens.
    if (torch.cuda.is_available() and (not args.cpu)):
        torch.cuda.set_device(args.device_id)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    # Only the master worker verifies the checkpoint directory is writable.
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    logger.info(args)
    task = tasks.setup_task(args)
    # Validation splits are loaded eagerly; the training data comes from the trainer.
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=1)
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    logger.info(model)
    logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    logger.info('num. model params: {} (num. trained: {})'.format(sum((p.numel() for p in model.parameters())), sum((p.numel() for p in model.parameters() if p.requires_grad))))
    # Model-parallel runs need the Megatron-aware trainer.
    if (args.model_parallel_size == 1):
        trainer = Trainer(args, task, model, criterion)
    else:
        trainer = MegatronTrainer(args, task, model, criterion)
    logger.info('training on {} GPUs'.format(args.distributed_world_size))
    logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences))
    # Restore training state from an existing checkpoint when available.
    (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer)
    max_epoch = (args.max_epoch or math.inf)
    max_update = (args.max_update or math.inf)
    lr = trainer.get_lr()
    train_meter = meters.StopwatchMeter()
    train_meter.start()
    while ((lr > args.min_lr) and (epoch_itr.next_epoch_idx <= max_epoch)):
        valid_losses = train(args, trainer, task, epoch_itr, max_update)
        if (should_stop_early(args, valid_losses[0]) or (trainer.get_num_updates() >= max_update)):
            break
        # Step the LR scheduler on validation loss, then build the next epoch iterator.
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        # Sharded datasets (os.pathsep-separated paths) must be reloaded per epoch.
        epoch_itr = trainer.get_train_iterator(epoch_itr.next_epoch_idx, load_dataset=(os.pathsep in getattr(args, 'data', '')))
    train_meter.stop()
    logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
def _add_ndarray(name: str, obj: ndarray, attributes: Dict[(str, Any)], ndarrays: Dict[(str, ndarray)], objects: Dict[(str, object)]) -> Tuple[(Dict, Dict, Dict)]:
ndarrays[name] = obj
return (attributes, ndarrays, objects) |
def get_distributed_sampler(trainer, dataset, train, **kwargs) -> torch.utils.data.sampler.Sampler:
    """Build a DistributedSampler sized from the trainer's distributed backend.

    Shuffling is enabled only for training runs that are not overfitting on a
    fixed subset of batches.

    NOTE(review): the incoming **kwargs are discarded and rebuilt from the
    trainer (as in the original) — confirm whether callers rely on passing
    overrides here.
    """
    replicas_by_backend = {
        'ddp': trainer.num_nodes * trainer.num_processes,
        'ddp_spawn': trainer.num_nodes * trainer.num_processes,
        'ddp2': trainer.num_nodes,
        'ddp_cpu': trainer.num_processes * trainer.num_nodes,
    }
    assert trainer.distributed_backend is not None
    sampler_kwargs = {
        'num_replicas': replicas_by_backend[trainer.distributed_backend],
        'rank': trainer.global_rank,
        'shuffle': train and (not trainer.overfit_batches),
    }
    return DistributedSampler(dataset, **sampler_kwargs)
def layer(x, block, ochannels, count, stride, cfg, test):
    """Stack *count* applications of *block*, each under its own parameter scope.

    The configured *stride* is used only for the first block; subsequent blocks
    run with stride (1, 1).
    """
    for idx in range(count):
        blk_stride = stride if idx == 0 else (1, 1)
        with nn.parameter_scope('layer{}'.format(idx + 1)):
            x = block(x, ochannels, blk_stride, cfg, test)
    return x
class MNIST_L2_DRP05(nn.Module):
    """Two-conv-layer MNIST classifier with dropout before the final FC layer.

    Input: (N, 1, 28, 28) images. Output: (N, 10) log-probabilities.
    """

    def __init__(self, dropout=0.5):
        """
        Args:
            dropout: drop probability for the nn.Dropout layer.
        """
        super(MNIST_L2_DRP05, self).__init__()
        # BUG FIX: the original stored the float `dropout` in self.dropout and
        # then immediately overwrote it with the nn.Dropout module below; the
        # dead float assignment has been removed.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.relu = nn.ReLU(True)
        self.pool = nn.MaxPool2d(kernel_size=2)
        # 28 -> conv5 -> 24 -> pool -> 12 -> conv5 -> 8 -> pool -> 4 (64 channels).
        self.fc1 = nn.Linear(((64 * 4) * 4), 10)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.relu(self.pool(self.conv1(x)))
        x = self.relu(self.pool(self.conv2(x)))
        x = x.view((- 1), ((64 * 4) * 4))
        x = self.dropout(x)
        x = self.fc1(x)
        return F.log_softmax(x, dim=1)
class DoubleConv(nn.Module):
    """Two stacked 3x3 Conv-BN-ReLU layers.

    With reverse=False the first conv changes the channel count; with
    reverse=True both convs keep in_channels until the second expands to
    out_channels.
    """

    def __init__(self, in_channels, out_channels, reverse=False):
        super().__init__()
        if reverse:
            channel_plan = [(in_channels, in_channels), (in_channels, out_channels)]
        else:
            channel_plan = [(in_channels, out_channels), (out_channels, out_channels)]
        convs = [Conv3x3BNReLU(cin, cout, stride=1) for (cin, cout) in channel_plan]
        self.double_conv = nn.Sequential(*convs)

    def forward(self, x):
        return self.double_conv(x)
def cosine_loss(p_logits, q_logits):
    """Cosine-embedding loss pulling *q_logits* toward a detached copy of *p_logits*.

    The target of ones (one per batch row) selects the "similar pair" branch of
    CosineEmbeddingLoss, i.e. the mean of 1 - cos(q, p) over the batch.

    BUG FIX: the original built the target with an unconditional `.cuda()`,
    which crashed on CPU-only runs; the target is now created directly on
    p_logits' device, which is equivalent on GPU inputs.
    """
    target = torch.ones(p_logits.shape[0], device=p_logits.device)
    return torch.nn.CosineEmbeddingLoss()(q_logits, p_logits.detach(), target)
class Partition9(nn.Module):
    """Auto-generated pipeline-parallel stage holding T5 decoder blocks 3-5.

    Receives the previous stage's outputs as a flat argument list, runs the
    three decoder blocks sequentially, and returns a flattened output list
    for the next stage.
    """
    # Fully-qualified scopes of the layers this partition owns.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[3]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[4]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[5]']
    # Standalone parameters/buffers owned by this partition (none here).
    TENSORS = []
    def __init__(self, layers, tensors, device='cuda:9'):
        """Wire the pre-built layers/tensors into this stage and move it to *device*."""
        super().__init__()
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register owned tensors as parameters or buffers depending on their type.
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Shape descriptor used by unflatten() to rebuild the 6 positional inputs.
        self.input_structure = [1, 1, 1, 1, 1, 1]
        # Maps local module names back to their scopes in the original model.
        self.lookup = {'l_0': 'decoder.block.3', 'l_1': 'decoder.block.4', 'l_2': 'decoder.block.5'}
        self.to(self.device)
    def forward(self, *args):
        """Run decoder blocks 3-5 on the unflattened inputs.

        Based on the keyword arguments below: x0 = encoder hidden states,
        x1 = attention mask, x2 = encoder attention mask, x3 = hidden states,
        x4 = position bias, x5 = encoder-decoder position bias (inferred —
        confirm against the partitioner that generated this code).
        """
        (x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
        t_0 = self.l_0(x3, attention_mask=x1, position_bias=x4, encoder_attention_mask=x2, encoder_decoder_position_bias=x5, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        # Each block returns a tuple; [0] is the hidden states, [2] and [3] the
        # position biases threaded into the next block.
        t_1 = t_0[slice(None, 2, None)]
        t_1 = t_1[0]
        t_2 = t_0[2]
        t_0 = t_0[3]
        t_0 = self.l_1(t_1, attention_mask=x1, position_bias=t_2, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_2 = t_0[slice(None, 2, None)]
        t_2 = t_2[0]
        t_1 = t_0[2]
        t_0 = t_0[3]
        t_0 = self.l_2(t_2, attention_mask=x1, position_bias=t_1, encoder_attention_mask=x2, encoder_decoder_position_bias=t_0, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, output_attentions=False, use_cache=False, encoder_hidden_states=x0)
        t_1 = t_0[slice(None, 2, None)]
        t_1 = t_1[0]
        t_2 = t_0[2]
        t_0 = t_0[3]
        return list(flatten((x0, x1, x2, t_1, t_2, t_0)))
    # The methods below delegate to module-level helpers (defined elsewhere in
    # this file) that understand pipeline-partitioned state.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)
    def cpu(self):
        return cpu(self)
    def cuda(self, device=None):
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def _repr_labellist(self) -> str:
    """Short repr: item count, one sample each from x and y, and the path."""
    preview = [self[i] for i in range(min(1, len(self.items)))]
    pieces = [
        f'{self.__class__.__name__} ({len(self.items)} items)',
        f'x: {self.x.__class__.__name__}',
        f'{show_some([p[0] for p in preview], n_max=1)}',
        f'y: {self.y.__class__.__name__}',
        f'{show_some([p[1] for p in preview], n_max=1)}',
        f'Path: {self.path}',
    ]
    return '\n'.join(pieces)
class SignatureEx(inspect.Signature):
    """inspect.Signature with helpers for editing parameters and formatting."""

    def drop_arg(self, argname, raise_if_not_found=False):
        """Return a copy of this signature without *argname*.

        Raises KeyError when the name is absent and raise_if_not_found is True;
        otherwise a missing name is silently ignored.
        """
        params = dict(self.parameters.items())
        if argname in params:
            params.pop(argname)
        elif raise_if_not_found:
            raise KeyError(f"'{argname}' not found in {list(params.keys())}")
        return self.replace(parameters=params.values())

    def add_arg(self, argname, kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, *, default=inspect.Parameter.empty, annotation=inspect.Parameter.empty):
        """Return a copy with a new parameter inserted at the slot appropriate for *kind*."""
        params = list(self.parameters.values())
        new_param = inspect.Parameter(argname, kind, default=default, annotation=annotation)
        params.insert(_find_insertion_index_by_kind(params, kind), new_param)
        return self.replace(parameters=params)

    def has_return_annotation(self):
        """True when the signature carries an explicit return annotation."""
        return self.return_annotation is not self.empty

    def format_argument_signature(self):
        """Render only the argument portion of str(self)."""
        return _extract_argument_signature(str(self))

    def format_return_annotation(self):
        # NOTE(review): the bound method itself is passed (not its result);
        # presumably _extract_return_annotation calls it — confirm upstream.
        return _extract_return_annotation(str(self), self.has_return_annotation)

    def format_caller_argument_signature(self):
        """Render the signature as it would appear at a call site."""
        return _create_caller_signature(self)
def prepare_params(kwargs):
    """Normalize an experiment config dict and split out the DDPG parameters.

    Mutates and returns *kwargs*: installs a make_env factory, derives T and
    gamma from the environment's episode length, normalizes max_u / lr, and
    moves DDPG-specific keys into kwargs['ddpg_params'] (keeping underscore-
    prefixed copies in kwargs).
    """
    ddpg_params = dict()
    env_name = kwargs['env_name']

    def make_env():
        return gym.make(env_name)

    kwargs['make_env'] = make_env
    tmp_env = cached_make_env(kwargs['make_env'])
    assert hasattr(tmp_env, '_max_episode_steps')
    kwargs['T'] = tmp_env._max_episode_steps
    tmp_env.reset()
    # max_u may be given as a list; convert it to an ndarray once here.
    max_u = kwargs['max_u']
    kwargs['max_u'] = np.array(max_u) if type(max_u) == list else max_u
    kwargs['gamma'] = 1.0 - 1.0 / kwargs['T']
    # A single 'lr' fans out to both actor and critic learning rates.
    if 'lr' in kwargs:
        shared_lr = kwargs.pop('lr')
        kwargs['pi_lr'] = shared_lr
        kwargs['Q_lr'] = shared_lr
    ddpg_keys = ['buffer_size', 'hidden', 'layers', 'network_class_actor_critic', 'network_class_discriminator', 'polyak', 'batch_size', 'Q_lr', 'pi_lr', 'mi_lr', 'sk_lr', 'norm_eps', 'norm_clip', 'max_u', 'action_l2', 'clip_obs', 'scope', 'relative_goals']
    for key in ddpg_keys:
        value = kwargs.pop(key)
        ddpg_params[key] = value
        kwargs['_' + key] = value
    kwargs['ddpg_params'] = ddpg_params
    return kwargs
def register_Ns3SimpleRefCount__Ns3MmWaveHarqPhy_Ns3Empty_Ns3DefaultDeleter__lt__ns3MmWaveHarqPhy__gt___methods(root_module, cls):
    """Register pybindgen methods for SimpleRefCount<MmWaveHarqPhy, ...>:
    the default constructor and the copy constructor. (Auto-generated binding code.)
    """
    cls.add_constructor([])
    # Copy constructor taking a const reference to the same template instantiation.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::MmWaveHarqPhy, ns3::empty, ns3::DefaultDeleter< ns3::MmWaveHarqPhy > > const &', 'o')])
    return
@torch.no_grad()
def eval(epoch, model, dataloader, cfg, logger, writer):
    """Run one validation pass of instance segmentation and checkpoint the model.

    Collects per-batch predicted and ground-truth instances, evaluates them
    with ScanNetEval, logs AP/AP_50/AP_25 to TensorBoard, and saves a
    checkpoint for this epoch.

    NOTE(review): the original source had a garbled bare statement `_grad()`
    above this def; it is restored here as the `@torch.no_grad()` decorator
    (the standard way to disable autograd during evaluation) — confirm against
    upstream. Also note this function shadows the builtin `eval`; the name is
    kept for caller compatibility.
    """
    logger.info('Validation')
    (pred_insts, gt_insts) = ([], [])
    progress_bar = tqdm(total=len(dataloader))
    val_dataset = dataloader.dataset
    model.eval()
    for batch in dataloader:
        result = model(batch, mode='predict')
        pred_insts.append(result['pred_instances'])
        gt_insts.append(result['gt_instances'])
        progress_bar.update()
    progress_bar.close()
    logger.info('Evaluate instance segmentation')
    scannet_eval = ScanNetEval(val_dataset.CLASSES)
    eval_res = scannet_eval.evaluate(pred_insts, gt_insts)
    writer.add_scalar('val/AP', eval_res['all_ap'], epoch)
    writer.add_scalar('val/AP_50', eval_res['all_ap_50%'], epoch)
    writer.add_scalar('val/AP_25', eval_res['all_ap_25%'], epoch)
    logger.info('AP: {:.3f}. AP_50: {:.3f}. AP_25: {:.3f}'.format(eval_res['all_ap'], eval_res['all_ap_50%'], eval_res['all_ap_25%']))
    # Checkpoint after every validation pass.
    save_file = osp.join(cfg.work_dir, f'epoch_{epoch:04d}.pth')
    gorilla.save_checkpoint(model, save_file)
def _copy_location(newnode, node):
return ast.fix_missing_locations(ast.copy_location(newnode, node)) |
def register_types(module):
root_module = module.get_root()
module.add_class('Address', import_from_module='ns.network')
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
module.add_class('ApplicationContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Application > > const_iterator', u'ns3::ApplicationContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Application > > const_iterator*', u'ns3::ApplicationContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Application > > const_iterator&', u'ns3::ApplicationContainer::Iterator&')
module.add_class('AttributeConstructionList', import_from_module='ns.core')
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
module.add_class('Average', import_from_module='ns.stats', template_parameters=['double'])
module.add_class('Buffer', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
module.add_class('ByteTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
module.add_class('ByteTagList', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
module.add_class('CallbackBase', import_from_module='ns.core')
module.add_class('DataOutputCallback', allow_subclassing=True, import_from_module='ns.stats')
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
module.add_class('DefaultDeleter', template_parameters=['ns3::RadvdInterface'])
module.add_class('DefaultDeleter', template_parameters=['ns3::RadvdPrefix'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
module.add_class('DhcpHelper')
module.add_class('EventId', import_from_module='ns.core')
module.add_class('Hasher', import_from_module='ns.core')
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('InetSocketAddress', import_from_module='ns.network')
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4Address', import_from_module='ns.network')
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
module.add_class('Ipv4InterfaceContainer', import_from_module='ns.internet')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv4 >, unsigned int > > const_iterator', u'ns3::Ipv4InterfaceContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv4 >, unsigned int > > const_iterator*', u'ns3::Ipv4InterfaceContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv4 >, unsigned int > > const_iterator&', u'ns3::Ipv4InterfaceContainer::Iterator&')
module.add_class('Ipv4Mask', import_from_module='ns.network')
module.add_class('Ipv6Address', import_from_module='ns.network')
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv6Prefix', import_from_module='ns.network')
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )', u'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )*', u'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )&', u'ns3::Mac48Address::TracedCallback&')
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Mac8Address', import_from_module='ns.network')
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', u'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', u'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', u'ns3::NetDeviceContainer::Iterator&')
module.add_class('NodeContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator', u'ns3::NodeContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator*', u'ns3::NodeContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator&', u'ns3::NodeContainer::Iterator&')
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
module.add_class('ObjectDeleter', import_from_module='ns.core')
module.add_class('ObjectFactory', import_from_module='ns.core')
module.add_class('PacketMetadata', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_class('PacketTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
module.add_class('PacketTagList', import_from_module='ns.network')
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
module.add_class('Ping6Helper')
module.add_class('RadvdHelper')
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
module.add_class('StatisticalSummary', allow_subclassing=True, import_from_module='ns.stats')
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('TagBuffer', import_from_module='ns.network')
module.add_class('TimeWithUnit', import_from_module='ns.core')
module.add_class('TypeId', import_from_module='ns.core')
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
module.add_class('V4PingHelper')
module.add_class('empty', import_from_module='ns.core')
module.add_class('int64x64_t', import_from_module='ns.core')
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RadvdInterface', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RadvdInterface>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RadvdPrefix', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RadvdPrefix>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('Time', import_from_module='ns.core')
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('Application', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time const &, ns3::Address const & )', u'ns3::Application::DelayAddressCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time const &, ns3::Address const & )*', u'ns3::Application::DelayAddressCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time const &, ns3::Address const & )&', u'ns3::Application::DelayAddressCallback&')
typehandlers.add_type_alias(u'void ( * ) ( std::string const &, std::string const & )', u'ns3::Application::StateTransitionCallback')
typehandlers.add_type_alias(u'void ( * ) ( std::string const &, std::string const & )*', u'ns3::Application::StateTransitionCallback*')
typehandlers.add_type_alias(u'void ( * ) ( std::string const &, std::string const & )&', u'ns3::Application::StateTransitionCallback&')
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('DataCalculator', import_from_module='ns.stats', parent=root_module['ns3::Object'])
module.add_class('DataOutputInterface', import_from_module='ns.stats', parent=root_module['ns3::Object'])
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('DhcpClient', parent=root_module['ns3::Application'])
module.add_class('DhcpHeader', parent=root_module['ns3::Header'])
module.add_enum('Options', ['OP_MASK', 'OP_ROUTE', 'OP_ADDREQ', 'OP_LEASE', 'OP_MSGTYPE', 'OP_SERVID', 'OP_RENEW', 'OP_REBIND', 'OP_END'], outer_class=root_module['ns3::DhcpHeader'])
module.add_enum('Messages', ['DHCPDISCOVER', 'DHCPOFFER', 'DHCPREQ', 'DHCPACK', 'DHCPNACK'], outer_class=root_module['ns3::DhcpHeader'])
module.add_class('DhcpServer', parent=root_module['ns3::Application'])
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('MinMaxAvgTotalCalculator', import_from_module='ns.stats', template_parameters=['double'], parent=[root_module['ns3::DataCalculator'], root_module['ns3::StatisticalSummary']])
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::PromiscReceiveCallback&')
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::ProtocolHandler')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::ProtocolHandler*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::ProtocolHandler&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::DeviceAdditionListener')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::DeviceAdditionListener*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::DeviceAdditionListener&')
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )', u'ns3::Packet::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )*', u'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )&', u'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', u'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', u'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', u'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', u'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', u'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', u'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', u'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', u'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', u'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', u'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', u'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', u'ns3::Packet::SinrTracedCallback&')
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
module.add_class('Ping6', parent=root_module['ns3::Application'])
module.add_class('Radvd', parent=root_module['ns3::Application'])
module.add_class('RadvdInterface', parent=root_module['ns3::SimpleRefCount< ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> >'])
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > >', u'ns3::RadvdInterface::RadvdPrefixList')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > >*', u'ns3::RadvdInterface::RadvdPrefixList*')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > >&', u'ns3::RadvdInterface::RadvdPrefixList&')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > > iterator', u'ns3::RadvdInterface::RadvdPrefixListI')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > > iterator*', u'ns3::RadvdInterface::RadvdPrefixListI*')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > > iterator&', u'ns3::RadvdInterface::RadvdPrefixListI&')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > > const_iterator', u'ns3::RadvdInterface::RadvdPrefixListCI')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > > const_iterator*', u'ns3::RadvdInterface::RadvdPrefixListCI*')
typehandlers.add_type_alias(u'std::list< ns3::Ptr< ns3::RadvdPrefix > > const_iterator&', u'ns3::RadvdInterface::RadvdPrefixListCI&')
module.add_class('RadvdPrefix', parent=root_module['ns3::SimpleRefCount< ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> >'])
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('V4Ping', parent=root_module['ns3::Application'])
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv4Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Time', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector')
module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
module.add_container('std::list< ns3::Ptr< ns3::RadvdPrefix > >', 'ns3::Ptr< ns3::RadvdPrefix >', container_type=u'list')
module.add_container('ns3::RadvdInterface::RadvdPrefixList', 'ns3::Ptr< ns3::RadvdPrefix >', container_type=u'list')
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module) |
def make_fpga_state(sdfg):
    """Build the FPGA state of the SDFG for the matrix multiplication.

    Adds the three on-chip FIFO streams that connect the systolic
    read/compute/write kernels, then populates the state with them.
    Returns the newly created state.
    """
    state = sdfg.add_state('mm')
    local_storage = dace.dtypes.StorageType.FPGA_Local
    # A and B pipes have one extra slot per processing element so values
    # can be forwarded along the array; A additionally buffers P elements.
    sdfg.add_stream('A_pipe', dace.float32, transient=True,
                    shape=(P + 1,), storage=local_storage, buffer_size='P')
    sdfg.add_stream('B_pipe', dace.float32, transient=True,
                    shape=(P + 1,), storage=local_storage)
    sdfg.add_stream('C_pipe', dace.float32, transient=True,
                    shape=(P,), storage=local_storage)
    make_read_A(state)
    make_read_B(state)
    make_compute(sdfg, state)
    make_write_C(state)
    return state
class Lexer(object):
    """Abstract lexer base; concrete subclasses must supply ``lex``."""
    lex = NotImplemented

    def make_lexer_state(self, text):
        """Wrap *text* in a fresh ``LexerState``.

        The line counter is created with a newline of the same type as the
        input (``b'\\n'`` for bytes, ``'\\n'`` for str) so counting works on
        either kind of buffer.
        """
        newline = b'\n' if isinstance(text, bytes) else '\n'
        return LexerState(text, LineCounter(newline))
def remove_index_types(data):
    """Replace integer index types (e.g. ``i32``) with the ``<TYP>`` token
    in every ``extractelement`` / ``insertelement`` statement.

    :param data: list of lists of (already ID-abstracted) LLVM-IR statement
        strings; modified in place.
    :return: the same ``data`` object, for chaining.
    """
    print('\tRemoving index types ...')
    # Compile once outside the loops instead of re-parsing the raw
    # patterns for every statement; the two original re.match calls are
    # merged into a single alternation.
    target = re.compile('<%ID> = (?:extract|insert)element')
    int_type = re.compile('i\\d+ ')
    for block in data:
        for j, stmt in enumerate(block):
            if target.match(stmt) is not None:
                block[j] = int_type.sub('<TYP> ', stmt)
    return data
def prepare_resnet50_jit(bench_args):
    """Trace a ResNet-50 with TorchScript and return ``(inputs, model)``.

    Uses a standard ImageNet-sized batch (32 x 3 x 224 x 224) as the
    tracing example.  ``bench_args`` is accepted for interface
    compatibility with the other ``prepare_*`` benchmarks but unused here.
    """
    example_inputs = (torch.randn(32, 3, 224, 224),)
    traced = torch.jit.trace(resnet50(), example_inputs)
    return (example_inputs, traced)
def save_config(config_dict, fname=None):
    """Serialize *config_dict* as JSON to the file *fname* (UTF-8).

    :param config_dict: JSON-serializable configuration mapping.
    :param fname: destination path; must be provided.
    :raises ValueError: if *fname* is None (clearer than the TypeError
        ``open`` would otherwise raise on a None path).
    """
    if fname is None:
        raise ValueError('save_config requires a destination filename')
    with open(fname, mode='w', encoding='utf-8') as f:
        json.dump(config_dict, f)
def evalSymbReg(individual, points):
    """Evaluate a GP *individual* for symbolic regression.

    Returns ``[fitness, features]`` where fitness is the mean squared
    error against the module-level reference values ``ref_vals``, and
    features are ``[length, height, d1-score, d2-score]`` — the last two
    comparing spline derivatives against ``spline_dref``/``spline_dref2``.
    Any failure during evaluation yields the penalty value 100.
    """
    func = toolbox.compile(expr=individual)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        try:
            values = np.array([func(p) for p in points])
            fitness = [np.real(np.mean((values - ref_vals) ** 2.0))]
            # Fit a cubic spline so first/second derivatives can be scored
            # against the reference derivatives.
            tck = scipy.interpolate.splrep(points, values, k=3)
            d1 = scipy.interpolate.splev(points, tck, der=1)
            d2 = scipy.interpolate.splev(points, tck, der=2)
            spline_score = np.real(np.mean((d1 - spline_dref) ** 2.0))
            spline_score2 = np.real(np.mean((d2 - spline_dref2) ** 2.0))
        except Exception:
            # Penalize individuals that cannot be evaluated or splined.
            fitness = [100.0]
            spline_score = 100.0
            spline_score2 = 100.0
    features = [len(individual), individual.height, spline_score, spline_score2]
    return [fitness, features]
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) using the Mish
    activation, with a projected shortcut when the shape changes."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.mish = Mish()
        # Identity shortcut unless the spatial size or channel count
        # changes, in which case project with a strided 1x1 conv.
        if stride != 1 or in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        y = self.mish(self.bn1(self.conv1(x)))
        y = self.mish(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + self.shortcut(x)
        return self.mish(y)
def recall(candidate, source, gold_edits, max_unchanged_words=2, beta=0.5, verbose=False):
    """Return only the recall component of :func:`pre_rec_f1`."""
    scores = pre_rec_f1(candidate, source, gold_edits,
                        max_unchanged_words, beta, verbose)
    return scores[1]
class CategoriesSampler():
    """Batch sampler yielding pairs of frame indices from the same scene,
    separated by a random temporal interval."""

    def __init__(self, labels, frame_intervals, n_per):
        # (min, max) allowed temporal gap between the two frames of a pair.
        self.frame_intervals = frame_intervals
        self.n_sample = len(labels)
        # One epoch covers the dataset in batches of n_per anchor frames.
        self.n_batch = (self.n_sample // n_per)
        self.n_per = n_per
        # scenes[i]: scene name of sample i; scene_id: frame count per scene.
        self.scenes = []
        self.scene_id = {}
        for (idx, label) in enumerate(labels):
            scene_name = label['scene_name']
            if (scene_name not in self.scene_id.keys()):
                self.scene_id.update({scene_name: 0})
            self.scene_id[scene_name] += 1
            self.scenes.append(scene_name)

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for i_batch in range(self.n_batch):
            batch = []
            # Pick n_per random anchor frames over the whole dataset.
            frame_a = torch.randperm(self.n_sample)[:self.n_per]
            for c in frame_a:
                scene_name = self.scenes[c]
                # Gap bounded by the configured interval and half the
                # anchor's scene length.
                tmp_intervals = random.randint(self.frame_intervals[0], min((self.scene_id[scene_name] // 2), self.frame_intervals[1]))
                if (c < (self.n_sample - tmp_intervals)):
                    if (self.scenes[(c + tmp_intervals)] == scene_name):
                        # Forward partner stays in the same scene.
                        pair_c = (c + tmp_intervals)
                    else:
                        # Forward partner would cross a scene boundary:
                        # pair backwards instead.
                        # NOTE(review): if c < tmp_intervals this makes c
                        # negative and Python index wraparound kicks in —
                        # presumably anchors that early in the dataset are
                        # expected to pair forwards; confirm.
                        pair_c = c
                        c = (c - tmp_intervals)
                else:
                    # Too close to the end of the dataset: pair backwards.
                    pair_c = c
                    c = (c - tmp_intervals)
                assert (self.scenes[c] == self.scenes[pair_c])
                batch.append(torch.tensor([c, pair_c]))
            # Flatten to [c0, pair0, c1, pair1, ...].
            batch = torch.stack(batch).reshape((- 1))
            (yield batch)
def conv_5_3_hook(module, input, output):
    """Forward hook that stashes the layer's output in the module-level
    ``vgg_conv5_3`` variable.

    Returns None so the hook does not replace the layer's output.
    """
    global vgg_conv5_3
    vgg_conv5_3 = output
def ReflectionGroup(*args, **kwds):
    """Construct a (real or complex) reflection group via GAP3's 'chevie'.

    Each positional argument may be a Cartan type, a Shephard-Todd index
    (an integer in 4..37), or a triple ``(r, p, n)`` for the imprimitive
    group ``G(r, p, n)``; lists are coerced to tuples.  The keywords
    ``index_set``, ``hyperplane_index_set`` and ``reflection_index_set``
    must be lists or tuples if given.

    :raises ImportError: if the GAP3 'chevie' package is unavailable.
    :raises ValueError: if an argument does not describe a finite
        reflection group, or an index-set keyword is not a list/tuple.
    """
    if (not is_chevie_available()):
        raise ImportError("the GAP3 package 'chevie' is needed to work with (complex) reflection groups")
    from sage.interfaces.gap3 import gap3
    gap3.load_package('chevie')
    error_msg = 'the input data (%s) is not valid for reflection groups'
    # First pass: normalize every argument into a flat list of irreducible
    # component descriptors (letter/rank pairs, integers, or triples).
    W_types = []
    is_complex = False
    for arg in args:
        if isinstance(arg, list):
            X = tuple(arg)
        else:
            X = arg
        # Valid inputs: a Cartan type, a tuple, or an exceptional-group
        # Shephard-Todd index in 4..37.
        if (not (isinstance(X, (CartanType_abstract, tuple)) or ((X in ZZ) and (4 <= X <= 37)))):
            raise ValueError((error_msg % X))
        if isinstance(X, CartanType_abstract):
            if (not X.is_finite()):
                raise ValueError((error_msg % X))
            if hasattr(X, 'cartan_type'):
                X = X.cartan_type()
            # Split reducible Cartan types into irreducible components.
            if X.is_irreducible():
                W_types.extend([(X.letter, X.n)])
            else:
                W_types.extend([(x.letter, x.n) for x in X.component_types()])
        elif ((X == (2, 2, 2)) or (X == ('I', 2))):
            # G(2,2,2) and I_2(2) are the reducible group A1 x A1.
            W_types.extend([('A', 1), ('A', 1)])
        elif (X == (2, 2, 3)):
            # G(2,2,3) is isomorphic to A3.
            W_types.extend([('A', 3)])
        else:
            W_types.append(X)
    # Second pass: rewrite the Shephard-Todd indices and triples that are
    # actually real Coxeter groups into letter/rank form; anything left as
    # an integer or triple is genuinely complex.
    for (i, W_type) in enumerate(W_types):
        if (W_type in ZZ):
            if (W_type == 23):
                W_types[i] = ('H', 3)
            elif (W_type == 28):
                W_types[i] = ('F', 4)
            elif (W_type == 30):
                W_types[i] = ('H', 4)
            elif (W_type == 35):
                W_types[i] = ('E', 6)
            elif (W_type == 36):
                W_types[i] = ('E', 7)
            elif (W_type == 37):
                W_types[i] = ('E', 8)
        if (isinstance(W_type, tuple) and (len(W_type) == 3)):
            # Imprimitive triples with a real form:
            # G(1,1,n) = A_{n-1}, G(2,1,n) = B_n, G(2,2,n) = D_n,
            # G(r,r,2) = I_2(r).
            if (W_type[0] == W_type[1] == 1):
                W_types[i] = ('A', (W_type[2] - 1))
            elif ((W_type[0] == 2) and (W_type[1] == 1)):
                W_types[i] = ('B', W_type[2])
            elif (W_type[0] == W_type[1] == 2):
                W_types[i] = ('D', W_type[2])
            elif ((W_type[0] == W_type[1]) and (W_type[2] == 2)):
                W_types[i] = ('I', W_type[0])
        W_type = W_types[i]
        # Whatever is still an integer index or a triple has no real form.
        if ((W_type in ZZ) or (isinstance(W_type, tuple) and (len(W_type) == 3))):
            is_complex = True
    # Normalize and validate the optional index-set keywords.
    for index_set_kwd in ['index_set', 'hyperplane_index_set', 'reflection_index_set']:
        index_set = kwds.get(index_set_kwd, None)
        if (index_set is not None):
            if isinstance(index_set, (list, tuple)):
                kwds[index_set_kwd] = tuple(index_set)
            else:
                raise ValueError(('the keyword %s must be a list or tuple' % index_set_kwd))
    # Dispatch: irreducible vs product, real vs complex.
    if (len(W_types) == 1):
        if (is_complex is True):
            cls = IrreducibleComplexReflectionGroup
        else:
            cls = IrreducibleRealReflectionGroup
    elif (is_complex is True):
        cls = ComplexReflectionGroup
    else:
        cls = RealReflectionGroup
    return cls(tuple(W_types), index_set=kwds.get('index_set', None), hyperplane_index_set=kwds.get('hyperplane_index_set', None), reflection_index_set=kwds.get('reflection_index_set', None))
class AutoEncoderConfig(DetectorConfig, NormalizingConfig):
    """Configuration for an autoencoder-based anomaly detector.

    Fix: the hyperparameters were previously accepted but silently
    discarded; they are now stored on the config instance so the model
    can read them.
    """
    _default_threshold = AggregateAlarms(alm_threshold=2.5, abs_score=True)

    def __init__(self, hidden_size: int=5, layer_sizes: Sequence[int]=(25, 10, 5), sequence_len: int=1, lr: float=0.001, batch_size: int=512, num_epochs: int=50, **kwargs):
        """
        :param hidden_size: dimension of the latent (bottleneck) layer
        :param layer_sizes: sizes of the encoder hidden layers
        :param sequence_len: length of the input window
        :param lr: learning rate
        :param batch_size: training batch size
        :param num_epochs: number of training epochs
        """
        self.hidden_size = hidden_size
        self.layer_sizes = layer_sizes
        self.sequence_len = sequence_len
        self.lr = lr
        self.batch_size = batch_size
        self.num_epochs = num_epochs
        super().__init__(**kwargs)
def test_duplicate_keys():
    """Schema-driven JSON parsing: the first occurrence of a duplicated
    object key is the one that is kept (per the expected values below)."""
    source = ' [ { "x" :1 ,"y":1.1, "x": 999},{"y": 2.2, "y": 999, "x": 2}, {"x": 3, "x": 999, "y": 3.3}]'
    schema = {
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {'x': {'type': 'integer'}, 'y': {'type': 'number'}},
            'required': ['x', 'y'],
        },
    }
    result = ak.operations.from_json(source, schema=schema)
    expected = [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}, {'x': 3, 'y': 3.3}]
    assert result.to_list() == expected
    assert str(result.type) == '3 * {x: int64, y: float64}'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.