class BoolQProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'train.jsonl'), 'train')
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'val.jsonl'), 'dev')
def get_test_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'test.jsonl'), 'test')
def get_dev32_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'dev32.jsonl'), 'dev32')
def get_unlabeled_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, 'unlabeled.jsonl'), 'unlabeled')
def get_labels(self):
return ['False', 'True']
@staticmethod
def _create_examples(path: str, set_type: str) -> List[InputExample]:
examples = []
with open(path, encoding='utf8') as f:
for line in f:
example_json = json.loads(line)
idx = example_json['idx']
label = (str(example_json['label']) if ('label' in example_json) else None)
guid = ('%s-%s' % (set_type, idx))
text_a = example_json['passage']
text_b = example_json['question']
example = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, idx=idx)
examples.append(example)
return examples |
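A quick usage sketch (directory and record values are illustrative): each line of train.jsonl must carry the idx, passage, question and, for labeled splits, label fields that _create_examples reads.

processor = BoolQProcessor()
# one line of train.jsonl, e.g.: {"idx": 0, "passage": "...", "question": "...", "label": true}
examples = processor.get_train_examples('data/BoolQ')
print(examples[0].guid)  # 'train-0'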
class WeiboDataAdmin(ReadOnlyModelAdmin):
list_display = ('weibo_id', 'uid', 'create_time', 'weibo_cont', 'repost_num', 'comment_num', 'praise_num')
search_fields = ['weibo_cont', 'weibo_id']
list_per_page = 20 |
def get_pinned_packages():
pkgs = {'NUMPY', 'PANDAS', 'SKLEARN', 'PYTHON'}
pinned = {}
for env_name in pkgs:
key = f'CI_{env_name}_VERSION'
ver = os.environ.get(key, '*')
pinned[key] = ver
return pinned |
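Illustrative usage, assuming the CI_*_VERSION environment variables are the pinning convention; unset packages fall back to '*'.

os.environ['CI_NUMPY_VERSION'] = '1.26.4'
print(get_pinned_packages())
# {'CI_NUMPY_VERSION': '1.26.4', 'CI_PANDAS_VERSION': '*', ...} (key order varies: pkgs is a set)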
def get_test_name_from_whole_path(path: str) -> str:
start = path.rfind('/')
end = path.rfind('.')
assert ((start >= 0) and (end >= 0))
return path[(start + 1):end] |
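A usage sketch (paths made up): the helper slices between the last '/' and the last '.', so only the text after the final separator and before the final dot survives.

assert get_test_name_from_whole_path('tests/unit/test_parser.py') == 'test_parser'
assert get_test_name_from_whole_path('suite/io/test_io.cc') == 'test_io'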
def get_config():
config = ConfigDict()
config.run = run = ConfigDict()
run.name = 'infty_diff'
run.experiment = 'ffhq_mollified_128'
run.wandb_dir = ''
run.wandb_mode = 'online'
config.data = data = ConfigDict()
data.name = 'ffhq'
data.root_dir = ''
data.img_size = FieldReference(128)
data.channels = 3
data.fid_samples = 50000
config.train = train = ConfigDict()
train.load_checkpoint = False
train.amp = True
train.batch_size = 16
train.sample_size = 16
train.plot_graph_steps = 100
train.plot_samples_steps = 5000
train.checkpoint_steps = 10000
train.ema_update_every = 10
train.ema_decay = 0.995
config.model = model = ConfigDict()
model.nf = 64
model.time_emb_dim = 256
model.num_conv_blocks = 3
model.knn_neighbours = 3
model.depthwise_sparse = True
model.kernel_size = 7
model.backend = 'torchsparse'
model.uno_res = 64
model.uno_base_channels = 64
model.uno_mults = (1, 2, 4, 8)
model.uno_blocks_per_level = (2, 2, 2, 2)
model.uno_attn_resolutions = [16, 8]
model.uno_dropout_from_resolution = 16
model.uno_dropout = 0.1
model.uno_conv_type = 'conv'
model.z_dim = 256
model.learn_sigma = False
model.sigma_small = False
model.stochastic_encoding = False
model.kld_weight = 0.0001
config.diffusion = diffusion = ConfigDict()
diffusion.steps = 1000
diffusion.noise_schedule = 'cosine'
diffusion.schedule_sampler = 'uniform'
diffusion.loss_type = 'mse'
diffusion.gaussian_filter_std = 1.0
diffusion.model_mean_type = 'mollified_epsilon'
diffusion.multiscale_loss = False
diffusion.multiscale_max_img_size = (config.data.get_ref('img_size') // 2)
diffusion.mollifier_type = 'dct'
config.mc_integral = mc_integral = ConfigDict()
mc_integral.type = 'uniform'
mc_integral.q_sample = ((config.data.get_ref('img_size') ** 2) // 4)
config.optimizer = optimizer = ConfigDict()
optimizer.learning_rate = 0.0001
optimizer.adam_beta1 = 0.9
optimizer.adam_beta2 = 0.99
optimizer.warmup_steps = 0
optimizer.gradient_skip = False
optimizer.gradient_skip_threshold = 500.0
return config |
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
val = str(val)
if (val in NULL_VALUES):
return [np.nan]
if (not validate_br_cpf(val)):
if (errors == 'raise'):
raise ValueError(f'Unable to parse value {val}')
error_result = (val if (errors == 'ignore') else np.nan)
return [error_result]
if (output_format == 'compact'):
result = [cpf.compact(val)]
elif (output_format == 'standard'):
result = [cpf.format(val)]
return result |
def momentum(parameters, gradients, mu, eps):
t = U.create_shared(1)
m = ((1 - (3.0 / (t + 5))) < mu)
mu = ((m * (1 - (3.0 / (t + 5)))) + ((1 - m) * mu))
deltas = [U.create_shared(np.zeros(p.get_value().shape)) for p in parameters]
delta_nexts = [((mu * delta) + (eps * grad)) for (delta, grad) in zip(deltas, gradients)]
delta_updates = [(delta, delta_next) for (delta, delta_next) in zip(deltas, delta_nexts)]
param_updates = [(param, (param - delta_next)) for (param, delta_next) in zip(parameters, delta_nexts)]
return ((delta_updates + param_updates) + [(t, (t + 1))]) |
class advanced_model(torch.nn.Module):
def __init__(self):
super(advanced_model, self).__init__()
self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
self.bn1 = BatchNorm2d(3)
self.relu1 = ReLU()
self.conv2 = Conv2d(3, 3, kernel_size=1, stride=1)
self.bn2 = BatchNorm2d(3)
self.relu2 = ReLU()
self.dense = Linear(8, 7)
def forward(self, inp):
x = self.conv1(inp)
x = self.bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.dense(x)
return x |
def _get_build_requires(config_settings):
config_settings = _fix_config(config_settings)
requirements = ['setuptools', 'wheel']
sys.argv = ((sys.argv[:1] + ['egg_info']) + config_settings['--global-option'])
try:
with Distribution.patch():
_run_setup()
except SetupRequirementsError as e:
requirements += e.specifiers
return requirements |
class _suppress_stdout_stderr(object):
def __init__(self):
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
for fd in (self.null_fds + self.save_fds):
os.close(fd) |
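Typical use as a context manager: anything written to file descriptors 1 and 2, including by C extensions, is discarded inside the block. noisy_call is hypothetical.

with _suppress_stdout_stderr():
    noisy_call()  # stdout/stderr silenced here, even for native code
print('visible again')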
class UpstreamExpert(nn.Module):
def __init__(self, ckpt: str=None, model_config: str=None, **kwargs):
super().__init__()
self.name = '[Example UpstreamExpert]'
print(f'{self.name} - You can use model_config to construct your customized model: {model_config}')
print(f'{self.name} - You can use ckpt to load your pretrained weights: {ckpt}')
print(f"{self.name} - If you store the pretrained weights and model config in a single file, you can just choose one argument (ckpt or model_config) to pass. It's up to you!")
self.model1 = nn.Linear(1, HIDDEN_DIM)
self.model2 = nn.Linear(HIDDEN_DIM, HIDDEN_DIM)
def get_downsample_rates(self, key: str) -> int:
return 1
def forward(self, wavs: List[Tensor]) -> Dict[(str, Union[(Tensor, List[Tensor])])]:
wavs = pad_sequence(wavs, batch_first=True).unsqueeze((- 1))
hidden = self.model1(wavs)
feature = self.model2(hidden)
return {'hidden_states': [hidden, feature], 'PR': [hidden, feature], 'ASR': [hidden, feature], 'QbE': [hidden, feature], 'SID': [hidden, feature], 'ASV': [hidden, feature], 'SD': [hidden, feature], 'ER': [hidden, feature], 'SF': [hidden, feature], 'SE': [hidden, feature], 'SS': [hidden, feature], 'secret': [hidden, feature]} |
def parse_ml_domain(ml_domain):
intent_utterances = {}
customer_entities = {}
intent_label_to_api_name = {}
api_name_to_intent_label = {}
for item in ml_domain:
data_type = list(item.keys())[0]
if (data_type == 'mlIntents'):
(intent_set_api_name, intent_utterance_type, intent_utts, intent_to_api, api_to_intent_label) = _parse_ml_intents(item[data_type])
intent_label_to_api_name.update(intent_to_api)
api_name_to_intent_label.update(api_to_intent_label)
if (len(intent_utts) > 0):
if (intent_utterance_type not in intent_utterances):
intent_utterances[intent_utterance_type] = {}
intent_utterances[intent_utterance_type][intent_set_api_name] = intent_utts
if (data_type == 'mlSlotClasses'):
(entity_api_name, extraction_type, values) = parse_ml_slot_classes(item[data_type])
if (len(values) > 0):
if (extraction_type not in customer_entities):
customer_entities[extraction_type] = {}
customer_entities[extraction_type][entity_api_name] = values
return (intent_utterances, customer_entities, intent_label_to_api_name, api_name_to_intent_label) |
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
def __init__(self, ufunc, casting, from_, to, i):
super().__init__(ufunc, casting, from_, to)
self.out_i = i
def __str__(self):
i_str = ('{} '.format(self.out_i) if (self.ufunc.nout != 1) else '')
return 'Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting rule {!r}'.format(self.ufunc.__name__, i_str, self.from_, self.to, self.casting) |
class Train():
def __init__(self, config):
self.batch_size = config.batch_size
self.image_path = config.image_path
self.align_path = config.align_path
self.num_gpus = config.num_gpus
self.ctx = setting_ctx(self.num_gpus)
self.num_workers = config.num_workers
self.seq_len = 75
def build_model(self, dr_rate=0, path=None):
self.net = LipNet(dr_rate)
self.net.hybridize()
self.net.initialize(ctx=self.ctx)
if (path is not None):
self.load_model(path)
self.loss_fn = gluon.loss.CTCLoss()
self.trainer = gluon.Trainer(self.net.collect_params(), optimizer='SGD')
def save_model(self, epoch, loss):
prefix = 'checkpoint/epoches'
file_name = '{prefix}_{epoch}_loss_{l:.4f}'.format(prefix=prefix, epoch=str(epoch), l=loss)
self.net.save_parameters(file_name)
def load_model(self, path=''):
self.net.load_parameters(path)
def load_dataloader(self):
input_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.7136, 0.4906, 0.3283), (0.1138, 0.1078, 0.0917))])
training_dataset = LipsDataset(self.image_path, self.align_path, mode='train', transform=input_transform, seq_len=self.seq_len)
self.train_dataloader = mx.gluon.data.DataLoader(training_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
valid_dataset = LipsDataset(self.image_path, self.align_path, mode='valid', transform=input_transform, seq_len=self.seq_len)
self.valid_dataloader = mx.gluon.data.DataLoader(valid_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
def train(self, data, label, batch_size):
sum_losses = 0
len_losses = 0
with autograd.record():
losses = [self.loss_fn(self.net(X), Y) for (X, Y) in zip(data, label)]
for loss in losses:
sum_losses += mx.nd.array(loss).sum().asscalar()
len_losses += len(loss)
loss.backward()
self.trainer.step(batch_size)
return (sum_losses, len_losses)
def infer(self, input_data, input_label):
sum_losses = 0
len_losses = 0
for (data, label) in zip(input_data, input_label):
pred = self.net(data)
sum_losses += mx.nd.array(self.loss_fn(pred, label)).sum().asscalar()
len_losses += len(data)
pred_convert = char_beam_search(pred)
label_convert = char_conv(label.asnumpy())
for (target, pred) in zip(label_convert, pred_convert):
print('target:{t} pred:{p}'.format(t=target, p=pred))
return (sum_losses, len_losses)
def train_batch(self, dataloader):
sum_losses = 0
len_losses = 0
for (input_data, input_label) in tqdm(dataloader):
data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False)
label = gluon.utils.split_and_load(input_label, self.ctx, even_split=False)
batch_size = input_data.shape[0]
(batch_sum, batch_len) = self.train(data, label, batch_size)
sum_losses += batch_sum
len_losses += batch_len
return (sum_losses, len_losses)
def infer_batch(self, dataloader):
sum_losses = 0
len_losses = 0
for (input_data, input_label) in dataloader:
data = gluon.utils.split_and_load(input_data, self.ctx, even_split=False)
label = gluon.utils.split_and_load(input_label, self.ctx, even_split=False)
(batch_sum, batch_len) = self.infer(data, label)
sum_losses += batch_sum
len_losses += batch_len
return (sum_losses, len_losses)
def run(self, epochs):
best_loss = sys.maxsize
for epoch in trange(epochs):
iter_no = 0
(sum_losses, len_losses) = self.train_batch(self.train_dataloader)
if ((iter_no % 20) == 0):
current_loss = (sum_losses / len_losses)
print('[Train] epoch:{e} iter:{i} loss:{l:.4f}'.format(e=epoch, i=iter_no, l=current_loss))
(sum_val_losses, len_val_losses) = self.infer_batch(self.valid_dataloader)
current_val_loss = (sum_val_losses / len_val_losses)
print('[Valid] epoch:{e} iter:{i} loss:{l:.4f}'.format(e=epoch, i=iter_no, l=current_val_loss))
if (best_loss > current_val_loss):
self.save_model(epoch, current_val_loss)
best_loss = current_val_loss
iter_no += 1 |
class LSTMUtteranceEmbedder(nn.Module):
def __init__(self, token_embedder, lstm_dim, max_words):
super(LSTMUtteranceEmbedder, self).__init__()
self._token_embedder = token_embedder
self._bilstm = BidirectionalSourceEncoder(token_embedder.embed_dim, lstm_dim, nn.LSTMCell)
self._embed_dim = lstm_dim
self._max_words = max_words
def forward(self, utterances):
utterances = [(utterance[:self._max_words] + [EOS]) for utterance in utterances]
token_indices = SequenceBatch.from_sequences(utterances, self._token_embedder.vocab)
token_embeds = self._token_embedder.embed_seq_batch(token_indices)
bi_hidden_states = self._bilstm(token_embeds.split())
final_states = torch.cat(bi_hidden_states.final_states, 1)
return torch.stack(final_states, 0)
@property
def embed_dim(self):
return self._embed_dim
@property
def max_words(self):
return self._max_words
@property
def token_embedder(self):
return self._token_embedder |
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data')
parser.add_argument('--tasks', help='tasks to download data for as a comma separated string', type=str, default='all')
parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt', type=str, default='')
args = parser.parse_args(arguments)
if (not os.path.isdir(args.data_dir)):
os.mkdir(args.data_dir)
tasks = get_tasks(args.tasks)
for task in tasks:
if (task == 'MRPC'):
format_mrpc(args.data_dir, args.path_to_mrpc)
elif (task == 'diagnostic'):
download_diagnostic(args.data_dir)
else:
download_and_extract(task, args.data_dir) |
class CompactBilinearPooling(nn.Module):
def __init__(self, input_dim1, input_dim2, output_dim, sum_pool=True):
super().__init__()
self.output_dim = output_dim
self.sum_pool = sum_pool
self.sketch1 = nn.Parameter(self.generate_sketch_matrix(torch.randint(output_dim, size=(input_dim1,)), ((2 * torch.randint(2, size=(input_dim1,))) - 1), input_dim1, output_dim), requires_grad=False)
self.sketch2 = nn.Parameter(self.generate_sketch_matrix(torch.randint(output_dim, size=(input_dim2,)), ((2 * torch.randint(2, size=(input_dim2,))) - 1), input_dim2, output_dim), requires_grad=False)
def generate_sketch_matrix(self, rand_h, rand_s, input_dim, output_dim):
return torch.sparse.FloatTensor(torch.stack([torch.arange(input_dim, out=torch.LongTensor()), rand_h.long()]), rand_s.float(), [input_dim, output_dim]).to_dense()
def forward(self, x1, x2):
assert (len(x1.shape) == len(x2.shape))
if ((len(x1.shape) == 4) and (len(x2.shape) == 4)):
fft1 = rfft(x1.permute(0, 2, 3, 1).matmul(self.sketch1), signal_ndim=1)
fft2 = rfft(x2.permute(0, 2, 3, 1).matmul(self.sketch2), signal_ndim=1)
else:
fft1 = rfft(x1.matmul(self.sketch1), signal_ndim=1)
fft2 = rfft(x2.matmul(self.sketch2), signal_ndim=1)
fft_product = torch.stack([((fft1[(..., 0)] * fft2[(..., 0)]) - (fft1[(..., 1)] * fft2[(..., 1)])), ((fft1[(..., 0)] * fft2[(..., 1)]) + (fft1[(..., 1)] * fft2[(..., 0)]))], dim=(- 1))
cbp = (irfft(fft_product, signal_ndim=1, dim=(- 1), s=(self.output_dim,)) * self.output_dim)
if ((len(x1.shape) == 4) and (len(x2.shape) == 4)):
cbp = (cbp.sum(dim=[1, 2]) if self.sum_pool else cbp.permute(0, 3, 1, 2))
return cbp |
class Experiment(ABC, LoggingBase):
def __init__(self, cfg: ExperimentConfig):
super().__init__()
self._config = cfg
self._threads = 1
self._invocations = 1
self._invocation_barrier = Semaphore(self._invocations)
@property
def config(self):
return self._config
@staticmethod
@abstractmethod
def name() -> str:
pass
@staticmethod
@abstractmethod
def typename() -> str:
pass |
class NerServicer(ner_pb2_grpc.NERPredictorServiceServicer):
def __init__(self, batch_predictor):
super(NerServicer, self).__init__()
self.predictor = batch_predictor
def predict(self, request, context):
try:
text = request.document.decode('utf-8')
response = self.predictor.predict(text, request.returnSpan)
except Exception as e:
response = ner_pb2.NERPredictionResponse()
response.success = False
response.error = str(e)
return response |
class LLama2LoraKbitEngine(CausalLoraKbitEngine):
config_name: str = 'llama2_lora_kbit_engine'
def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
model_name = 'daryl149/llama-2-7b-chat-hf'
super().__init__(model_name=model_name, weights_path=weights_path, target_modules=['q_proj', 'v_proj'], trust_remote_code=True, load_4bit=True)
self.tokenizer.pad_token = self.tokenizer.eos_token
self.tokenizer.pad_token_id = self.tokenizer.eos_token_id |
def download_all(path=None):
if (pooch is None):
raise ImportError("Missing optional dependency 'pooch' required for scipy.datasets module. Please use pip or conda to install 'pooch'.")
if (path is None):
path = pooch.os_cache('scipy-data')
for (dataset_name, dataset_hash) in _registry.registry.items():
pooch.retrieve(url=_registry.registry_urls[dataset_name], known_hash=dataset_hash, fname=dataset_name, path=path) |
def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):
logger = logging.getLogger(logger_name)
if (logger_name in initialized_logger):
return logger
format_str = '%(asctime)s %(levelname)s: %(message)s'
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False
(rank, _) = get_dist_info()
if (rank != 0):
logger.setLevel('ERROR')
elif (log_file is not None):
logger.setLevel(log_level)
file_handler = logging.FileHandler(log_file, 'a')
file_handler.setFormatter(logging.Formatter(format_str))
file_handler.setLevel(log_level)
logger.addHandler(file_handler)
initialized_logger[logger_name] = True
return logger |
def load_genre_dict(fname: str) -> Dict[(str, Any)]:
genre_dict = {}
with open(fname, 'r') as f:
reader = csv.reader(f)
for row in reader:
genre_dict[row[0]] = 1
return genre_dict |
class TorchBenchmarkBase(object):
def __init__(self):
self.user_given_name = None
self._jit_forward = None
self._pass_count = 0
self._auto_set_counter = 0
self._num_inputs_require_grads = 0
self._is_backward = False
def _set_backward_test(self, is_backward):
self._is_backward = is_backward
def auto_set(self):
if (not self._is_backward):
return False
if (self._pass_count == 0):
self._num_inputs_require_grads += 1
return True
else:
self._auto_set_counter += 1
return (self._pass_count == self._auto_set_counter)
def forward(self):
pass
def _wrap_forward(self, foo):
return torch.ops.operator_benchmark._consume(self.forward())
def _generate_jit_forward_graph(self):
func = torch.jit.trace(self._wrap_forward, torch.rand(1))
place_holder = torch.rand(1)
@torch.jit.script
def _jit_forward_graph(iters, place_holder):
result = torch.jit.annotate(torch.Tensor, place_holder)
for _ in range(iters):
result = func(place_holder)
return result
return _jit_forward_graph
def module_name(self):
if self.user_given_name:
return self.user_given_name
return self.__class__.__name__
def set_module_name(self, name):
self.user_given_name = name
def test_name(self, **kargs):
skip_key_list = ['device']
test_name_str = []
for key in kargs:
value = kargs[key]
test_name_str.append((('' if (key in skip_key_list) else key) + str((value if (type(value) != bool) else int(value)))))
name = ((self.module_name() + '_') + '_'.join(test_name_str)).replace(' ', '')
return name |
def test_nested_constants():
@dace.program
def program(A: dace.int64[20]):
i = A[0]
j = (i + 1)
k = (j + 1)
l = (i + k)
A[l] = k
sdfg = program.to_sdfg()
ScalarToSymbolPromotion().apply_pass(sdfg, {})
ConstantPropagation().apply_pass(sdfg, {})
assert (set(sdfg.symbols.keys()) == {'i'})
sdfg.simplify()
assert (sdfg.number_of_nodes() == 2)
last_state = sdfg.sink_nodes()[0]
sink = last_state.sink_nodes()[0]
memlet = last_state.in_edges(sink)[0].data
assert (memlet.data == 'A')
assert (str(memlet.subset) == '2*i + 2') |
def move_cpp_tensors_to_device(cpp_tensor_stmts, device):
return ['{}.to("{}")'.format(tensor_stmt, device) for tensor_stmt in cpp_tensor_stmts] |
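For example (the statements are illustrative C++ fragments, not executed here):

stmts = ['torch::ones({2, 3})', 'torch::randn({4})']
print(move_cpp_tensors_to_device(stmts, 'cuda'))
# ['torch::ones({2, 3}).to("cuda")', 'torch::randn({4}).to("cuda")']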
def fit_predict(estimator, X):
tic = perf_counter()
model_name = estimator[(- 1)].__class__.__name__
if (model_name == 'LocalOutlierFactor'):
estimator.fit(X)
y_pred = estimator[(- 1)].negative_outlier_factor_
else:
y_pred = estimator.fit(X).decision_function(X)
toc = perf_counter()
print(f'Duration for {model_name}: {(toc - tic):.2f} s')
return y_pred |
class _SetupBuilder(NetBuilder):
INIT = 'init'
EXIT = 'exit'
def __init__(self, type, name=None):
NetBuilder.__init__(self, name)
self.type = type
def setup(self, net):
if (self.type == _SetupBuilder.INIT):
return core.to_execution_step(self)
def exit(self, net):
if (self.type == _SetupBuilder.EXIT):
return core.to_execution_step(self) |
def auto_adjust_limits(aspect_ratio=0.8):
ax = plt.gca()
ax.autoscale()
ax.relim()
ax.autoscale_view()
(x0, x1) = ax.get_xlim()
(y0, y1) = ax.get_ylim()
ax.set_aspect(((abs((x1 - x0)) / abs((y1 - y0))) * aspect_ratio))
plt.draw() |
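Minimal usage sketch with matplotlib:

import matplotlib.pyplot as plt
plt.plot([0, 1, 2], [0, 4, 1])
auto_adjust_limits(aspect_ratio=0.8)
plt.show()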
def get_win_launcher(type):
launcher_fn = ('%s.exe' % type)
if is_64bit():
launcher_fn = launcher_fn.replace('.', '-64.')
else:
launcher_fn = launcher_fn.replace('.', '-32.')
return resource_string('setuptools', launcher_fn) |
def resnet101(pretrained=False, progress=True, **kwargs):
model = _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
model.fc = nn.Linear(2048, kwargs['num_classes'])
return model |
def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
(xmin, xmax) = (np.min(x), np.max(x))
stepsize = ((xmax - xmin) / np.float32(n_bins))
min_bins = (np.float32(n_bins) * (np.float32(1) - np.float32(ratio)))
(xq, loss) = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
solutions = []
(cur_min, cur_max, cur_loss) = (xmin, xmax, loss)
thr = (min_bins * stepsize)
while ((cur_min + thr) < cur_max):
(xq, loss1) = _compress_uniform_simplified(x, bit_rate, (cur_min + stepsize), cur_max)
(xq, loss2) = _compress_uniform_simplified(x, bit_rate, cur_min, (cur_max - stepsize))
if ((cur_loss < loss1) and (cur_loss < loss2)):
solutions.append((cur_min, cur_max, cur_loss))
if (loss1 < loss2):
(cur_min, cur_max, cur_loss) = ((cur_min + stepsize), cur_max, loss1)
else:
(cur_min, cur_max, cur_loss) = (cur_min, (cur_max - stepsize), loss2)
if len(solutions):
best = solutions[0]
for solution in solutions:
if (solution[(- 1)] < best[(- 1)]):
best = solution
return (best[0], best[1])
return (xmin, xmax) |
class PolyWarmupSGD(torch.optim.SGD):
def __init__(self, params, lr, weight_decay, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None, **kwargs):
super().__init__(params, lr=lr, momentum=0.9, weight_decay=weight_decay)
self.global_step = 0
self.warmup_iter = warmup_iter
self.warmup_lr = warmup_ratio
self.max_iter = max_iter
self.power = power
self.__init_lr = [group['lr'] for group in self.param_groups]
def step(self, closure=None):
if (self.global_step < self.warmup_iter):
lr_mult = ((1 - (self.global_step / self.warmup_iter)) ** self.power)
for i in range(len(self.param_groups)):
self.param_groups[i]['lr'] = ((self.__init_lr[i] * lr_mult) * 10)
elif (self.global_step < self.max_iter):
lr_mult = ((1 - ((self.global_step - self.warmup_iter) / (self.max_iter - self.warmup_iter))) ** self.power)
for i in range(len(self.param_groups)):
self.param_groups[i]['lr'] = (self.__init_lr[i] * lr_mult)
super().step(closure)
self.global_step += 1 |
def interpolation_gb_heuristic(d):
d = copy(d)
I = d['I']
if ((not d.get('other_ordering_opts', False)) and want_interpolation_gb(I)):
d['interpolation_gb'] = True
d['other_ordering_first'] = False
return d |
def popup_button(label, width=0, enabled=True):
if button(label, width, enabled):
imgui.open_popup(label)
opened = imgui.begin_popup(label)
return opened |
class Credential(object):
def __init__(self, username, password):
self.username = username
self.password = password
def __iter__(self):
(yield self.username)
(yield self.password)
def __str__(self):
return ('%(username)s:%(password)s' % vars(self)) |
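__iter__ makes the object unpackable and __str__ gives the familiar 'user:pass' form, e.g.:

cred = Credential('alice', 's3cret')
username, password = cred  # tuple-style unpacking via __iter__
print(cred)                # alice:s3cret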
def __getattr__(name):
return _sub_module_deprecation(sub_package='stats', module='biasedurn', private_modules=['_biasedurn'], all=__all__, attribute=name) |
class Viewer2D(object):
def __init__(self, size=(640, 480), xlim=None, ylim=None):
pygame.init()
screen = pygame.display.set_mode(size)
if (xlim is None):
xlim = (0, size[0])
if (ylim is None):
ylim = (0, size[1])
self._screen = screen
self._xlim = xlim
self._ylim = ylim
@property
def xlim(self):
return self._xlim
@xlim.setter
def xlim(self, value):
self._xlim = value
@property
def ylim(self):
return self._ylim
@ylim.setter
def ylim(self, value):
self._ylim = value
def reset(self):
self.fill(Colors.white)
def fill(self, color):
self.screen.fill(color)
def scale_x(self, world_x):
(xmin, xmax) = self.xlim
return int((((world_x - xmin) * self.screen.get_width()) / (xmax - xmin)))
def scale_y(self, world_y):
(ymin, ymax) = self.ylim
return int((self.screen.get_height() - (((world_y - ymin) * self.screen.get_height()) / (ymax - ymin))))
def scale_point(self, point):
(x, y) = point
return (self.scale_x(x), self.scale_y(y))
@property
def scale_factor(self):
(xmin, xmax) = self.xlim
(ymin, ymax) = self.ylim
return min((self.screen.get_width() / (xmax - xmin)), (self.screen.get_height() / (ymax - ymin)))
def scale_size(self, size):
if hasattr(size, '__len__'):
(x, y) = size
return (self.scale_x((x + self.xlim[0])), (self.screen.get_height() - self.scale_y((y + self.ylim[0]))))
return (size * self.scale_factor)
def line(self, color, p1, p2, width=None):
if (width is None):
width = 1
else:
width = int((width * self.scale_factor))
(x1, y1) = self.scale_point(p1)
(x2, y2) = self.scale_point(p2)
pygame.draw.line(self.screen, color, (x1, y1), (x2, y2), width)
def circle(self, color, p, radius):
pygame.draw.circle(self.screen, color, self.scale_point(p), int(self.scale_size(radius)))
def rect(self, color, center, size):
(cx, cy) = self.scale_point(center)
(w, h) = self.scale_size(size)
if (len(color) > 3):
s = pygame.Surface((w, h), pygame.SRCALPHA)
s.fill(color)
self.screen.blit(s, ((cx - (w / 2)), (cy - (h / 2))))
else:
pygame.draw.rect(self.screen, color, pygame.Rect((cx - (w / 2)), (cy - (h / 2)), w, h))
def polygon(self, color, points):
if (len(color) > 3):
s = pygame.Surface((self.screen.get_width(), self.screen.get_height()), pygame.SRCALPHA)
s.fill((0, 0, 0, 0))
pygame.draw.polygon(s, color, list(map(self.scale_point, points)))
self.screen.blit(s, (0, 0))
else:
pygame.draw.polygon(self.screen, color, list(map(self.scale_point, points)))
@property
def screen(self):
return self._screen
def loop_once(self):
pygame.display.flip()
def checker(self, colors=[Colors.white, Colors.black], granularity=4, offset=(0, 0)):
screen_height = self.screen.get_height()
screen_width = self.screen.get_width()
screen_size = min(screen_height, screen_width)
checker_size = int((screen_size / granularity))
offset_x = self.scale_x((offset[0] + self.xlim[0]))
offset_y = self.scale_y((offset[1] + self.ylim[0]))
start_idx = (int((offset_x / checker_size)) + int((offset_y / checker_size)))
offset_x = (((offset_x % checker_size) + checker_size) % checker_size)
offset_y = (((offset_y % checker_size) + checker_size) % checker_size)
for row in range((- 1), (int(np.ceil(((screen_height * 1.0) / checker_size))) + 1)):
for col in range((- 1), (int(np.ceil(((screen_width * 1.0) / checker_size))) + 1)):
the_square = (((col * checker_size) + offset_x), ((row * checker_size) + offset_y), checker_size, checker_size)
self.screen.fill(colors[(((start_idx + row) + col) % 2)], the_square)
def pause(self):
print('press any key on the screen to continue...')
while True:
event = pygame.event.wait()
if (event.type == pygame.KEYDOWN):
break
print('continuing') |
def load_checkpoint(fpath, model, optimizer=None):
ckpt = torch.load(fpath, map_location='cpu')
if (optimizer is None):
optimizer = ckpt.get('optimizer', None)
else:
optimizer.load_state_dict(ckpt['optimizer'])
epoch = ckpt['epoch']
if ('model' in ckpt):
ckpt = ckpt['model']
load_dict = {}
for (k, v) in ckpt.items():
if k.startswith('module.'):
k_ = k.replace('module.', '')
load_dict[k_] = v
else:
load_dict[k] = v
modified = {}
for (k, v) in load_dict.items():
if k.startswith('adaptive_bins_layer.embedding_conv.'):
k_ = k.replace('adaptive_bins_layer.embedding_conv.', 'adaptive_bins_layer.conv3x3.')
modified[k_] = v
elif k.startswith('adaptive_bins_layer.patch_transformer.embedding_encoder'):
k_ = k.replace('adaptive_bins_layer.patch_transformer.embedding_encoder', 'adaptive_bins_layer.patch_transformer.embedding_convPxP')
modified[k_] = v
else:
modified[k] = v
model.load_state_dict(modified)
return (model, optimizer, epoch) |
class SubtensorBatchedIndex(NativeOpGenBase):
in_info = ({'name': 'x', 'ndim': 3, 'shape': (None, None, None), 'bw_in_var': {'want_inplace': 0}}, {'name': 'idx', 'ndim': 2, 'shape': (None, None), 'gradient': 'disconnected'})
out_info = ({'name': 'y', 'ndim': 2, 'shape': ((0, 0), (0, 1))},)
@classmethod
def grad_input_map(cls, x, idx, y, DY):
return (x, idx, DY)
c_extra_support_code = {'select_kernel': '\n DEF_KERNEL\n void select_kernel(\n float* x, long x_dim0, long x_dim1, long x_dim2, long x_stride0, long x_stride1, long x_stride2,\n float* index, long idx_stride0, long idx_stride1,\n float* y, long y_stride0, long y_stride1\n ) {\n const long max_idx = x_dim0 * x_dim1;\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n long d0 = idx % x_dim0;\n long d1 = idx / x_dim0;\n long d2 = long(index[d0 * idx_stride0 + d1 * idx_stride1]);\n if(d2 < 0) d2 = 0;\n if(d2 >= x_dim2) d2 = x_dim2 - 1;\n y[d0 * y_stride0 + d1 * y_stride1] = x[d0 * x_stride0 + d1 * x_stride1 + d2 * x_stride2];\n }\n }\n ', 'select_bw_kernel': '\n DEF_KERNEL\n void select_bw_kernel(\n float* Dx, long Dx_dim0, long Dx_dim1, long Dx_dim2, long Dx_stride0, long Dx_stride1, long Dx_stride2,\n float* index, long idx_stride0, long idx_stride1,\n float* Dy, long Dy_stride0, long Dy_stride1\n ) {\n const long max_idx = Dx_dim0 * Dx_dim1;\n for(\n long idx = threadIdx.x + blockDim.x * blockIdx.x;\n idx < max_idx;\n idx += gridDim.x * blockDim.x)\n {\n long d0 = idx % Dx_dim0;\n long d1 = idx / Dx_dim0;\n long d2 = long(index[d0 * idx_stride0 + d1 * idx_stride1]);\n if(d2 < 0) d2 = 0;\n if(d2 >= Dx_dim2) d2 = Dx_dim2 - 1;\n Dx[d0 * Dx_stride0 + d1 * Dx_stride1 + d2 * Dx_stride2] = Dy[d0 * Dy_stride0 + d1 * Dy_stride1];\n }\n }\n '}
c_fw_code = '\n assert_cmp(n_inputs, ==, 2);\n assert_cmp(n_outputs, ==, 1);\n Ndarray* x = inputs[0];\n Ndarray* idx = inputs[1];\n Ndarray* y = *outputs[0];\n\n assert_cmp(Ndarray_NDIM(x), ==, 3);\n assert_cmp(Ndarray_NDIM(idx), ==, 2);\n assert_cmp(Ndarray_DIMS(x)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(x)[1], ==, Ndarray_DIMS(idx)[1]);\n assert_cmp(Ndarray_NDIM(y), ==, 2);\n assert_cmp(Ndarray_DIMS(y)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(y)[1], ==, Ndarray_DIMS(idx)[1]);\n\n start_dev_kernel(select_kernel, (\n Ndarray_DEV_DATA(x),\n Ndarray_DIMS(x)[0],\n Ndarray_DIMS(x)[1],\n Ndarray_DIMS(x)[2],\n Ndarray_STRIDE(x, 0),\n Ndarray_STRIDE(x, 1),\n Ndarray_STRIDE(x, 2),\n Ndarray_DEV_DATA(idx),\n Ndarray_STRIDE(idx, 0),\n Ndarray_STRIDE(idx, 1),\n Ndarray_DEV_DATA(y),\n Ndarray_STRIDE(y, 0),\n Ndarray_STRIDE(y, 1)\n ));\n HANDLE_LAST_ERROR();\n '
c_bw_code = '\n assert_cmp(n_inputs, ==, 3);\n assert_cmp(n_outputs, ==, 1);\n Ndarray* x = inputs[0];\n Ndarray* idx = inputs[1];\n Ndarray* Dy = inputs[2];\n Ndarray* Dx = *outputs[0]; // inplace on x\n\n assert_cmp(Ndarray_NDIM(x), ==, 3);\n assert_cmp(Ndarray_NDIM(idx), ==, 2);\n assert_cmp(Ndarray_DIMS(x)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(x)[1], ==, Ndarray_DIMS(idx)[1]);\n assert_cmp(Ndarray_NDIM(Dy), ==, 2);\n assert_cmp(Ndarray_DIMS(Dy)[0], ==, Ndarray_DIMS(idx)[0]);\n assert_cmp(Ndarray_DIMS(Dy)[1], ==, Ndarray_DIMS(idx)[1]);\n assert_cmp(Ndarray_NDIM(Dx), ==, 3);\n assert_cmp(Ndarray_DIMS(Dx)[0], ==, Ndarray_DIMS(x)[0]);\n assert_cmp(Ndarray_DIMS(Dx)[1], ==, Ndarray_DIMS(x)[1]);\n assert_cmp(Ndarray_DIMS(Dx)[2], ==, Ndarray_DIMS(x)[2]);\n\n Ndarray_set_zero(Dx);\n start_dev_kernel(select_bw_kernel, (\n Ndarray_DEV_DATA(Dx),\n Ndarray_DIMS(Dx)[0],\n Ndarray_DIMS(Dx)[1],\n Ndarray_DIMS(Dx)[2],\n Ndarray_STRIDE(Dx, 0),\n Ndarray_STRIDE(Dx, 1),\n Ndarray_STRIDE(Dx, 2),\n Ndarray_DEV_DATA(idx),\n Ndarray_STRIDE(idx, 0),\n Ndarray_STRIDE(idx, 1),\n Ndarray_DEV_DATA(Dy),\n Ndarray_STRIDE(Dy, 0),\n Ndarray_STRIDE(Dy, 1)\n ));\n HANDLE_LAST_ERROR();\n ' |
def register_Ns3LteRrcSapRadioResourceConfigCommonSib_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::LteRrcSap::RadioResourceConfigCommonSib const &', 'arg0')])
cls.add_instance_attribute('pdschConfigCommon', 'ns3::LteRrcSap::PdschConfigCommon', is_const=False)
cls.add_instance_attribute('rachConfigCommon', 'ns3::LteRrcSap::RachConfigCommon', is_const=False)
return |
def getColorEntry(val, args):
if (not args.colorized):
return ''
if ((not isinstance(val, float)) or math.isnan(val)):
return colors.ENDC
if (val < 0.2):
return colors.RED
elif (val < 0.4):
return colors.YELLOW
elif (val < 0.6):
return colors.BLUE
elif (val < 0.8):
return colors.CYAN
else:
return colors.GREEN |
def load_data(file_name: str, max_to_load: int=100, filter_dict: Optional[dict]=None) -> List[Dict[(str, Any)]]:
count = 0
data = []
filter_dict = (filter_dict or {})
with gzip.open(file_name) as fin:
for l in fin:
d = json.loads(l)
for (k, v) in filter_dict.items():
if (d[k] not in v):
break
else:
count += 1
data.append(d)
if ((max_to_load is not None) and (count >= max_to_load)):
break
return data |
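Usage sketch (file name and filter are hypothetical): the for/else skips a record as soon as any filter key mismatches; the else branch only runs when no break fired.

rows = load_data('corpus.jsonl.gz', max_to_load=10, filter_dict={'lang': {'en', 'de'}})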
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False):
super(BasicBlock, self).__init__()
assert (style in ['pytorch', 'caffe'])
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
assert (not with_cp)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out |
def npz_dump(args):
npzfile = np.load(args[0])
if ((len(args) == 1) or (args[1] == '--list')):
print('\n'.join(npzfile.files))
exit(0)
if (args[1] in npzfile.files):
d = npzfile[args[1]]
else:
raise ValueError('No {} in {} npz file'.format(args[1], args[0]))
K = 0
if (len(args) == 3):
K = int(args[2])
np.set_printoptions(precision=6)
np.set_printoptions(suppress=True)
if (K < 0):
np.set_printoptions(threshold=sys.maxsize)
print(d)
print('shape', d.shape)
print('dtype', d.dtype)
dims = len(d.shape)
if (dims == 0):
n = 1
c = 1
h = 1
w = 1
elif (dims == 1):
n = 1
c = 1
h = 1
w = d.shape[0]
elif (dims == 2):
n = 1
c = 1
h = d.shape[0]
w = d.shape[1]
elif (dims == 3):
n = 1
c = d.shape[0]
h = d.shape[1]
w = d.shape[2]
elif (dims == 4):
n = d.shape[0]
c = d.shape[1]
h = d.shape[2]
w = d.shape[3]
elif (dims == 5):
n = d.shape[0]
c = d.shape[1]
ic = d.shape[2]
h = d.shape[3]
w = d.shape[4]
else:
print('invalid shape')
exit((- 1))
if (((n == 1) or (n == 0)) and (c == 3)):
print('max', np.amax(np.reshape(d, (3, (- 1))), axis=1))
print('min', np.amin(np.reshape(d, (3, (- 1))), axis=1))
print('mean', np.mean(np.reshape(d, (3, (- 1))), axis=1))
print('abs mean fp32', np.mean(np.abs(np.reshape(d, (3, (- 1)))), axis=1))
print('std fp32', np.std(np.reshape(d, (3, (- 1))), axis=1))
if (K > 0):
print('Show Top-K', K)
for i in get_topk(d, K):
print(i) |
def main(N, family):
K0 = FunctionSpace(N, 'Fourier', dtype='d')
SD = FunctionSpace(N, family, bc=(0, 0))
ST = FunctionSpace(N, family)
TD = TensorProductSpace(comm, (K0, SD), axes=(1, 0))
TT = TensorProductSpace(comm, (K0, ST), axes=(1, 0))
VT = VectorSpace(TT)
Q = CompositeSpace([VT, TD])
gu = TrialFunction(Q)
pq = TestFunction(Q)
(g, u) = gu
(p, q) = pq
A00 = inner(p, g)
if (family == 'legendre'):
A01 = inner(div(p), u)
else:
A01 = inner(p, (- grad(u)))
A10 = inner(q, div(g))
vfj = Array(Q, buffer=(0, 0, fe))
(vj, fj) = vfj
vf_hat = Function(Q)
(v_hat, f_hat) = vf_hat
f_hat = inner(q, fj, output_array=f_hat)
M = BlockMatrix(((A00 + A01) + A10))
gu_hat = M.solve(vf_hat)
gu = gu_hat.backward()
(g_, u_) = gu
uj = Array(TD, buffer=ue)
duxj = Array(TT, buffer=dux)
duyj = Array(TT, buffer=duy)
error = [np.sqrt(inner(1, ((uj - u_) ** 2))), np.sqrt(inner(1, ((duxj - g_[0]) ** 2))), np.sqrt(inner(1, ((duyj - g_[1]) ** 2)))]
if ('pytest' not in os.environ):
import matplotlib.pyplot as plt
plt.figure()
X = TD.local_mesh(True)
plt.contourf(X[0], X[1], u_)
plt.figure()
plt.quiver(X[1], X[0], g_[1], g_[0])
plt.figure()
plt.spy(M.diags(0, format='csr').toarray())
plt.show()
else:
if (comm.Get_rank() == 0):
print(SD.family())
print(' L2 error u dudx dudy')
print((' %2.4e %2.4e %2.4e' % (error[0], error[1], error[2])))
assert np.all((abs(np.array(error)) < 1e-08)), error |
@PROPOSAL_GENERATOR_REGISTRY.register()
class RRPN(RPN):
def __init__(self, cfg, input_shape: Dict[(str, ShapeSpec)]):
super().__init__(cfg, input_shape)
self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
def forward(self, images, features, gt_instances=None):
gt_boxes = ([x.gt_boxes for x in gt_instances] if (gt_instances is not None) else None)
del gt_instances
features = [features[f] for f in self.in_features]
(pred_objectness_logits, pred_anchor_deltas) = self.rpn_head(features)
anchors = self.anchor_generator(features)
outputs = RRPNOutputs(self.box2box_transform, self.anchor_matcher, self.batch_size_per_image, self.positive_fraction, images, pred_objectness_logits, pred_anchor_deltas, anchors, self.boundary_threshold, gt_boxes, self.smooth_l1_beta)
if self.training:
losses = outputs.losses()
else:
losses = {}
with torch.no_grad():
proposals = find_top_rrpn_proposals(outputs.predict_proposals(), outputs.predict_objectness_logits(), images, self.nms_thresh, self.pre_nms_topk[self.training], self.post_nms_topk[self.training], self.min_box_side_len, self.training)
return (proposals, losses) |
def face_img_func(key, entry, viewer):
img = entry['img'][0]
assert ((img.ndim == 3) and ((img.shape[0] == 1) or (img.shape[0] == 3)))
img = np.transpose(img, (1, 2, 0))
img = img.copy()
img += 0.5
try:
detection_raw = entry['detection'][0]
detection = (detection_raw > 0.5)
if (0.0 <= detection_raw <= 1.0):
drawing.draw_detection(img, detection)
landmark = entry['landmark'][0]
visibility = entry['visibility'][0]
landmark_color = ((0, 1, 0) if (detection == 1) else (0, 0, 1))
drawing.draw_landmark(img, landmark, visibility, landmark_color, 0.5)
pose = entry['pose'][0]
drawing.draw_pose(img, pose)
gender = entry['gender'][0]
if (0.0 <= gender <= 1.0):
gender = (gender > 0.5)
drawing.draw_gender(img, gender)
except KeyError:
pass
img = (img * 255).astype(np.uint8)
caption = '{:02d}'.format(viewer.img_cnts[key])
return {'img': img, 'cap': caption} |
def train(base_model: str='', data_path: str='yahma/alpaca-cleaned', output_dir: str='/common/users/jj635/llama/mycheckpoint/', batch_size: int=128, micro_batch_size: int=4, num_epochs: int=3, learning_rate: float=0.0003, cutoff_len: int=256, val_set_size: int=0, lora_r: int=8, lora_alpha: int=16, lora_dropout: float=0.05, lora_target_modules: List[str]=['q_proj', 'v_proj'], train_on_inputs: bool=True, group_by_length: bool=False, wandb_project: str='', wandb_run_name: str='', wandb_watch: str='', wandb_log_model: str='', resume_from_checkpoint: str=None):
print(f'''Training Alpaca-LoRA model with params:
base_model: {base_model}
data_path: {data_path}
output_dir: {output_dir}
batch_size: {batch_size}
micro_batch_size: {micro_batch_size}
num_epochs: {num_epochs}
learning_rate: {learning_rate}
cutoff_len: {cutoff_len}
val_set_size: {val_set_size}
lora_r: {lora_r}
lora_alpha: {lora_alpha}
lora_dropout: {lora_dropout}
lora_target_modules: {lora_target_modules}
train_on_inputs: {train_on_inputs}
group_by_length: {group_by_length}
wandb_project: {wandb_project}
wandb_run_name: {wandb_run_name}
wandb_watch: {wandb_watch}
wandb_log_model: {wandb_log_model}
resume_from_checkpoint: {resume_from_checkpoint}
''')
assert base_model, "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
gradient_accumulation_steps = (batch_size // micro_batch_size)
device_map = 'auto'
world_size = int(os.environ.get('WORLD_SIZE', 1))
ddp = (world_size != 1)
if ddp:
device_map = {'': int((os.environ.get('LOCAL_RANK') or 0))}
gradient_accumulation_steps = (gradient_accumulation_steps // world_size)
use_wandb = ((len(wandb_project) > 0) or (('WANDB_PROJECT' in os.environ) and (len(os.environ['WANDB_PROJECT']) > 0)))
if (len(wandb_project) > 0):
os.environ['WANDB_PROJECT'] = wandb_project
if (len(wandb_watch) > 0):
os.environ['WANDB_WATCH'] = wandb_watch
if (len(wandb_log_model) > 0):
os.environ['WANDB_LOG_MODEL'] = wandb_log_model
model = LlamaForCausalLM.from_pretrained(base_model, load_in_8bit=True, torch_dtype=torch.float16, device_map=device_map)
tokenizer = LlamaTokenizer.from_pretrained(base_model)
tokenizer.pad_token_id = 0
tokenizer.padding_side = 'left'
def tokenize(prompt, add_eos_token=True):
result = tokenizer(prompt, truncation=True, max_length=cutoff_len, padding=False, return_tensors=None)
if ((result['input_ids'][(- 1)] != tokenizer.eos_token_id) and (len(result['input_ids']) < cutoff_len) and add_eos_token):
result['input_ids'].append(tokenizer.eos_token_id)
result['attention_mask'].append(1)
result['labels'] = result['input_ids'].copy()
return result
def generate_and_tokenize_prompt(data_point):
full_prompt = generate_prompt(data_point)
tokenized_full_prompt = tokenize(full_prompt)
if (not train_on_inputs):
user_prompt = generate_prompt({**data_point, 'output': ''})
tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
user_prompt_len = len(tokenized_user_prompt['input_ids'])
tokenized_full_prompt['labels'] = (([(- 100)] * user_prompt_len) + tokenized_full_prompt['labels'][user_prompt_len:])
return tokenized_full_prompt
model = prepare_model_for_int8_training(model)
config = LoraConfig(r=lora_r, lora_alpha=lora_alpha, target_modules=lora_target_modules, lora_dropout=lora_dropout, bias='none', task_type='CAUSAL_LM')
model = get_peft_model(model, config)
if (data_path.endswith('.json') or data_path.endswith('.jsonl')):
data = load_dataset('json', data_files=data_path)
else:
data = load_dataset(data_path)
if resume_from_checkpoint:
checkpoint_name = os.path.join(resume_from_checkpoint, 'pytorch_model.bin')
if (not os.path.exists(checkpoint_name)):
checkpoint_name = os.path.join(resume_from_checkpoint, 'adapter_model.bin')
resume_from_checkpoint = False
if os.path.exists(checkpoint_name):
print(f'Restarting from {checkpoint_name}')
adapters_weights = torch.load(checkpoint_name)
model = set_peft_model_state_dict(model, adapters_weights)
else:
print(f'Checkpoint {checkpoint_name} not found')
model.print_trainable_parameters()
if (val_set_size > 0):
train_val = data['train'].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
train_data = train_val['train'].shuffle().map(generate_and_tokenize_prompt)
val_data = train_val['test'].shuffle().map(generate_and_tokenize_prompt)
else:
train_data = data['train'].shuffle().map(generate_and_tokenize_prompt)
val_data = None
if ((not ddp) and (torch.cuda.device_count() > 1)):
model.is_parallelizable = True
model.model_parallel = True
trainer = transformers.Trainer(model=model, train_dataset=train_data, eval_dataset=val_data, args=transformers.TrainingArguments(per_device_train_batch_size=micro_batch_size, gradient_accumulation_steps=gradient_accumulation_steps, warmup_steps=100, num_train_epochs=num_epochs, learning_rate=learning_rate, fp16=True, logging_steps=10, optim='adamw_torch', evaluation_strategy=('steps' if (val_set_size > 0) else 'no'), save_strategy='steps', eval_steps=(200 if (val_set_size > 0) else None), save_steps=200, output_dir=output_dir, save_total_limit=3, load_best_model_at_end=(True if (val_set_size > 0) else False), ddp_find_unused_parameters=(False if ddp else None), group_by_length=group_by_length, report_to=('wandb' if use_wandb else None), run_name=(wandb_run_name if use_wandb else None)), data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, pad_to_multiple_of=8, return_tensors='pt', padding=True))
model.config.use_cache = False
old_state_dict = model.state_dict
model.state_dict = (lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())).__get__(model, type(model))
if ((torch.__version__ >= '2') and (sys.platform != 'win32')):
model = torch.compile(model)
trainer.train(resume_from_checkpoint=resume_from_checkpoint)
model.save_pretrained(output_dir)
print("\n If there's a warning about missing keys above, please disregard :)") |
@total_ordering
class ControlFlowDistance():
def __init__(self, approach_level: int=0, branch_distance: float=0.0) -> None:
assert ((approach_level >= 0) and (branch_distance >= 0.0)), 'Expect approach_level and branch_distance to be non-negative'
self._approach_level = approach_level
self._branch_distance = branch_distance
def __eq__(self, other: Any) -> bool:
if (self is other):
return True
if (not isinstance(other, ControlFlowDistance)):
return False
return ((self._approach_level, self._branch_distance) == (other.approach_level, other.branch_distance))
def __lt__(self, other: ControlFlowDistance) -> bool:
if (not isinstance(other, ControlFlowDistance)):
raise TypeError("'<' not supported between instances of 'ControlFlowDistance' and '%s'", type(other))
return ((self._approach_level, self._branch_distance) < (other.approach_level, other.branch_distance))
@property
def approach_level(self) -> int:
return self._approach_level
@approach_level.setter
def approach_level(self, approach_level: int):
assert (approach_level >= 0), 'Expect approach_level to be non-negative'
self._approach_level = approach_level
@property
def branch_distance(self) -> float:
return self._branch_distance
@branch_distance.setter
def branch_distance(self, branch_distance: float) -> None:
assert (branch_distance >= 0), 'Expect branch_distance to be non-negative'
self._branch_distance = branch_distance
def increase_approach_level(self) -> None:
self._approach_level += 1
def get_resulting_branch_fitness(self) -> float:
return (self._approach_level + ff.normalise(self._branch_distance))
def __str__(self) -> str:
return f'approach = {self._approach_level}, branch distance = {self._branch_distance}'
def __repr__(self) -> str:
return f'ControlFlowDistance(approach_level={self._approach_level}, branch_distance={self._branch_distance})' |
class MinMaxNormalize(Rescale):
def __init__(self, bias=None, scale=None, normalize_bias=True, normalize_scale=True):
super().__init__(bias, scale, normalize_bias, normalize_scale)
def train(self, time_series: TimeSeries):
(bias, scale) = ({}, {})
for (name, var) in time_series.items():
(minval, maxval) = (var.min(), var.max())
bias[name] = minval
scale[name] = np.maximum(1e-08, (maxval - minval))
self.bias = bias
self.scale = scale |
def TetrahedralGraph():
edges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
pos = {0: (0, 0), 1: (0, 1), 2: (cos(((3.5 * pi) / 3)), sin(((3.5 * pi) / 3))), 3: (cos(((5.5 * pi) / 3)), sin(((5.5 * pi) / 3)))}
return Graph(edges, name='Tetrahedron', pos=pos) |
def main(args):
py_model = registry.get_model(args.model)
py_eval_setting = registry.get_eval_setting(args.eval_setting)
if (args.db and utils.evaluation_completed(py_model, py_eval_setting)):
print(f'Evaluation for {py_model.name} x {py_eval_setting.name} already found. Skipping...')
return
args.num_gpus = torch.cuda.device_count()
results_dict = mp.Manager().dict()
mp.spawn(main_worker, nprocs=args.num_gpus, args=(args, results_dict))
(idx_sorted, idx_map) = torch.cat([results_dict[i]['idxs'] for i in range(args.num_gpus)]).sort()
assert idx_sorted.eq(idx_sorted.unique()).all(), 'Error collecting results'
assert idx_sorted.eq(torch.tensor(list(range(idx_sorted.size(0))))).all(), 'Error collecting results'
logits = torch.cat([results_dict[i]['logits'] for i in range(args.num_gpus)])[idx_map]
targets = torch.cat([results_dict[i]['targets'] for i in range(args.num_gpus)])[idx_map]
image_paths = np.concatenate([results_dict[i]['image_paths'] for i in range(args.num_gpus)])[idx_map]
metrics = py_eval_setting.get_metrics(logits, targets, image_paths, py_model)
with open(join(args.logdir, 'metrics.json'), 'w') as outfile:
json.dump(metrics, outfile)
if args.db:
utils.store_evaluation(py_model, py_eval_setting, metrics, logits)
print('Uploaded to db')
utils.close_db_connection()
print('')
print(f'RESULT {args.model} on {args.eval_setting} - {metrics}')
print('') |
def test_get_weekly_info_invalid_date_range():
with TestClient(app) as client:
lower_bound_date = datetime.fromisoformat(LOWER_BOUND_START_DATE).date()
past = (lower_bound_date - timedelta(days=2))
response = client.get(f'/{PREFIX}/weekly_info?begin={past}&end={lower_bound_date}')
assert (response.status_code == 416), 'English articles start on 2018-10-01, so start date should be 2018-10-01 or later'
today = datetime.today().date()
future = (today + timedelta(days=2))
response = client.get(f'/{PREFIX}/weekly_info?begin={today}&end={future}')
assert (response.status_code == 416), 'Cannot request stats for dates in the future' |
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
register_functions_ns3_olsr(module.add_cpp_namespace('olsr'), root_module)
register_functions_ns3_tests(module.add_cpp_namespace('tests'), root_module)
return |
class MultirotorClient(VehicleClient, object):
def __init__(self, length_of_simulation, port):
super(MultirotorClient, self).__init__(length_of_simulation, port=port)
def takeoffAsync(self, timeout_sec=20, vehicle_name=''):
return self.client.call_async('takeoff', timeout_sec, vehicle_name)
def landAsync(self, timeout_sec=60, vehicle_name=''):
return self.client.call_async('land', timeout_sec, vehicle_name)
def goHomeAsync(self, timeout_sec=3e+38, vehicle_name=''):
return self.client.call_async('goHome', timeout_sec, vehicle_name)
def moveByAngleZAsync(self, pitch, roll, z, yaw, duration, vehicle_name=''):
return self.client.call_async('moveByAngleZ', pitch, roll, z, yaw, duration, vehicle_name)
def moveByAngleThrottleAsync(self, pitch, roll, throttle, yaw_rate, duration, vehicle_name=''):
return self.client.call_async('moveByAngleThrottle', pitch, roll, throttle, yaw_rate, duration, vehicle_name)
def moveByVelocityAsync(self, vx, vy, vz, duration, drivetrain=DrivetrainType.MaxDegreeOfFreedom, yaw_mode=YawMode(), vehicle_name=''):
return self.client.call_async('moveByVelocity', vx, vy, vz, duration, drivetrain, yaw_mode, vehicle_name)
def moveByVelocityZAsync(self, vx, vy, z, duration, drivetrain=DrivetrainType.MaxDegreeOfFreedom, yaw_mode=YawMode(), vehicle_name=''):
return self.client.call_async('moveByVelocityZ', vx, vy, z, duration, drivetrain, yaw_mode, vehicle_name)
def moveOnPathAsync(self, path, velocity, timeout_sec=3e+38, drivetrain=DrivetrainType.MaxDegreeOfFreedom, yaw_mode=YawMode(), lookahead=(- 1), adaptive_lookahead=1, vehicle_name=''):
return self.client.call_async('moveOnPath', path, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToPositionAsync(self, x, y, z, velocity, timeout_sec=3e+38, drivetrain=DrivetrainType.MaxDegreeOfFreedom, yaw_mode=YawMode(), lookahead=(- 1), adaptive_lookahead=1, vehicle_name=''):
return self.client.call_async('moveToPosition', x, y, z, velocity, timeout_sec, drivetrain, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveToZAsync(self, z, velocity, timeout_sec=3e+38, yaw_mode=YawMode(), lookahead=(- 1), adaptive_lookahead=1, vehicle_name=''):
return self.client.call_async('moveToZ', z, velocity, timeout_sec, yaw_mode, lookahead, adaptive_lookahead, vehicle_name)
def moveByManualAsync(self, vx_max, vy_max, z_min, duration, drivetrain=DrivetrainType.MaxDegreeOfFreedom, yaw_mode=YawMode(), vehicle_name=''):
return self.client.call_async('moveByManual', vx_max, vy_max, z_min, duration, drivetrain, yaw_mode, vehicle_name)
def rotateToYawAsync(self, yaw, timeout_sec=3e+38, margin=5, vehicle_name=''):
return self.client.call_async('rotateToYaw', yaw, timeout_sec, margin, vehicle_name)
def rotateByYawRateAsync(self, yaw_rate, duration, vehicle_name=''):
return self.client.call_async('rotateByYawRate', yaw_rate, duration, vehicle_name)
def hoverAsync(self, vehicle_name=''):
return self.client.call_async('hover', vehicle_name)
def moveByRC(self, rcdata=RCData(), vehicle_name=''):
return self.client.call('moveByRC', rcdata, vehicle_name)
def getMultirotorState(self, vehicle_name=''):
return MultirotorState.from_msgpack(self.client.call('getMultirotorState', vehicle_name))
getMultirotorState.__annotations__ = {'return': MultirotorState} |
class KITTIRAWDataset(KITTIDataset):
def __init__(self, *args, **kwargs):
super(KITTIRAWDataset, self).__init__(*args, **kwargs)
def get_image_path(self, folder, frame_index, side):
f_str = '{:010d}{}'.format(frame_index, self.img_ext)
image_path = os.path.join(self.data_path, folder, 'image_0{}/data'.format(self.side_map[side]), f_str)
return image_path
def get_depth(self, folder, frame_index, side, do_flip):
calib_path = os.path.join(self.data_path, folder.split('/')[0])
velo_filename = os.path.join(self.data_path, folder, 'velodyne_points/data/{:010d}.bin'.format(int(frame_index)))
depth_gt = generate_depth_map(calib_path, velo_filename, self.side_map[side])
depth_gt = skimage.transform.resize(depth_gt, self.full_res_shape[::(- 1)], order=0, preserve_range=True, mode='constant')
if do_flip:
depth_gt = np.fliplr(depth_gt)
return depth_gt |
class BalancedDataParallel(DataParallel):
def __init__(self, gpu0_bsz, *args, **kwargs):
self.gpu0_bsz = gpu0_bsz
super().__init__(*args, **kwargs)
def forward(self, *inputs, **kwargs):
if (not self.device_ids):
return self.module(*inputs, **kwargs)
if (self.gpu0_bsz == 0):
device_ids = self.device_ids[1:]
else:
device_ids = self.device_ids
(inputs, kwargs) = self.scatter(inputs, kwargs, device_ids)
if ((len(self.device_ids) == 1) or (len(inputs) == 1)):
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids)
if (self.gpu0_bsz == 0):
replicas = replicas[1:]
outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
return self.gather(outputs, self.output_device)
def parallel_apply(self, replicas, device_ids, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, device_ids)
def scatter(self, inputs, kwargs, device_ids):
bsz = inputs[0][0].size(self.dim)
if (bsz == 1):
chunk_sizes = [1]
return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
num_dev = len(self.device_ids)
gpu0_bsz = self.gpu0_bsz
bsz_unit = ((bsz - gpu0_bsz) // (num_dev - 1))
if (gpu0_bsz < bsz_unit):
chunk_sizes = ([gpu0_bsz] + ([bsz_unit] * (num_dev - 1)))
delta = (bsz - sum(chunk_sizes))
for i in range(delta):
chunk_sizes[(i + 1)] += 1
if (gpu0_bsz == 0):
chunk_sizes = chunk_sizes[1:]
else:
return super().scatter(inputs, kwargs, device_ids)
return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim) |
def test_boxcox1p_underflow():
x = np.array([1e-15, 1e-306])
lmbda = np.array([1e-306, 1e-18])
y = boxcox1p(x, lmbda)
assert_allclose(y, np.log1p(x), rtol=1e-14) |
def itilbert(x, h, period=None, _cache=_cache):
tmp = asarray(x)
if iscomplexobj(tmp):
return (itilbert(tmp.real, h, period) + (1j * itilbert(tmp.imag, h, period)))
if (period is not None):
h = (((h * 2) * pi) / period)
n = len(x)
omega = _cache.get((n, h))
if (omega is None):
if (len(_cache) > 20):
while _cache:
_cache.popitem()
def kernel(k, h=h):
if k:
return (- tanh((h * k)))
return 0
omega = convolve.init_convolution_kernel(n, kernel, d=1)
_cache[(n, h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x) |
def is_valid(column_names, data):
return pd.Series([(value[0] > 1) for value in data[column_names].to_numpy()]) |
def test_string_sort():
filenames = ['f9.10.png', 'f9.9.png', 'f10.10.png', 'f10.9.png', 'e9.png', 'e10.png', 'em.png']
expected_filenames = ['e9.png', 'e10.png', 'em.png', 'f9.9.png', 'f9.10.png', 'f10.9.png', 'f10.10.png']
sorted_filenames = sorted(filenames, key=alphanumeric_key)
assert_equal(expected_filenames, sorted_filenames) |
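`alphanumeric_key` itself is not shown; a common natural-sort key that satisfies this test splits out digit runs and compares them as integers (a sketch, not necessarily the library's exact implementation):

import re

def alphanumeric_key(s):
    # 'f9.10.png' -> ['f', 9, '.', 10, '.png'], so 9 < 10 compares numerically
    return [int(tok) if tok.isdigit() else tok for tok in re.split(r'(\d+)', s)]

names = ['f9.10.png', 'f9.9.png', 'e10.png', 'e9.png']
assert sorted(names, key=alphanumeric_key) == ['e9.png', 'e10.png', 'f9.9.png', 'f9.10.png']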
def timezone(zone):
if (zone is None):
raise UnknownTimeZoneError(None)
if (zone.upper() == 'UTC'):
return utc
try:
zone = ascii(zone)
except UnicodeEncodeError:
raise UnknownTimeZoneError(zone)
zone = _case_insensitive_zone_lookup(_unmunge_zone(zone))
if (zone not in _tzinfo_cache):
if (zone in all_timezones_set):
fp = open_resource(zone)
try:
_tzinfo_cache[zone] = build_tzinfo(zone, fp)
finally:
fp.close()
else:
raise UnknownTimeZoneError(zone)
return _tzinfo_cache[zone] |
def test_nn_policy_learner_predict():
n_actions = 2
len_list = 1
context = np.ones((100, 2), dtype=np.float32)
context_test = np.array([i for i in range(10)], dtype=np.float32).reshape(5, 2)
action = np.zeros((100,), dtype=int)
reward = np.ones((100,), dtype=np.float32)
pscore = np.array(([0.5] * 100), dtype=np.float32)
desc = '`context` must be 2D array'
with pytest.raises(ValueError, match=f'{desc}*'):
learner = NNPolicyLearner(n_actions=n_actions, len_list=len_list, dim_context=2, off_policy_objective='ipw')
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([1.0, 1.0], dtype=np.float32)
learner.predict(context=invalid_context)
desc = 'Expected `context.shape[1]'
with pytest.raises(ValueError, match=f'{desc}*'):
learner = NNPolicyLearner(n_actions=n_actions, len_list=len_list, dim_context=2, off_policy_objective='ipw')
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
invalid_context = np.array([[1.0, 1.0, 1.0]], dtype=np.float32)
learner.predict(context=invalid_context)
learner = NNPolicyLearner(n_actions=n_actions, len_list=len_list, dim_context=2, off_policy_objective='ipw')
learner.fit(context=context, action=action, reward=reward, pscore=pscore)
action_dist = learner.predict(context=context_test)
assert np.allclose(action_dist.sum(1), np.ones((context_test.shape[0], len_list)))
assert (action_dist.shape[0] == 5)
assert (action_dist.shape[1] == n_actions)
assert (action_dist.shape[2] == len_list) |
def sparsestmax(v, rad_in=0, u_in=None):
w = sparsemax(v)
if ((max(w) - min(w)) == 1):
return w
ind = (w > 0).float()
u = (ind / torch.sum(ind))
if (u_in is None):
rad = rad_in
else:
rad = sqrt(((rad_in ** 2) - torch.sum(((u - u_in) ** 2))))
distance = torch.norm((w - u))
if (distance >= rad):
return w
p = (((rad * (w - u)) / distance) + u)
if (min(p) < 0):
return sparsestmax(p, rad, u)
return p.clamp_(min=0, max=1) |
@test_utils.test()
def test_vector_index():
val = ti.field(ti.i32)
n = 4
m = 7
p = 11
ti.root.dense(ti.i, n).dense(ti.j, m).dense(ti.k, p).place(val)
@ti.kernel
def test():
for i in range(n):
for j in range(m):
for k in range(p):
I = ti.Vector([i, j, k])
val[I] = ((i + (j * 2)) + (k * 3))
test()
for i in range(n):
for j in range(m):
for k in range(p):
assert (val[(i, j, k)] == ((i + (j * 2)) + (k * 3))) |
def register_Ns3HighLatencyDataTxVectorTag_methods(root_module, cls):
cls.add_constructor([param('ns3::HighLatencyDataTxVectorTag const &', 'arg0')])
cls.add_constructor([])
cls.add_constructor([param('ns3::WifiTxVector', 'dataTxVector')])
cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
cls.add_method('GetDataTxVector', 'ns3::WifiTxVector', [], is_const=True)
cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
return |
def run_multilingual_pipeline(en_has_dependencies=True, fr_has_dependencies=True, **kwargs):
english_text = 'This is an English sentence.'
english_words = ['This', 'is', 'an', 'English', 'sentence', '.']
english_deps_gold = '\n'.join(("('This', 5, 'nsubj')", "('is', 5, 'cop')", "('an', 5, 'det')", "('English', 5, 'amod')", "('sentence', 0, 'root')", "('.', 5, 'punct')"))
if (not en_has_dependencies):
english_deps_gold = ''
french_text = "C'est une phrase francaise."
french_words = ["C'", 'est', 'une', 'phrase', 'francaise', '.']
french_deps_gold = '\n'.join(('("C\'", 4, \'nsubj\')', "('est', 4, 'cop')", "('une', 4, 'det')", "('phrase', 0, 'root')", "('francaise', 4, 'amod')", "('.', 4, 'punct')"))
if (not fr_has_dependencies):
french_deps_gold = ''
if ('lang_configs' in kwargs):
nlp = MultilingualPipeline(model_dir=TEST_MODELS_DIR, download_method=None, **kwargs)
else:
lang_configs = {'en': {'processors': 'tokenize,pos,lemma,depparse'}, 'fr': {'processors': 'tokenize,pos,lemma,depparse'}}
nlp = MultilingualPipeline(model_dir=TEST_MODELS_DIR, download_method=None, lang_configs=lang_configs, **kwargs)
docs = [english_text, french_text]
docs = nlp(docs)
assert (docs[0].lang == 'en')
assert (len(docs[0].sentences) == 1)
assert ([x.text for x in docs[0].sentences[0].words] == english_words)
assert (docs[0].sentences[0].dependencies_string() == english_deps_gold)
assert (len(docs[1].sentences) == 1)
assert (docs[1].lang == 'fr')
assert ([x.text for x in docs[1].sentences[0].words] == french_words)
assert (docs[1].sentences[0].dependencies_string() == french_deps_gold) |
def absolute_error_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
dy = grad_inputs[0]
x0 = inputs[0]
x1 = inputs[1]
m0 = F.greater_equal(x0, x1)
m1 = (1 - m0)
m0 = no_grad(m0)
m1 = no_grad(m1)
dx0 = (dy * (m0 - m1))
dx1 = (- dx0)
return (dx0, dx1) |
def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
(c, lower) = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True, check_finite=check_finite)
return c |
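A quick check of the wrapper's contract: with the default `lower=False` the returned factor is upper triangular, so `a == c.T @ c`; a sketch using SciPy:

import numpy as np
from scipy.linalg import cholesky

a = np.array([[4.0, 2.0], [2.0, 3.0]])  # symmetric positive definite
c = cholesky(a)                         # upper triangular (lower=False)
assert np.allclose(c.T @ c, a)
c_low = cholesky(a, lower=True)         # lower triangular: a == c_low @ c_low.T
assert np.allclose(c_low @ c_low.T, a)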
def xavier_normal_(tensor, gain=1.0):
(fan_in, fan_out) = _calculate_fan_in_and_fan_out(tensor)
std = (gain * math.sqrt((2.0 / float((fan_in + fan_out)))))
return _no_grad_normal_(tensor, 0.0, std) |
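The scale follows the Glorot/Xavier rule std = gain * sqrt(2 / (fan_in + fan_out)); a quick empirical check against the public `torch.nn.init` entry point:

import math
import torch

w = torch.empty(64, 128)  # a Linear-style weight: fan_in=128, fan_out=64
torch.nn.init.xavier_normal_(w, gain=1.0)
expected_std = math.sqrt(2.0 / (128 + 64))  # ~0.102
# The sample std fluctuates around the target; 8192 values keep it tight.
assert abs(w.std().item() - expected_std) < 0.01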
def add_preamble(source: str, name: Path, comment_prefix: str, custom_preamble: str) -> str:
dashes = ('-' * 77)
preamble = (custom_preamble + textwrap.dedent(f'''
{comment_prefix} {dashes}
{comment_prefix} This file was autogenerated by symforce from template:
{comment_prefix} {name}
{comment_prefix} Do NOT modify by hand.
{comment_prefix} {dashes}
''').lstrip())
return (preamble + source) |
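A usage sketch of the helper above (the template path is a hypothetical example); it assumes `add_preamble` and its `textwrap` import are in scope:

from pathlib import Path

rendered = add_preamble(
    source='x = 1\n',
    name=Path('templates/example.py.jinja'),  # illustrative template name
    comment_prefix='#',
    custom_preamble='',
)
# Line 0 is the dashed rule; line 1 carries the autogeneration notice.
print(rendered.splitlines()[1])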
@pytest.mark.experimental
def test_predict(log, model):
recs = model.predict(log, users=[0, 1, 7], k=1)
assert (recs.filter((sf.col('user_idx') == 0)).count() == 1)
assert (recs.filter((sf.col('user_idx') == 7)).count() == 0)
assert (recs.count() == 2) |
class TFAutoModelForQuestionAnswering(_BaseAutoModelClass):
_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING |
class FunctionTransformer(TransformerMixin, BaseEstimator):
_parameter_constraints: dict = {'func': [callable, None], 'inverse_func': [callable, None], 'validate': ['boolean'], 'accept_sparse': ['boolean'], 'check_inverse': ['boolean'], 'feature_names_out': [callable, StrOptions({'one-to-one'}), None], 'kw_args': [dict, None], 'inv_kw_args': [dict, None]}
def __init__(self, func=None, inverse_func=None, *, validate=False, accept_sparse=False, check_inverse=True, feature_names_out=None, kw_args=None, inv_kw_args=None):
self.func = func
self.inverse_func = inverse_func
self.validate = validate
self.accept_sparse = accept_sparse
self.check_inverse = check_inverse
self.feature_names_out = feature_names_out
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
def _check_input(self, X, *, reset):
if self.validate:
return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset)
elif reset:
self._check_n_features(X, reset=reset)
self._check_feature_names(X, reset=reset)
return X
def _check_inverse_transform(self, X):
idx_selected = slice(None, None, max(1, (X.shape[0] // 100)))
X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))
if hasattr(X, 'dtype'):
dtypes = [X.dtype]
elif hasattr(X, 'dtypes'):
dtypes = X.dtypes
if (not all((np.issubdtype(d, np.number) for d in dtypes))):
raise ValueError("'check_inverse' is only supported when all the elements in `X` are numerical.")
if (not _allclose_dense_sparse(X[idx_selected], X_round_trip)):
warnings.warn("The provided functions are not strictly inverse of each other. If you are sure you want to proceed regardless, set 'check_inverse=False'.", UserWarning)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
X = self._check_input(X, reset=True)
if (self.check_inverse and (not ((self.func is None) or (self.inverse_func is None)))):
self._check_inverse_transform(X)
return self
def transform(self, X):
X = self._check_input(X, reset=False)
out = self._transform(X, func=self.func, kw_args=self.kw_args)
if (hasattr(out, 'columns') and (self.feature_names_out is not None)):
if (list(out.columns) != list(self.get_feature_names_out())):
raise ValueError(f'''The output generated by `func` has different column names than the ones generated by the method `get_feature_names_out`. Got output with column names: {list(out.columns)} and `get_feature_names_out` returned: {list(self.get_feature_names_out())}. This can be fixed in different manners depending on your use case:
(i) If `func` returns a container with column names, make sure they are consistent with the output of `get_feature_names_out`.
(ii) If `func` is a NumPy `ufunc`, then forcing `validate=True` could be considered to internally convert the input container to a NumPy array before calling the `ufunc`.
(iii) The column names can be overridden by setting `set_output(transform='pandas')` such that the column names are set to the names provided by `get_feature_names_out`.''')
output_config = _get_output_config('transform', self)['dense']
if ((output_config == 'pandas') and (self.feature_names_out is None) and (not _is_pandas_df(out))):
warnings.warn("When `set_output` is configured to be 'pandas', `func` should return a DataFrame to follow the `set_output` API or `feature_names_out` should be defined.")
return out
def inverse_transform(self, X):
if self.validate:
X = check_array(X, accept_sparse=self.accept_sparse)
return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
@available_if(lambda self: (self.feature_names_out is not None))
def get_feature_names_out(self, input_features=None):
if (hasattr(self, 'n_features_in_') or (input_features is not None)):
input_features = _check_feature_names_in(self, input_features)
if (self.feature_names_out == 'one-to-one'):
names_out = input_features
elif callable(self.feature_names_out):
names_out = self.feature_names_out(self, input_features)
else:
raise ValueError(f'feature_names_out={self.feature_names_out!r} is invalid. It must either be "one-to-one" or a callable with two arguments: the function transformer and an array-like of input feature names. The callable must return an array-like of output feature names.')
return np.asarray(names_out, dtype=object)
def _transform(self, X, func=None, kw_args=None):
if (func is None):
func = _identity
return func(X, **(kw_args if kw_args else {}))
def __sklearn_is_fitted__(self):
return True
def _more_tags(self):
return {'no_validation': (not self.validate), 'stateless': True}
def set_output(self, *, transform=None):
if (not hasattr(self, '_sklearn_output_config')):
self._sklearn_output_config = {}
self._sklearn_output_config['transform'] = transform
return self |
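A minimal usage sketch with an invertible function pair, which also exercises the round-trip `check_inverse` logic in `fit`:

import numpy as np
from sklearn.preprocessing import FunctionTransformer

ft = FunctionTransformer(func=np.log1p, inverse_func=np.expm1, validate=True)
X = np.array([[0.0, 1.0], [2.0, 3.0]])
Xt = ft.fit_transform(X)  # fit() verifies expm1(log1p(x)) == x on a subsample
assert np.allclose(ft.inverse_transform(Xt), X)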
class BenchmarkSet():
def __init__(self, scenario: str=None, instance: str=None, active_session: bool=True, session: Union[(rt.InferenceSession, None)]=None, multithread: bool=True, check: bool=True, noisy: bool=False):
assert (scenario is not None), 'Please provide a valid scenario.'
self.config = cfg(scenario)
self.encoding = self._get_encoding()
self.config_space = self._get_config_space()
self.active_session = active_session
self.noisy = noisy
self.check = check
self.quant = None
self.constants = {}
self.session = None
self.archive = []
if (instance is not None):
self.set_instance(instance)
if (self.active_session or (session is not None)):
self.set_session(session, multithread=multithread)
if ('citation' in self.config.config.keys()):
if (self.config.config.get('citation') is not None):
print('Please make sure to also cite:')
print(*self.config.config.get('citation'), sep='\n')
def objective_function(self, configuration: Union[(Dict, List[Dict])], seed: int=None, logging: bool=False, multithread: bool=True):
if ((not self.active_session) or (self.session is None)):
self.set_session(multithread=multithread)
if (isinstance(configuration, dict) or isinstance(configuration, CS.Configuration)):
configuration = [configuration]
configuration = [(cf.get_dictionary() if isinstance(cf, CS.Configuration) else cf) for cf in configuration]
input_names = [x.name for x in self.session.get_inputs()]
output_name = self.session.get_outputs()[0].name
results_list = ([None] * len(configuration))
(x_cont, x_cat) = self._config_to_xs(configuration[0])
for i in range(1, len(configuration)):
(x_cont_, x_cat_) = self._config_to_xs(configuration[i])
x_cont = np.vstack((x_cont, x_cont_))
x_cat = np.vstack((x_cat, x_cat_))
if (seed is not None):
rt.set_seed(seed)
results = self.session.run([output_name], {input_names[0]: x_cat, input_names[1]: x_cont})[0]
for i in range(len(results)):
results_dict = {k: v for (k, v) in zip(self.config.y_names, results[i])}
if logging:
timedate = time.strftime('%D|%H:%M:%S', time.localtime())
self.archive.append({'time': timedate, 'x': configuration[i], 'y': results_dict})
results_list[i] = results_dict
if (not self.active_session):
self.session = None
return results_list
def objective_function_timed(self, configuration: Union[(Dict, List[Dict])], seed: int=None, logging: bool=False, multithread: bool=True):
if (self.quant is None):
self.quant = self._infer_quant()
start_time = time.time()
results = self.objective_function(configuration, seed=seed, logging=logging, multithread=multithread)
if isinstance(results, dict):
results = [results]
runt = sum([result.get(self.config.runtime_name) for result in results])
offset = (time.time() - start_time)
sleepit = (max((runt - offset), 0) * self.quant)
time.sleep(sleepit)
return results
def set_constant(self, param: str, value=None):
if (param is not None):
hpar = self.config_space.get_hyperparameter(param)
if (not hpar.is_legal(value)):
raise Exception(f'Value {value} not allowed for parameter {param}!')
self.constants[param] = value
def set_instance(self, value):
self.set_constant(self.config.instance_names, value)
def get_opt_space(self, drop_fidelity_params: bool=False, seed: int=None):
csn = copy.deepcopy(self.config_space)
hps = csn.get_hyperparameters()
for (p, v) in self.constants.items():
param_idx = csn.get_hyperparameter_names().index(p)
hps[param_idx] = CSH.Constant(p, v)
if drop_fidelity_params:
fidelity_params_idx = [csn.get_hyperparameter_names().index(fidelity_param) for fidelity_param in self.config.fidelity_params]
fidelity_params_idx.sort()
fidelity_params_idx.reverse()
for idx in fidelity_params_idx:
del hps[idx]
cnds = csn.get_conditions()
fbds = csn.get_forbiddens()
cs = CS.ConfigurationSpace(seed=seed)
cs.add_hyperparameters(hps)
cs.add_conditions(cnds)
cs.add_forbidden_clauses(fbds)
return cs
def get_fidelity_space(self, seed: int=None):
csn = copy.deepcopy(self.config_space)
hps = csn.get_hyperparameters()
fidelity_params_idx = [csn.get_hyperparameter_names().index(fidelity_param) for fidelity_param in self.config.fidelity_params]
hps = [hps[idx] for idx in fidelity_params_idx]
cs = CS.ConfigurationSpace(seed=seed)
cs.add_hyperparameters(hps)
return cs
def set_session(self, session: Union[(rt.InferenceSession, None)]=None, multithread: bool=True):
if (session is not None):
self.session = session
elif (self.session is None):
model_path = self._get_model_path()
if (not Path(model_path).is_file()):
raise Exception(f'ONNX file {model_path} not found!')
options = rt.SessionOptions()
if (not multithread):
options.inter_op_num_threads = 1
options.intra_op_num_threads = 1
self.session = rt.InferenceSession(model_path, sess_options=options, providers=['CPUExecutionProvider'])
@property
def instances(self):
if (self.config.instance_names is None):
return self.config.config['instances']
return [*self.config_space.get_hyperparameter(self.config.instance_names).choices]
@property
def instance(self):
return self.constants.get(self.config.instance_names)
@property
def targets(self):
return self.config.y_names
@property
def target_stats(self):
df = pd.read_csv(os.path.join(self.config.config['basedir'], 'global_statistics', 'instance_target_statistics.csv'))
df = df[(df.scenario == self.config.config_id)]
if (self.instance is not None):
df = df[(df.instance == self.instance)]
return df
@property
def properties(self):
props = []
cat = (len(self.config.cat_names) > 1)
cont = (len(self.config.cont_names) >= 1)
props += [('mixed' if (cat & cont) else ('categorical' if cat else 'continuous'))]
if self.config.hierarchical:
props += ['hierarchical']
if (self.config.memory_name != ''):
props += ['memory']
return props
def __repr__(self):
return f'BenchmarkSet({self.config.config_id})'
def _config_to_xs(self, configuration):
if (type(configuration) == CS.Configuration):
configuration = configuration.get_dictionary()
self.config_space._sort_hyperparameters()
configuration = configuration.copy()
configuration = {k: configuration.get(k) for k in self.config_space.get_hyperparameter_names() if (configuration.get(k) is not None)}
if self.check:
self.config_space.check_configuration(CS.Configuration(self.config_space, values=configuration, allow_inactive_with_values=False))
if len(self.constants):
configuration.update(self.constants)
all_names = self.config_space.get_hyperparameter_names()
missing = list(set(all_names).difference(set(configuration.keys())))
for hp in missing:
value = ('#na#' if (hp in self.config.cat_names) else 0)
configuration.update({hp: value})
x_cat = np.array([self._integer_encode(configuration[x], x) for x in self.config.cat_names if (x not in self.config.drop_predict)]).reshape(1, (- 1)).astype(np.int32)
x_cont = np.array([configuration[x] for x in self.config.cont_names]).reshape(1, (- 1)).astype(np.float32)
return (x_cont, x_cat)
def _integer_encode(self, value, name):
return self.encoding.get(name).get(value)
def _get_encoding(self):
with open(self.config.get_path('encoding'), 'r') as f:
encoding = json.load(f)
return encoding
def _get_config_space(self):
with open(self.config.get_path('config_space'), 'r') as f:
json_string = f.read()
cs = CS_json.read(json_string)
return cs
def _eval_random(self):
cfg = self.config_space.sample_configuration().get_dictionary()
return self.objective_function(cfg, logging=False, multithread=False)[0]
def _infer_quant(self):
offsets = []
runtimes = []
for i in range(15):
start_time = time.time()
results = self._eval_random()
runtimes += [results[self.config.runtime_name]]
offsets += [(time.time() - start_time)]
rt = np.mean(np.maximum(np.array(runtimes), 0.0))
quant = np.minimum(((20 * np.max(np.array(offsets))) / rt), 1.0)
return quant
def _get_model_path(self):
path = self.config.get_path('model')
if self.noisy:
path = self.config.get_path('model_noisy')
return path |
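A usage sketch of the benchmark wrapper; the scenario and instance strings here are illustrative and must match the locally installed benchmark data:

bench = BenchmarkSet(scenario='lcbench', instance='3945')  # hypothetical values
opt_space = bench.get_opt_space(drop_fidelity_params=True, seed=0)
cfg = opt_space.sample_configuration()
results = bench.objective_function(cfg.get_dictionary())
print(results[0])  # one dict of target metrics per evaluated configuration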
def test_data_frame_complex():
ak_array_in = ak.Array([(1.1 + 0.1j), (2.2 + 0.2j), (3.3 + 0.3j), (4.4 + 0.4j), (5.5 + 0.5j)])
data_frame = ak.to_rdataframe({'x': ak_array_in})
assert (data_frame.GetColumnType('x') == 'std::complex<double>')
ak_array_out = ak.from_rdataframe(data_frame, columns=('x',))
assert (ak_array_in.to_list() == ak_array_out['x'].to_list()) |
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
cfg = compat_cfg(cfg)
logger = get_root_logger(log_level=cfg.log_level)
use_apex = (cfg.optimizer_config.get('type', None) == 'ApexOptimizerHook')
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
runner_type = ('EpochBasedRunner' if ('runner' not in cfg) else cfg.runner['type'])
train_dataloader_default_args = dict(samples_per_gpu=2, workers_per_gpu=2, num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, runner_type=runner_type, persistent_workers=False)
train_loader_cfg = {**train_dataloader_default_args, **cfg.data.get('train_dataloader', {})}
data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
auto_scale_lr(cfg, distributed, logger)
if use_apex:
if (apex is None):
raise RuntimeError('apex is not installed')
optimizer = build_optimizer(model, cfg.optimizer)
if cfg.optimizer_config.get('use_fp16', False):
(model, optimizer) = apex.amp.initialize(model.cuda(), optimizer, opt_level='O1')
for m in model.modules():
if hasattr(m, 'fp16_enabled'):
m.fp16_enabled = True
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = build_ddp(model, cfg.device, device_ids=[int(os.environ['LOCAL_RANK'])], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
else:
model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
if (not use_apex):
optimizer = build_optimizer(model, cfg.optimizer)
runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
runner.timestamp = timestamp
fp16_cfg = cfg.get('fp16', None)
if ('cumulative_iters' in cfg.optimizer_config):
if (fp16_cfg is not None):
optimizer_config = GradientCumulativeFp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif (distributed and ('type' not in cfg.optimizer_config)):
optimizer_config = DebugGradientCumulativeOptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
elif (fp16_cfg is not None):
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif (distributed and ('type' not in cfg.optimizer_config)):
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
if validate:
val_dataloader_default_args = dict(samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False, persistent_workers=False)
val_dataloader_args = {**val_dataloader_default_args, **cfg.data.get('val_dataloader', {})}
if (val_dataloader_args['samples_per_gpu'] > 1):
cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(val_dataset, **val_dataloader_args)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
eval_hook = (DistEvalHook if distributed else EvalHook)
runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
resume_from = None
if ((cfg.resume_from is None) and cfg.get('auto_resume')):
resume_from = find_latest_checkpoint(cfg.work_dir)
if (resume_from is not None):
cfg.resume_from = resume_from
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow) |
def cuda_timestamp(sync=False, device=None):
if sync:
torch.cuda.synchronize(device=device)
return time.perf_counter() |
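A sketch of the intended timing pattern: synchronizing before each clock read folds queued CUDA kernels into the measurement (the timed matmul is just illustrative work, and `cuda_timestamp` is assumed in scope):

import torch

use_sync = torch.cuda.is_available()  # synchronize only when a GPU is present
x = torch.randn(1024, 1024)
if use_sync:
    x = x.cuda()
start = cuda_timestamp(sync=use_sync)
y = x @ x  # the work being timed
elapsed = cuda_timestamp(sync=use_sync) - start
print(f'matmul took {elapsed * 1e3:.3f} ms')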
@torch.no_grad()
def evaluate_performance(model, gallery_loader, query_loader, device, use_gt=False, use_cache=False, use_cbgm=False):
model.eval()
if use_cache:
eval_cache = torch.load('data/eval_cache/eval_cache.pth')
gallery_dets = eval_cache['gallery_dets']
gallery_feats = eval_cache['gallery_feats']
query_dets = eval_cache['query_dets']
query_feats = eval_cache['query_feats']
query_box_feats = eval_cache['query_box_feats']
else:
(gallery_dets, gallery_feats) = ([], [])
for (images, targets) in tqdm(gallery_loader, ncols=0):
(images, targets) = to_device(images, targets, device)
if (not use_gt):
outputs = model(images)
else:
boxes = targets[0]['boxes']
n_boxes = boxes.size(0)
embeddings = model(images, targets)
outputs = [{'boxes': boxes, 'embeddings': torch.cat(embeddings), 'labels': torch.ones(n_boxes).to(device), 'scores': torch.ones(n_boxes).to(device)}]
for output in outputs:
box_w_scores = torch.cat([output['boxes'], output['scores'].unsqueeze(1)], dim=1)
gallery_dets.append(box_w_scores.cpu().numpy())
gallery_feats.append(output['embeddings'].cpu().numpy())
(query_dets, query_feats) = ([], [])
for (images, targets) in tqdm(query_loader, ncols=0):
(images, targets) = to_device(images, targets, device)
outputs = model(images, deepcopy(targets), query_img_as_gallery=True)
gt_box = targets[0]['boxes'].squeeze()
assert ((gt_box - outputs[0]['boxes'][0]).sum() <= 0.001), 'GT box must be the first one in the detected boxes of query image'
for output in outputs:
box_w_scores = torch.cat([output['boxes'], output['scores'].unsqueeze(1)], dim=1)
query_dets.append(box_w_scores.cpu().numpy())
query_feats.append(output['embeddings'].cpu().numpy())
query_box_feats = []
for (images, targets) in tqdm(query_loader, ncols=0):
(images, targets) = to_device(images, targets, device)
embeddings = model(images, targets)
assert (len(embeddings) == 1), 'batch size in test phase should be 1'
query_box_feats.append(embeddings[0].cpu().numpy())
mkdir('data/eval_cache')
save_dict = {'gallery_dets': gallery_dets, 'gallery_feats': gallery_feats, 'query_dets': query_dets, 'query_feats': query_feats, 'query_box_feats': query_box_feats}
torch.save(save_dict, 'data/eval_cache/eval_cache.pth')
eval_detection(gallery_loader.dataset, gallery_dets, det_thresh=0.01)
eval_search_func = (eval_search_cuhk if (gallery_loader.dataset.name == 'CUHK-SYSU') else eval_search_prw)
eval_search_func(gallery_loader.dataset, query_loader.dataset, gallery_dets, gallery_feats, query_box_feats, query_dets, query_feats, cbgm=use_cbgm) |
class ProtobufDetectionModel(torch.nn.Module):
def __init__(self, predict_net, init_net, *, convert_outputs=None):
super().__init__()
self.protobuf_model = ProtobufModel(predict_net, init_net)
self.size_divisibility = get_pb_arg_vali(predict_net, 'size_divisibility', 0)
self.device = get_pb_arg_vals(predict_net, 'device', b'cpu').decode('ascii')
if (convert_outputs is None):
meta_arch = get_pb_arg_vals(predict_net, 'meta_architecture', b'GeneralizedRCNN')
meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode('ascii')]
self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
else:
self._convert_outputs = convert_outputs
def _infer_output_devices(self, inputs_dict):
def _get_device_type(torch_tensor):
assert (torch_tensor.device.type in ['cpu', 'cuda'])
assert (torch_tensor.device.index == 0)
return torch_tensor.device.type
predict_net = self.protobuf_model.net.Proto()
input_device_types = {(name, 0): _get_device_type(tensor) for (name, tensor) in inputs_dict.items()}
device_type_map = infer_device_type(predict_net, known_status=input_device_types, device_name_style='pytorch')
(ssa, versions) = core.get_ssa(predict_net)
versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
output_devices = [device_type_map[outp] for outp in versioned_outputs]
return output_devices
def _convert_inputs(self, batched_inputs):
(data, im_info) = convert_batched_inputs_to_c2_format(batched_inputs, self.size_divisibility, self.device)
return {'data': data, 'im_info': im_info}
def forward(self, batched_inputs):
c2_inputs = self._convert_inputs(batched_inputs)
c2_results = self.protobuf_model(c2_inputs)
if any(((t.device.type != 'cpu') for (_, t) in c2_inputs.items())):
output_devices = self._infer_output_devices(c2_inputs)
else:
output_devices = ['cpu' for _ in self.protobuf_model.net.Proto().external_output]
def _cast_caffe2_blob_to_torch_tensor(blob, device):
return (torch.Tensor(blob).to(device) if isinstance(blob, np.ndarray) else None)
c2_results = {name: _cast_caffe2_blob_to_torch_tensor(c2_results[name], device) for (name, device) in zip(self.protobuf_model.net.Proto().external_output, output_devices)}
return self._convert_outputs(batched_inputs, c2_inputs, c2_results) |
def args_parser():
parser = argparse.ArgumentParser(description='Train a model on ENS10')
parser.add_argument('--loss', type=str, default='CRPS', choices=['CRPS', 'L2'], help='Loss function for training (default: CRPS)')
parser.add_argument('--seed', type=int, default=16, help='Torch Seed (default: 16)')
parser.add_argument('--model', type=str, default='UNet', choices=['UNet', 'MLP', 'EMOS', 'Tformer', 'LeNet'], help='Model Architecture (default: UNet)')
parser.add_argument('--ens-num', type=int, default=10, help='Ensemble Number. This is important for EMOS model (default: 10).')
parser.add_argument('--data-path', type=str, default='./', help='The path for both ENS10 and ERA5 datasets (default: ./)')
parser.add_argument('--target-var', type=str, default='z500', choices=['z500', 't850', 't2m'], help='Target variable for prediction (default: z500)')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--lr', '-lr', default=0.01, type=float, help='Learning rate (default: 1e-2)')
parser.add_argument('--epochs', type=int, default=10, help='Epochs (default: 10)')
parser.add_argument('--batch-size', '-b', type=int, default=1, help='Batch size (default: 1)')
parser.add_argument('--make-plot', action='store_true', help='make scatter plot for ens10 and era5')
args = parser.parse_args()
return args |
def kl_bern_criterion(x):
KLD = (torch.mul(x, (torch.log((x + 1e-20)) - math.log(0.5))) + torch.mul((1 - x), (torch.log(((1 - x) + 1e-20)) - math.log((1 - 0.5)))))
return KLD.mean() |
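The expression is the elementwise KL divergence KL(Bern(x) || Bern(0.5)), then averaged: zero at x = 0.5 and approaching log 2 at the extremes. A quick numeric check of the formula:

import math
import torch

x = torch.tensor([0.5, 0.9, 1.0 - 1e-06])
kld = (x * (torch.log(x + 1e-20) - math.log(0.5))
       + (1 - x) * (torch.log(1 - x + 1e-20) - math.log(0.5)))
print(kld)  # ~[0.0000, 0.3681, 0.6931]: 0 at x=0.5, -> log(2) as x -> 1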
def ref_min_max_quantize(x, qr_min, qr_max, ql_min, ql_max, decay, x_min_max, ema, ste_fine_grained, eps, quantize):
if (not quantize):
return x
raxes = tuple([i for (i, s) in enumerate(ql_min.shape) if (s == 1)])
x_min = np.min(x, raxes, keepdims=True)
x_max = np.max(x, raxes, keepdims=True)
if (x_min_max and ema):
qr_min = ((decay * qr_min) + ((1.0 - decay) * x_min))
qr_max = ((decay * qr_max) + ((1.0 - decay) * x_max))
elif (x_min_max and (not ema)):
qr_min = x_min
qr_max = x_max
scale = ((qr_max - qr_min) / (ql_max - ql_min))
degenerate = ((qr_max - qr_min) < eps)
if np.any(degenerate):
qr_max[degenerate] = (qr_min[degenerate] + eps)
zero_point_from_min = (ql_min - (qr_min / scale))
zero_point_nudged = std_round(zero_point_from_min)
if np.any((zero_point_from_min <= ql_min)):
zero_point_nudged[(zero_point_from_min <= ql_min)] = ql_min[(zero_point_from_min <= ql_min)]
if np.any((zero_point_from_min >= ql_max)):
zero_point_nudged[(zero_point_from_min >= ql_max)] = ql_max[(zero_point_from_min >= ql_max)]
qr_min_nudged = ((ql_min - zero_point_nudged) * scale)
qr_max_nudged = ((ql_max - zero_point_nudged) * scale)
x_q = ((std_round(((np.clip(x, qr_min_nudged, qr_max_nudged) - qr_min_nudged) / scale)) * scale) + qr_min_nudged)
return x_q |
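The nudging above snaps the zero point onto an integer grid level; a compact, self-contained sketch of plain uint8-style min-max quantization (np.round stands in for the `std_round` helper, which is not shown here):

import numpy as np

def min_max_quantize(x, ql_min=0.0, ql_max=255.0):
    qr_min, qr_max = x.min(), x.max()
    scale = (qr_max - qr_min) / (ql_max - ql_min)
    zero_point = np.round(ql_min - qr_min / scale)  # nudged to an integer level
    q = np.clip(np.round(x / scale + zero_point), ql_min, ql_max)
    return (q - zero_point) * scale                 # dequantized values

x = np.linspace(-1.0, 1.0, 11)
xq = min_max_quantize(x)
assert np.max(np.abs(x - xq)) <= (x.max() - x.min()) / 255.0  # within one step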
def get_d_paretomtl(grads, losses, preference_vectors, pref_idx):
current_weight = preference_vectors[pref_idx]
rest_weights = preference_vectors
w = (rest_weights - current_weight)
gx = torch.matmul(w, (losses / torch.norm(losses)))
idx = (gx > 0)
if (torch.sum(idx) <= 0):
(sol, nd) = MinNormSolver.find_min_norm_element_FW([[grads[t]] for t in range(len(grads))])
return torch.tensor(sol).cuda().float()
else:
vec = torch.cat((grads, torch.matmul(w[idx], grads)))
(sol, nd) = MinNormSolver.find_min_norm_element([[vec[t]] for t in range(len(vec))])
sol = torch.Tensor(sol).cuda()
n = preference_vectors.shape[1]
weights = []
for i in range(n):
weight_i = (sol[i] + torch.sum(torch.stack([(sol[j] * w[idx][((j - n), i)]) for j in torch.arange(n, (n + torch.sum(idx)))])))
weights.append(weight_i)
weight = torch.stack(weights)
return weight |
def prepare_examples(all_examples, split='train', max_cell=50, max_row=400, max_table=400):
def chunk_table(table, answer, tid):
if (len(table['text']) == 0):
return None
table_text = [[cell.split()[:max_cell] for cell in row] for row in table['text']]
i_start = (1 if (len(table_text) > 1) else 0)
headers = table_text[0]
while (i_start < len(table_text)):
tmp_table = []
tmp_size = (len(headers[0]) + len(table_text[i_start][0]))
j_start = (1 if (len(table_text[i_start]) > 1) else 0)
i_end = (i_start + 1)
while (j_start < len(table_text[i_start])):
j_end = j_start
for j in range(j_start, len(table_text[i_start])):
if (((tmp_size + len(headers[j])) + len(table_text[i_start][j])) <= max_row):
j_end += 1
tmp_size += (len(headers[j]) + len(table_text[i_start][j]))
else:
break
if (j_start != 0):
tmp_headers = ([headers[0]] + headers[j_start:j_end])
tmp_table.append(([table_text[i_start][0]] + table_text[i_start][j_start:j_end]))
else:
tmp_headers = headers[j_start:j_end]
tmp_table.append(table_text[i_start][j_start:j_end])
if ((j_start == 1) and (j_end == len(table_text[i_start]))):
for row in table_text[(i_start + 1):]:
row_size = sum([len(cell) for cell in row])
if ((row_size + tmp_size) <= max_table):
tmp_table.append(row)
i_end += 1
tmp_size += row_size
else:
break
tmp_headers = [' '.join(cell) for cell in tmp_headers]
tmp_table = [[' '.join(cell) for cell in row] for row in tmp_table]
if (j_start != 0):
i_map = {0: 0}
j_map = {0: 0}
i_map.update({i0: i1 for (i0, i1) in zip(range(1, ((1 + i_end) - i_start)), range(i_start, i_end))})
j_map.update({j0: j1 for (j0, j1) in zip(range(1, ((1 + j_end) - j_start)), range(j_start, j_end))})
else:
i_map = {0: 0}
j_map = {0: 0}
i_map.update({i0: i1 for (i0, i1) in zip(range(1, ((1 + i_end) - i_start)), range(i_start, i_end))})
j_map.update({j0: j1 for (j0, j1) in zip(range(0, j_end), range(j_start, j_end))})
if (answer != ''):
cell_index = find_answer_cell(answer, ([tmp_headers] + tmp_table))
else:
cell_index = None
if (cell_index is None):
cell_index = [(- 1), (- 1)]
cell_span = [(- 1), (- 1)]
else:
if (cell_index[0] == 0):
cell_start = re.search(get_answer_pattern(answer), tmp_headers[cell_index[1]]).start()
else:
cell_start = re.search(get_answer_pattern(answer), tmp_table[(cell_index[0] - 1)][cell_index[1]]).start()
cell_span = [cell_start, (cell_start + len(answer))]
(yield {'table': {'idx': tid, 'data': ([tmp_headers] + tmp_table), 'index': [[table['index'][i_map[i]][j_map[j]] for j in range(len(j_map))] for i in range(len(i_map))], 'values': [[table['values'][i_map[i]][j_map[j]] for j in range(len(j_map))] for i in range(len(i_map))], 'value_ranks': [[table['value_ranks'][i_map[i]][j_map[j]] for j in range(len(j_map))] for i in range(len(i_map))], 'value_inv_ranks': [[table['value_inv_ranks'][i_map[i]][j_map[j]] for j in range(len(j_map))] for i in range(len(i_map))]}, 'answer': {'text': (answer if (cell_index[0] != (- 1)) else ''), 'index': cell_index, 'span': cell_span}})
tmp_size = (len(headers[0]) + len(table_text[i_start][0]))
tmp_table = []
if (j_end == len(table_text[i_start])):
break
else:
j_start = min([(j_start + 4), j_end])
i_end = (i_start + 1)
i_start = i_end
processed_examples = []
missed_examples = []
for (qid, sample) in enumerate(tqdm(all_examples)):
qid = f'{split}-{qid}'
used_t = set()
all_answers = [re.sub(' +', ' ', tags_to_remove.sub('', a[1])) for a in sample['a in table']]
found_answer = False
question = sample['q']
question_values = get_values(question)
for a in sample['a in table']:
if ((a[0] in used_t) and found_answer):
continue
table = sample['t'][a[0]]
if (len(table) == 0):
continue
table = [[re.sub(' +', ' ', tags_to_remove.sub('', cell)) for cell in row] for row in table]
table = process_table(table)
used_t.add(a[0])
answer = re.sub(' +', ' ', tags_to_remove.sub('', a[1]))
for processed_example in chunk_table(table, answer, a[0]):
if (processed_example is None):
break
processed_example.update({'qid': qid, 'question': question, 'question_values': question_values, 'all_answers': all_answers})
processed_examples.append(processed_example)
if (processed_example['answer']['text'] != ''):
found_answer = True
if (not found_answer):
missed_examples.append([qid, sample])
for (tid, table) in enumerate(sample['t']):
if (tid in used_t):
continue
table = [[re.sub(' +', ' ', tags_to_remove.sub('', cell)) for cell in row] for row in table]
if (len(table) == 0):
continue
table = process_table(table)
for processed_example in chunk_table(table, '', tid):
if (processed_example is None):
break
processed_example.update({'qid': qid, 'question': question, 'question_values': question_values, 'all_answers': all_answers})
processed_examples.append(processed_example)
return (processed_examples, missed_examples) |
class _BasePolynomialNetwork(six.with_metaclass(ABCMeta, _BasePoly)):
def __init__(self, degree=2, loss='squared', n_components=5, beta=1, tol=1e-06, fit_lower='augment', warm_start=False, max_iter=10000, verbose=False, random_state=None):
self.degree = degree
self.loss = loss
self.n_components = n_components
self.beta = beta
self.tol = tol
self.fit_lower = fit_lower
self.warm_start = warm_start
self.max_iter = max_iter
self.verbose = verbose
self.random_state = random_state
def _augment(self, X):
if (self.fit_lower == 'augment'):
X = add_dummy_feature(X, value=1)
return X
def fit(self, X, y):
if (self.fit_lower == 'explicit'):
raise NotImplementedError('Explicit fitting of lower orders not yet implemented for polynomialnetwork models.')
(X, y) = self._check_X_y(X, y)
X = self._augment(X)
n_features = X.shape[1]
dataset = get_dataset(X, order='fortran')
rng = check_random_state(self.random_state)
loss_obj = self._get_loss(self.loss)
if (not (self.warm_start and hasattr(self, 'U_'))):
self.U_ = (0.01 * rng.randn(self.degree, self.n_components, n_features))
y_pred = _lifted_predict(self.U_, dataset)
(converged, self.n_iter_) = _cd_lifted(self.U_, dataset, y, y_pred, self.beta, loss_obj, self.max_iter, self.tol, self.verbose)
if (not converged):
warnings.warn('Objective did not converge. Increase max_iter.')
return self
def _predict(self, X):
if (not hasattr(self, 'U_')):
raise NotFittedError('Estimator not fitted.')
X = check_array(X, accept_sparse='csc', dtype=np.double)
X = self._augment(X)
X = get_dataset(X, order='fortran')
return _lifted_predict(self.U_, X) |
def ref_log_det(x):
y = np.zeros(x.shape[0], dtype=np.float32)
for i in range(x.shape[0]):
y[i] = np.linalg.det(x[i])
y = np.abs(y)
y = np.log(y)
return y |
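For batched log|det|, `np.linalg.slogdet` computes the same quantity in one call and more stably than det-then-log; a quick equivalence check:

import numpy as np

x = np.random.default_rng(0).normal(size=(4, 3, 3)).astype(np.float32)
sign, logabsdet = np.linalg.slogdet(x)  # batched over the leading axis
ref = np.log(np.abs(np.linalg.det(x)))
assert np.allclose(logabsdet, ref, rtol=1e-4, atol=1e-4)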
class OptimizerMixin():
__slots__ = ['maxiter', 'verbose']
def __init__(self, **kwargs):
self.maxiter = kwargs.pop('maxiter', 100000)
self.verbose = kwargs.pop('verbose', 0)
if kwargs:
raise exceptions.Unsupported(f'Unsupported kwargs were passed in: {list(kwargs)}.')
def _internal_minimize(self, func, x0, do_grad=False, bounds=None, fixed_vals=None, options={}, par_names=None):
minimizer = self._get_minimizer(func, x0, bounds, fixed_vals=fixed_vals, do_grad=do_grad, par_names=par_names)
result = self._minimize(minimizer, func, x0, do_grad=do_grad, bounds=bounds, fixed_vals=fixed_vals, options=options)
try:
assert result.success
except AssertionError:
log.error(result, exc_info=True)
raise exceptions.FailedMinimization(result)
return result
def _internal_postprocess(self, fitresult, stitch_pars, return_uncertainties=False):
(tensorlib, _) = get_backend()
fitted_pars = stitch_pars(tensorlib.astensor(fitresult.x))
uncertainties = getattr(fitresult, 'unc', None)
if (uncertainties is not None):
num_fixed_pars = (len(fitted_pars) - len(fitresult.x))
uncertainties = np.where(fitresult.minuit.fixed, 0.0, uncertainties)
uncertainties = stitch_pars(tensorlib.astensor(uncertainties), stitch_with=tensorlib.zeros(num_fixed_pars))
if return_uncertainties:
fitted_pars = tensorlib.stack([fitted_pars, uncertainties], axis=1)
correlations = getattr(fitresult, 'corr', None)
if (correlations is not None):
_zeros = tensorlib.zeros(num_fixed_pars)
stitched_columns = [stitch_pars(tensorlib.astensor(column), stitch_with=_zeros) for column in zip(*correlations)]
stitched_rows = [stitch_pars(tensorlib.astensor(row), stitch_with=_zeros) for row in zip(*stitched_columns)]
correlations = tensorlib.stack(stitched_rows, axis=1)
fitresult.x = fitted_pars
fitresult.fun = tensorlib.astensor(fitresult.fun)
fitresult.unc = uncertainties
fitresult.corr = correlations
return fitresult
def minimize(self, objective, data, pdf, init_pars, par_bounds, fixed_vals=None, return_fitted_val=False, return_result_obj=False, return_uncertainties=False, return_correlations=False, do_grad=None, do_stitch=False, **kwargs):
(tensorlib, _) = get_backend()
do_grad = (tensorlib.default_do_grad if (do_grad is None) else do_grad)
(minimizer_kwargs, stitch_pars) = shim(objective, data, pdf, init_pars, par_bounds, fixed_vals, do_grad=do_grad, do_stitch=do_stitch)
try:
par_names = pdf.config.par_names
except AttributeError:
par_names = None
if (par_names and do_stitch and fixed_vals):
for (index, _) in fixed_vals:
par_names[index] = None
par_names = [name for name in par_names if name]
result = self._internal_minimize(**minimizer_kwargs, options=kwargs, par_names=par_names)
result = self._internal_postprocess(result, stitch_pars, return_uncertainties=return_uncertainties)
_returns = [result.x]
if return_correlations:
_returns.append(result.corr)
if return_fitted_val:
_returns.append(result.fun)
if return_result_obj:
_returns.append(result)
return (tuple(_returns) if (len(_returns) > 1) else _returns[0]) |
def get_layer_label(layer, rankdir):
if (rankdir in ('TB', 'BT')):
separator = ' '
else:
separator = '\\n'
if ((layer.type == 'Convolution') or (layer.type == 'Deconvolution')):
node_label = ('"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' % (layer.name, separator, layer.type, separator, (layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size._values) else 1), separator, (layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1), separator, (layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0)))
elif (layer.type == 'Pooling'):
pooling_types_dict = get_pooling_types_dict()
node_label = ('"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' % (layer.name, separator, pooling_types_dict[layer.pooling_param.pool], layer.type, separator, layer.pooling_param.kernel_size, separator, layer.pooling_param.stride, separator, layer.pooling_param.pad))
else:
node_label = ('"%s%s(%s)"' % (layer.name, separator, layer.type))
return node_label |
def pad_zeros(A, nrows):
nz = (nrows - A.nrows())
if (nz == 0):
return A
if (nz < 0):
return A.matrix_from_rows(range(nrows))
return A.stack(matrix(ZZ, nz, A.ncols())) |
def get_accuracy(model_repl, param_repl):
acc_1 = []
acc_5 = []
steps = (input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size)
for (_, batch) in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
predicted = model_repl(param_repl, batch['image'])
predicted = flax.nn.softmax(predicted)
batch_top_1_acc = tf.keras.metrics.CategoricalAccuracy()(batch['label'].squeeze(), predicted.squeeze()).numpy()
batch_top_5_acc = tf.keras.metrics.TopKCategoricalAccuracy(k=5)(batch['label'].squeeze(), predicted.squeeze()).numpy()
acc_1.append(batch_top_1_acc)
acc_5.append(batch_top_5_acc)
return (np.mean(acc_1), np.mean(acc_5)) |
@njit(**njit_dict_no_parallel)
def compton_scatter(photon, compton_angle):
comov_direction = angle_aberration_gamma(photon.direction, photon.location, photon.time_current)
orthogonal_vector = get_perpendicular_vector(comov_direction)
new_vector = np.dot(euler_rodrigues(compton_angle, orthogonal_vector), comov_direction)
phi = ((2.0 * np.pi) * np.random.random())
final_compton_scattered_vector = np.dot(euler_rodrigues(phi, comov_direction), new_vector)
norm_phi = np.dot(final_compton_scattered_vector, final_compton_scattered_vector)
norm_theta = np.dot(final_compton_scattered_vector, comov_direction)
assert (np.abs((norm_phi - 1)) < 1e-08), 'Error, norm of Compton scatter vector is not 1!'
assert (np.abs((norm_theta - np.cos(compton_angle))) < 1e-08), 'Error, angle between the scattered vector and the original direction does not match the Compton angle!'
final_direction = angle_aberration_gamma(final_compton_scattered_vector, photon.location, photon.time_current)
return final_direction |
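`euler_rodrigues` is not shown; assuming the usual contract (a rotation matrix for angle `theta` about `axis`), a standard Euler-Rodrigues sketch that preserves norms and angles, which is exactly what the asserts above verify:

import numpy as np

def euler_rodrigues(theta, axis):
    axis = np.asarray(axis) / np.linalg.norm(axis)
    a = np.cos(theta / 2.0)
    b, c, d = -axis * np.sin(theta / 2.0)
    return np.array([
        [a*a + b*b - c*c - d*d, 2*(b*c + a*d),         2*(b*d - a*c)],
        [2*(b*c - a*d),         a*a + c*c - b*b - d*d, 2*(c*d + a*b)],
        [2*(b*d + a*c),         2*(c*d - a*b),         a*a + d*d - b*b - c*c],
    ])

v = np.array([1.0, 0.0, 0.0])
w = euler_rodrigues(np.pi / 3, [0.0, 0.0, 1.0]) @ v
assert abs(np.dot(w, w) - 1.0) < 1e-12                 # norm is preserved
assert abs(np.dot(w, v) - np.cos(np.pi / 3)) < 1e-12   # rotation angle matches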
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
register_Ns3EnergyHarvesterHelper_methods(root_module, root_module['ns3::EnergyHarvesterHelper'])
register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3LiIonEnergySourceHelper_methods(root_module, root_module['ns3::LiIonEnergySourceHelper'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3RvBatteryModelHelper_methods(root_module, root_module['ns3::RvBatteryModelHelper'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3BasicEnergyHarvesterHelper_methods(root_module, root_module['ns3::BasicEnergyHarvesterHelper'])
register_Ns3BasicEnergySourceHelper_methods(root_module, root_module['ns3::BasicEnergySourceHelper'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TracedValue__Ns3Time_methods(root_module, root_module['ns3::TracedValue< ns3::Time >'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnergyHarvester_methods(root_module, root_module['ns3::EnergyHarvester'])
register_Ns3EnergyHarvesterContainer_methods(root_module, root_module['ns3::EnergyHarvesterContainer'])
register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3LiIonEnergySource_methods(root_module, root_module['ns3::LiIonEnergySource'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3RvBatteryModel_methods(root_module, root_module['ns3::RvBatteryModel'])
register_Ns3SimpleDeviceEnergyModel_methods(root_module, root_module['ns3::SimpleDeviceEnergyModel'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3BasicEnergyHarvester_methods(root_module, root_module['ns3::BasicEnergyHarvester'])
register_Ns3BasicEnergySource_methods(root_module, root_module['ns3::BasicEnergySource'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Double_Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Time_Ns3Time_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Time, ns3::Time, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return |
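Each register_* call above delegates to a generated helper that attaches constructors and methods to the wrapped C++ class. A minimal hand-written sketch of what one such helper looks like, assuming pybindgen's param API (the real generated bodies enumerate every overload):

from pybindgen import param

def register_Ns3BooleanValue_methods(root_module, cls):
    # ns3::BooleanValue::BooleanValue() and BooleanValue(bool value)
    cls.add_constructor([])
    cls.add_constructor([param('bool', 'value')])
    # bool ns3::BooleanValue::Get() const
    cls.add_method('Get', 'bool', [], is_const=True)
    # void ns3::BooleanValue::Set(bool value)
    cls.add_method('Set', 'void', [param('bool', 'value')])
    return |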
import torch
import torch.nn as nn
import torch.nn.functional as F

# MultiBatchNorm and weights_init_ABN are assumed to be provided by the surrounding project.
class classifier32ABN(nn.Module):
    def __init__(self, num_classes=10, num_ABN=2, feat_dim=128):
        super().__init__()
self.num_classes = num_classes
self.conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
self.conv3 = nn.Conv2d(64, 128, 3, 2, 1, bias=False)
self.conv4 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
self.conv5 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
self.conv6 = nn.Conv2d(128, 128, 3, 2, 1, bias=False)
self.conv7 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
self.conv8 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
self.conv9 = nn.Conv2d(128, feat_dim, 3, 2, 1, bias=False)
self.bn1 = MultiBatchNorm(64, num_ABN)
self.bn2 = MultiBatchNorm(64, num_ABN)
self.bn3 = MultiBatchNorm(128, num_ABN)
self.bn4 = MultiBatchNorm(128, num_ABN)
self.bn5 = MultiBatchNorm(128, num_ABN)
self.bn6 = MultiBatchNorm(128, num_ABN)
self.bn7 = MultiBatchNorm(128, num_ABN)
self.bn8 = MultiBatchNorm(128, num_ABN)
self.bn9 = MultiBatchNorm(feat_dim, num_ABN)
        self.bn10 = MultiBatchNorm(128, num_ABN)  # note: defined but never used in forward
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(feat_dim, num_classes, bias=False)
self.dr1 = nn.Dropout2d(0.2)
self.dr2 = nn.Dropout2d(0.2)
self.dr3 = nn.Dropout2d(0.2)
self.apply(weights_init_ABN)
        self.cuda()  # moves the whole model to GPU at construction; assumes CUDA is available
    def forward(self, x, return_feature=False, bn_label=None):
        if bn_label is None:
            # default to the first batch-norm set; zeros on the input's device
            bn_label = torch.zeros(x.shape[0], dtype=torch.long, device=x.device)
        x = self.dr1(x)
        x = self.conv1(x)
        x, _ = self.bn1(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.conv2(x)
        x, _ = self.bn2(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.conv3(x)
        x, _ = self.bn3(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.dr2(x)
        x = self.conv4(x)
        x, _ = self.bn4(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.conv5(x)
        x, _ = self.bn5(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.conv6(x)
        x, _ = self.bn6(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.dr3(x)
        x = self.conv7(x)
        x, _ = self.bn7(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.conv8(x)
        x, _ = self.bn8(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.conv9(x)
        x, _ = self.bn9(x, bn_label)
        x = F.leaky_relu(x, 0.2)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        y = self.fc(x)
        if return_feature:
            return x, y
        return y |
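A hypothetical smoke test for classifier32ABN (it assumes a CUDA device, since the constructor calls self.cuda(), and CIFAR-sized 32x32 inputs; the output shapes follow from the three stride-2 convolutions):

model = classifier32ABN(num_classes=10, num_ABN=2)
images = torch.randn(4, 3, 32, 32).cuda()
logits = model(images)                               # bn_label defaults to all zeros
feats, logits = model(images, return_feature=True)   # 128-d features before the linear head
domain = torch.ones(4, dtype=torch.long).cuda()      # route through the second BN set
logits_b = model(images, bn_label=domain)
assert logits.shape == (4, 10) and feats.shape == (4, 128) |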
from operator import itemgetter
from typing import Union

import dask.dataframe as dd
import pandas as pd

# to_dask, _format, and ProgressBar are assumed to be provided by the surrounding package.
def clean_no_mva(df: Union[pd.DataFrame, dd.DataFrame], column: str, output_format: str = 'standard',
                 inplace: bool = False, errors: str = 'coerce', progress: bool = True) -> pd.DataFrame:
    if output_format not in {'compact', 'standard'}:
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # work lazily in Dask, then compute once at the end
    df = to_dask(df)
    df['clean_code_tup'] = df[column].map_partitions(
        lambda srs: [_format(x, output_format, errors) for x in srs], meta=object
    )
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # keep only the cleaned values, under the original column's name plus '_clean'
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    with ProgressBar(minimum=1, disable=not progress):
        df = df.compute()
    return df |
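A hedged usage sketch of the cleaner above; the column values are illustrative only (no claim that any of them is a checksum-valid Norwegian MVA number):

import pandas as pd

raw = pd.DataFrame({'vat': ['NO 995 525 828 MVA', 'not-a-number', None]})
out = clean_no_mva(raw, 'vat', output_format='compact')
# invalid or missing inputs become NaN in the new 'vat_clean' column under errors='coerce'
print(out[['vat', 'vat_clean']]) |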
import glob
import logging
from pathlib import Path

import numpy as np

# _load_ppr is assumed to be defined alongside this helper.
def load_ppr(input_dir='datasets/ppr/papers', dataset='ogbn-papers100M', idx=None, alpha=0.1,
             eps=0.001, topk=64, ppr_normalization='row', split_desc=None,
             make_undirected=None, shape=None):
    if input_dir is None:
        return None, None
    # build the cache key from the hyperparameters used when the PPR matrix was dumped
    dump_suffix = f'{dataset}'
    if split_desc is not None:
        dump_suffix += f'_{split_desc}'
    dump_suffix += f'_alpha{int(alpha * 100)}_eps{eps:.0e}_topk{topk}'
    dump_suffix += f'_pprnorm{ppr_normalization}'
    if make_undirected is not None:
        dump_suffix += f'_indirect{make_undirected}'
    if not glob.glob(str(Path(input_dir) / f'topk_ppr_{dump_suffix}') + '*'):
        logging.info(f"No cached topk ppr found with key '{dump_suffix}' in directory '{input_dir}'")
        return None, None
    ppr_idx = None
    if split_desc is not None and idx is not None:
        ppr_idx = np.load(Path(input_dir) / f'{dump_suffix}_idx.npy')
    return _load_ppr(input_dir, dump_suffix, shape), ppr_idx |
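As a worked example, the default arguments above produce the following cache key (derived directly from the f-strings in the function):

# alpha=0.1 -> f'_alpha{int(0.1 * 100)}' == '_alpha10'
# eps=0.001 -> f'_eps{0.001:.0e}'       == '_eps1e-03'
dump_suffix = 'ogbn-papers100M_alpha10_eps1e-03_topk64_pprnormrow'
# so the loader globs for:
#   datasets/ppr/papers/topk_ppr_ogbn-papers100M_alpha10_eps1e-03_topk64_pprnormrow* |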
import pytest

@pytest.mark.spark
def test_cluster(long_log_with_features, user_features, tmp_path):
    path = (tmp_path / 'cluster').resolve()
    dataset = create_dataset(long_log_with_features, user_features)
    model = ClusterRec()
    model.fit(dataset)
    base_pred = model.predict(dataset, 5)
    # round-trip the fitted model through disk and check predictions are unchanged
    save(model, path)
    loaded_model = load(path)
    new_pred = loaded_model.predict(dataset, 5)
    sparkDataFrameEqual(base_pred, new_pred) |
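A minimal sketch of what the sparkDataFrameEqual helper used above might check (an assumption; the real helper lives in the project's test utilities):

def sparkDataFrameEqual(expected, actual):
    # order-insensitive equality: identical schemas and no rows left over on either side
    assert expected.schema == actual.schema
    assert expected.exceptAll(actual).count() == 0
    assert actual.exceptAll(expected).count() == 0 |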
def test_float_assertion(assertion_to_ast_ref):
    assertion_to_ast, ref = assertion_to_ast_ref
    assertion = ass.FloatAssertion(source=ref, value=1.5)
    assertion.accept(assertion_to_ast)
    assert __create_source_from_ast(assertion_to_ast.nodes) == 'var_0 = 5\nassert var_0 == pytest.approx(1.5, abs=0.01, rel=0.01)' |
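The generated assertion relies on pytest.approx, which treats two numbers as equal when their difference is within max(rel * |expected|, abs); with the tolerances above that bound is max(0.01 * 1.5, 0.01) = 0.015:

import pytest

assert 1.512 == pytest.approx(1.5, abs=0.01, rel=0.01)   # |diff| = 0.012 <= 0.015
assert 1.520 != pytest.approx(1.5, abs=0.01, rel=0.01)   # |diff| = 0.020 >  0.015 |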
import math

import torch as t
import torch.nn as nn

class MultiheadAttention(nn.Module):
    """Scaled dot-product attention; any heads are assumed to be folded into the batch dim."""

    def __init__(self, num_hidden_k):
        super().__init__()
        self.num_hidden_k = num_hidden_k
        self.attn_dropout = nn.Dropout(p=0.1)

    def forward(self, key, value, query):
        # (B, T_q, d) x (B, d, T_k) -> (B, T_q, T_k) attention scores
        attn = t.bmm(query, key.transpose(1, 2))
        attn = attn / math.sqrt(self.num_hidden_k)
        attn = t.softmax(attn, dim=-1)
        attn = self.attn_dropout(attn)
        # weighted sum of values: (B, T_q, T_k) x (B, T_k, d) -> (B, T_q, d)
        result = t.bmm(attn, value)
        return result, attn |
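A hypothetical shape check for the attention module above (note the forward signature order is key, value, query):

attn_layer = MultiheadAttention(num_hidden_k=64)
q = t.randn(2, 5, 64)   # (batch, query steps, dim)
k = t.randn(2, 7, 64)   # (batch, key steps, dim)
v = t.randn(2, 7, 64)
out, weights = attn_layer(k, v, q)
assert out.shape == (2, 5, 64) and weights.shape == (2, 5, 7) |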