code stringlengths 281 23.7M |
|---|
def add_parser():
    """Define and parse the command-line options for an inference/debug run.

    Returns:
        argparse.Namespace with the parsed options.
    """
    arg_parser = argparse.ArgumentParser()
    # Run configuration.
    arg_parser.add_argument('--logdir', required=True)
    arg_parser.add_argument('--config', required=True)
    arg_parser.add_argument('--config-args')
    arg_parser.add_argument('--step', type=int)
    arg_parser.add_argument('--section', required=True)
    # Output and decoding options.
    arg_parser.add_argument('--output', required=True)
    arg_parser.add_argument('--beam-size', required=True, type=int)
    arg_parser.add_argument('--output-history', action='store_true')
    arg_parser.add_argument('--limit', type=int)
    arg_parser.add_argument('--mode', default='infer', choices=['infer', 'debug'])
    return arg_parser.parse_args()
def _get_notebook_kwargs(initial_path=None, notebook_path=None, subcommand=None):
if ((initial_path is not None) and (notebook_path is not None)):
raise RuntimeError("'initial_path' and 'notebook_path' cannot both be set.")
if (notebook_path is not None):
if (not os.path.exists(notebook_path)):
raise RuntimeError(("Notebook path '%s' not found." % notebook_path))
if (not os.path.isfile(notebook_path)):
raise RuntimeError(("Notebook path '%s' is not a file." % notebook_path))
notebook_path = os.path.abspath(notebook_path)
cfg = get_config()
timeout = _get_jupyter_timeout(cfg)
subcommand = (subcommand or _get_jupyter_subcommand(cfg))
if (subcommand not in ('notebook', 'lab')):
raise ValueError(f"Unexpected value '{subcommand}' for Jupyter subcommand. Expected 'notebook' or 'lab'.")
if ((notebook_path is None) and (initial_path is None)):
initial_path = _get_notebook_path(cfg)
if (initial_path and (not os.path.exists(initial_path))):
raise RuntimeError("Directory '%s' does not exist.")
if (initial_path and (not os.path.isdir(initial_path))):
raise RuntimeError("Path '%s' is not a directory.")
return {'initial_path': initial_path, 'notebook_path': notebook_path, 'subcommand': subcommand, 'timeout': timeout} |
class ViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
    """Feature extractor for ViT: optionally resizes and normalizes images,
    then packs them into a BatchFeature under 'pixel_values'."""

    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=224, resample=Image.BILINEAR, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        # Fall back to the standard ImageNet statistics when none are supplied.
        if image_mean is None:
            image_mean = IMAGENET_STANDARD_MEAN
        if image_std is None:
            image_std = IMAGENET_STANDARD_STD
        self.image_mean = image_mean
        self.image_std = image_std

    def __call__(self, images: ImageInput, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchFeature:
        """Preprocess a single image or a batch and return a BatchFeature."""

        def _is_single_image(candidate):
            # One image: a PIL image, a numpy array, or a torch tensor.
            return isinstance(candidate, (Image.Image, np.ndarray)) or is_torch_tensor(candidate)

        if _is_single_image(images):
            valid = True
            batched = False
        elif isinstance(images, (list, tuple)):
            # An empty batch is accepted; otherwise the first element decides.
            valid = (len(images) == 0) or _is_single_image(images[0])
            batched = bool(len(images) and _is_single_image(images[0]))
        else:
            valid = False
            batched = False
        if not valid:
            raise ValueError('Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), `List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples).')
        if not batched:
            images = [images]
        if self.do_resize and self.size is not None:
            images = [self.resize(image=img, size=self.size, resample=self.resample) for img in images]
        if self.do_normalize:
            images = [self.normalize(image=img, mean=self.image_mean, std=self.image_std) for img in images]
        return BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
class DataCollatorForEnDe(DataCollatorForSeq2Seq):
    """Collator for an encoder-decoder model that batches text labels together
    with per-sample images and optional graph features (node ids/masks,
    n-gram labels, adjacency matrices).

    NOTE(review): field names (report_ids, lists of images per sample) suggest
    a report-generation task — confirm against the dataset code.
    """

    # Text tokenizer; only its padding_side is consulted here.
    tokenizer: PreTrainedTokenizerBase
    text_tokenizer: Optional[Any] = None
    # Separate image tokenizers allow different train/eval transforms.
    train_image_tokenizer: Optional[Any] = None
    eval_image_tokenizer: Optional[Any] = None
    # When set and it has prepare_decoder_input_ids_from_labels, the collator
    # also emits decoder_input_ids.
    model: Optional[Any] = None
    padding: Union[(bool, str, PaddingStrategy)] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    # -100 is ignored by PyTorch cross-entropy, so padded label slots don't
    # contribute to the loss.
    label_pad_token_id: int = (- 100)
    return_tensors: str = 'pt'

    def __call__(self, features, return_tensors=None):
        """Collate a list of per-example feature dicts into a model-ready batch."""
        import numpy as np
        if (return_tensors is None):
            return_tensors = self.return_tensors
        # Optional fields: each is None when the first feature lacks the key.
        labels = ([feature['labels'] for feature in features] if ('labels' in features[0].keys()) else None)
        input_pixels = ([feature['input_pixels'] for feature in features] if ('input_pixels' in features[0].keys()) else None)
        # The split is assumed uniform across the batch; the first feature decides.
        split = ([feature['split'] for feature in features][0] if ('split' in features[0].keys()) else 'eval')
        report_ids = ([feature['report_ids'] for feature in features] if ('report_ids' in features[0].keys()) else None)
        node_ids = ([feature['node_ids'] for feature in features] if ('node_ids' in features[0].keys()) else None)
        matrix = ([feature['matrix'] for feature in features] if ('matrix' in features[0].keys()) else None)
        if (input_pixels is None):
            # No precomputed pixels: images will be loaded from paths below.
            image_paths = [feature['image_path'] for feature in features]
        batch_outputs = {}
        if (labels is not None):
            batch_outputs['labels'] = []
            max_label_length = max((len(l) for l in labels))
            if (self.pad_to_multiple_of is not None):
                # Round the max length up to the nearest multiple.
                max_label_length = ((((max_label_length + self.pad_to_multiple_of) - 1) // self.pad_to_multiple_of) * self.pad_to_multiple_of)
            padding_side = self.tokenizer.padding_side
            for feature in features:
                remainder = ([self.label_pad_token_id] * (max_label_length - len(feature['labels'])))
                # Labels may be Python lists or numpy arrays; pad accordingly,
                # respecting the tokenizer's padding side.
                if isinstance(feature['labels'], list):
                    feature['labels'] = ((feature['labels'] + remainder) if (padding_side == 'right') else (remainder + feature['labels']))
                elif (padding_side == 'right'):
                    feature['labels'] = np.concatenate([feature['labels'], remainder]).astype(np.int64)
                else:
                    feature['labels'] = np.concatenate([remainder, feature['labels']]).astype(np.int64)
                batch_outputs['labels'].append(feature['labels'])
        if (node_ids is not None):
            # Graph features are padded to the longest node list in the batch.
            batch_outputs['node_ids'] = []
            batch_outputs['node_mask'] = []
            batch_outputs['ngram_labels'] = []
            max_len = max((len(l) for l in node_ids))
            for feature in features:
                remainder = ([self.label_pad_token_id] * (max_len - len(feature['node_ids'])))
                feature['node_ids'] = (feature['node_ids'] + remainder)
                remainder = ([0] * (max_len - len(feature['node_mask'])))
                feature['node_mask'] = (feature['node_mask'] + remainder)
                remainder = ([(- 1)] * (max_len - len(feature['ngram_labels'])))
                feature['ngram_labels'] = (feature['ngram_labels'] + remainder)
                batch_outputs['node_ids'].append(feature['node_ids'])
                batch_outputs['node_mask'].append(feature['node_mask'])
                batch_outputs['ngram_labels'].append(feature['ngram_labels'])
        if (matrix is not None):
            # Square adjacency matrices are zero-padded on both axes.
            batch_outputs['matrix'] = []
            max_len = max((len(l) for l in matrix))
            for feature in features:
                diff = (max_len - len(feature['matrix']))
                feature['matrix'] = np.pad(feature['matrix'], ((0, diff), (0, diff)), 'constant', constant_values=(0, 0))
                batch_outputs['matrix'].append(feature['matrix'].tolist())
        image_tokenizer = (self.train_image_tokenizer if (split == 'train') else self.eval_image_tokenizer)
        features = BatchEncoding(batch_outputs, tensor_type=return_tensors)
        if (input_pixels is None):
            # Load and tokenize images on the fly; each sample may have several
            # images, stacked on a new leading dimension.
            input_pixels = []
            for image_path in image_paths:
                pixel_value = []
                for img_path in image_path:
                    image = Image.open(img_path).convert('RGB')
                    pixel_val = image_tokenizer(image)
                    pixel_value.append(pixel_val)
                pixel_value = torch.stack(pixel_value, dim=0)
                input_pixels.append(pixel_value)
        features['input_pixels'] = torch.stack(input_pixels, dim=0)
        if (report_ids is not None):
            features['report_ids'] = report_ids
        if ((labels is not None) and (self.model is not None) and hasattr(self.model, 'prepare_decoder_input_ids_from_labels')):
            # Teacher forcing: derive decoder inputs by shifting the labels.
            decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features['labels'])
            features['decoder_input_ids'] = decoder_input_ids
        return features

    def pad_sequence(self, seqs, padding_idx, max_len):
        """Right-pad each sequence in `seqs` with `padding_idx` up to `max_len`."""
        new_seqs = []
        for seq in seqs:
            seq_len = len(seq)
            diff = (max_len - seq_len)
            new_seqs.append((seq + ([padding_idx] * diff)))
        return new_seqs
class Solution():
    """LeetCode 925 'Long Pressed Name', solved by consuming equal-character
    runs from the ends of both strings."""

    def isLongPressedName(self, name: str, typed: str) -> bool:
        """Return True iff `typed` could result from typing `name` with some
        keys held down long enough to repeat."""
        remaining_name = list(name)
        remaining_typed = list(typed)
        # `typed` can never be shorter than `name`.
        if len(remaining_name) > len(remaining_typed):
            return False
        while remaining_name and remaining_typed:
            # The trailing characters must agree before comparing run lengths.
            if remaining_name[-1] != remaining_typed[-1]:
                return False
            current = remaining_name[-1]
            run_name = 0
            while remaining_name and remaining_name[-1] == current:
                remaining_name.pop()
                run_name += 1
            run_typed = 0
            while remaining_typed and remaining_typed[-1] == current:
                remaining_typed.pop()
                run_typed += 1
            # A long press can only lengthen a run, never shorten it.
            if run_name > run_typed:
                return False
        # Both strings must be fully consumed.
        return not (remaining_name or remaining_typed)
class AnalogClock(QWidget):
    """A 200x200 analog clock widget that repaints itself once per second."""

    # Triangular hand shapes in a 200x200 logical coordinate system whose
    # origin is the widget center; negative y points toward 12 o'clock.
    hourHand = QPolygon([QPoint(7, 8), QPoint((- 7), 8), QPoint(0, (- 40))])
    minuteHand = QPolygon([QPoint(7, 8), QPoint((- 7), 8), QPoint(0, (- 70))])
    hourColor = QColor(127, 0, 127)
    # Semi-transparent (alpha 191) so the minute hand doesn't fully hide the hour hand.
    minuteColor = QColor(0, 127, 127, 191)

    def __init__(self, parent=None):
        super(AnalogClock, self).__init__(parent)
        # Repaint every second; QWidget.update schedules a paintEvent.
        timer = QTimer(self)
        timer.timeout.connect(self.update)
        timer.start(1000)
        self.setWindowTitle('Analog Clock')
        self.resize(200, 200)

    def paintEvent(self, event):
        """Draw the hour/minute hands and tick marks for the current time."""
        side = min(self.width(), self.height())
        time = QTime.currentTime()
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # Move the origin to the center and scale so the logical canvas stays
        # 200x200 regardless of the actual widget size.
        painter.translate((self.width() / 2), (self.height() / 2))
        painter.scale((side / 200.0), (side / 200.0))
        painter.setPen(Qt.NoPen)
        painter.setBrush(AnalogClock.hourColor)
        painter.save()
        # 30 degrees per hour, plus a fraction for the elapsed minutes.
        painter.rotate((30.0 * (time.hour() + (time.minute() / 60.0))))
        painter.drawConvexPolygon(AnalogClock.hourHand)
        painter.restore()
        painter.setPen(AnalogClock.hourColor)
        # Twelve hour tick marks around the dial.
        for i in range(12):
            painter.drawLine(88, 0, 96, 0)
            painter.rotate(30.0)
        painter.setPen(Qt.NoPen)
        painter.setBrush(AnalogClock.minuteColor)
        painter.save()
        # 6 degrees per minute, plus a fraction for the elapsed seconds.
        painter.rotate((6.0 * (time.minute() + (time.second() / 60.0))))
        painter.drawConvexPolygon(AnalogClock.minuteHand)
        painter.restore()
        painter.setPen(AnalogClock.minuteColor)
        # Minute ticks, skipping positions already marked by hour ticks.
        for j in range(60):
            if ((j % 5) != 0):
                painter.drawLine(92, 0, 96, 0)
            painter.rotate(6.0)
def aggregate_costs(n, flatten=False, opts=None, existing_only=False):
    """Aggregate capital and marginal costs of network `n`, grouped by carrier.

    Args:
        n: A network object exposing iterate_components / per-component
           DataFrames (PyPSA-style).
        flatten: If True, collapse to a single series, folding marginal costs
           of conventional technologies into renamed entries.
        opts: Required when flatten=True; must provide opts['conv_techs'].
        existing_only: Use the nominal capacity columns as-is instead of the
           optimised '*_opt' columns.

    Returns:
        pd.Series of costs keyed by (component, cost kind, carrier), or the
        flattened per-carrier series when flatten=True.
    """
    components = dict(
        Link=('p_nom', 'p0'),
        Generator=('p_nom', 'p'),
        StorageUnit=('p_nom', 'p'),
        Store=('e_nom', 'p'),
        Line=('s_nom', None),
        Transformer=('s_nom', None),
    )
    cost_frames = {}
    # skip_empty=False keeps the iterator aligned with components.values().
    component_iter = n.iterate_components(components.keys(), skip_empty=False)
    for comp, (nom_attr, flow_attr) in zip(component_iter, components.values()):
        if comp.df.empty:
            continue
        if not existing_only:
            # Use the optimised capacity column (e.g. 'p_nom_opt').
            nom_attr += '_opt'
        capital = (comp.df[nom_attr] * comp.df.capital_cost).groupby(comp.df.carrier).sum()
        cost_frames[(comp.list_name, 'capital')] = capital
        if flow_attr is not None:
            dispatch = comp.pnl[flow_attr].sum()
            if comp.name == 'StorageUnit':
                # Count only discharge for storage units.
                dispatch = dispatch.loc[dispatch > 0]
            cost_frames[(comp.list_name, 'marginal')] = (dispatch * comp.df.marginal_cost).groupby(comp.df.carrier).sum()
    costs = pd.concat(cost_frames)
    if flatten:
        assert opts is not None
        conv_techs = opts['conv_techs']
        costs = costs.reset_index(level=0, drop=True)
        costs = costs['capital'].add(
            costs['marginal'].rename({t: t + ' marginal' for t in conv_techs}),
            fill_value=0.0,
        )
    return costs
class SmoothMSE(nn.Module):
    """Mean squared error with each per-element error floored at `threshold`.

    Clamping the squared difference from below keeps the averaged loss from
    collapsing to zero for predictions that are already very close.
    """

    def __init__(self, opt=None, threshold=0.001):
        """Args:
            opt: Optional options object, stored for interface compatibility
                (unused in the computation).
            threshold: Lower bound applied to each squared difference.
        """
        super().__init__()
        self.opt = opt
        self.threshold = threshold

    def forward(self, x1, x2):
        """Return mean(max((x1 - x2)**2, threshold)) over all elements.

        Accepts tensors of any matching/broadcastable shape; the previous
        version unpacked `x1.shape` into four unused locals, which needlessly
        restricted inputs to exactly 4-D tensors.
        """
        mse = ((x1 - x2) ** 2).clamp(min=self.threshold)
        return mse.mean()
def symbolic_trace(td_module: TensorDictModule) -> TDGraphModule:
    """Trace a TensorDictModule into a graph module, dispatching on its type.

    Raises:
        TypeError: If `td_module` is neither a TensorDictSequential nor a
            TensorDictModule.
    """
    # The more specific subtype must be checked first: TensorDictSequential
    # is itself a TensorDictModule.
    dispatch = (
        (TensorDictSequential, _trace_tensordictsequential),
        (TensorDictModule, _trace_tensordictmodule),
    )
    for module_cls, tracer in dispatch:
        if isinstance(td_module, module_cls):
            return tracer(td_module)
    raise TypeError(f'Unsupported type {type(td_module)}')
def main():
    """Fine-tune and/or evaluate a seq2seq summarization model.

    Parses arguments (JSON file or CLI), configures logging, loads the
    dataset/config/tokenizer/model, tokenizes the splits, then runs the
    requested combination of train / eval / predict with Seq2SeqTrainer and
    writes metrics, predictions and a model card.

    Bug fix: the startup warning concatenated two f-strings with no separator,
    logging e.g. "n_gpu: 1distributed training: ..."; a ", " is now inserted.
    """
    # --- Argument parsing: a single .json config file or CLI flags. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if (len(sys.argv) == 2) and sys.argv[1].endswith('.json'):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    send_example_telemetry('run_summarization', model_args, data_args)
    # --- Logging setup ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != (- 1))}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')
    if (data_args.source_prefix is None) and (model_args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b']):
        logger.warning("You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with `--source_prefix 'summarize: ' `")
    # --- Checkpoint detection for resuming interrupted runs. ---
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if (last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif (last_checkpoint is not None) and (training_args.resume_from_checkpoint is None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # --- Data loading: hub dataset or local csv/json files. ---
    if data_args.dataset_name is not None:
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
            extension = data_args.train_file.split('.')[(- 1)]
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
            extension = data_args.validation_file.split('.')[(- 1)]
        if data_args.test_file is not None:
            data_files['test'] = data_args.test_file
            extension = data_args.test_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    # --- Model, config and tokenizer. ---
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    # Grow the embedding matrix only if the tokenizer has more tokens.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if len(tokenizer) > embedding_size:
        model.resize_token_embeddings(len(tokenizer))
    if (model.config.decoder_start_token_id is None) and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    # Optionally resize position embeddings when inputs exceed the model's max.
    if hasattr(model.config, 'max_position_embeddings') and (model.config.max_position_embeddings < data_args.max_source_length):
        if model_args.resize_position_embeddings is None:
            logger.warning(f"Increasing the model's number of position embedding vectors from {model.config.max_position_embeddings} to {data_args.max_source_length}.")
            model.resize_position_embeddings(data_args.max_source_length)
        elif model_args.resize_position_embeddings:
            model.resize_position_embeddings(data_args.max_source_length)
        else:
            raise ValueError(f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has {model.config.max_position_embeddings} position encodings. Consider either reducing `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the model's position encodings by passing `--resize_position_embeddings`.")
    prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
    # --- Pick the column names from whichever split we will use first. ---
    if training_args.do_train:
        if 'train' not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        column_names = raw_datasets['train'].column_names
    elif training_args.do_eval:
        if 'validation' not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        column_names = raw_datasets['validation'].column_names
    elif training_args.do_predict:
        if 'test' not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset')
        column_names = raw_datasets['test'].column_names
    else:
        logger.info('There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.')
        return
    # --- Multilingual tokenizers need explicit source/target languages. ---
    if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
        assert (data_args.lang is not None), f'{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument'
        tokenizer.src_lang = data_args.lang
        tokenizer.tgt_lang = data_args.lang
        forced_bos_token_id = (tokenizer.lang_code_to_id[data_args.forced_bos_token] if (data_args.forced_bos_token is not None) else None)
        model.config.forced_bos_token_id = forced_bos_token_id
    # --- Resolve text/summary columns (dataset defaults or user overrides). ---
    dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
    if data_args.text_column is None:
        text_column = (dataset_columns[0] if (dataset_columns is not None) else column_names[0])
    else:
        text_column = data_args.text_column
        if text_column not in column_names:
            raise ValueError(f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}")
    if data_args.summary_column is None:
        summary_column = (dataset_columns[1] if (dataset_columns is not None) else column_names[1])
    else:
        summary_column = data_args.summary_column
        if summary_column not in column_names:
            raise ValueError(f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}")
    max_target_length = data_args.max_target_length
    padding = ('max_length' if data_args.pad_to_max_length else False)
    if (training_args.label_smoothing_factor > 0) and (not hasattr(model, 'prepare_decoder_input_ids_from_labels')):
        logger.warning(f'label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory')

    def preprocess_function(examples):
        # Drop examples where either side is empty, then tokenize both sides.
        (inputs, targets) = ([], [])
        for i in range(len(examples[text_column])):
            if examples[text_column][i] and examples[summary_column][i]:
                inputs.append(examples[text_column][i])
                targets.append(examples[summary_column][i])
        inputs = [(prefix + inp) for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
        # Replace pad ids with -100 so they are ignored by the loss.
        if (padding == 'max_length') and data_args.ignore_pad_token_for_loss:
            labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
        model_inputs['labels'] = labels['input_ids']
        return model_inputs

    # --- Tokenize the splits that will actually be used. ---
    if training_args.do_train:
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
    if training_args.do_predict:
        max_target_length = data_args.val_max_target_length
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
    # --- Collator and metric. ---
    label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
    data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
    metric = evaluate.load('rouge')

    def postprocess_text(preds, labels):
        # rougeLSum expects newline-separated sentences.
        preds = [pred.strip() for pred in preds]
        labels = [label.strip() for label in labels]
        preds = ['\n'.join(nltk.sent_tokenize(pred)) for pred in preds]
        labels = ['\n'.join(nltk.sent_tokenize(label)) for label in labels]
        return (preds, labels)

    def compute_metrics(eval_preds):
        (preds, labels) = eval_preds
        if isinstance(preds, tuple):
            preds = preds[0]
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        if data_args.ignore_pad_token_for_loss:
            # Restore pad ids so batch_decode can handle the labels.
            labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
        result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
        result = {k: round((v * 100), 4) for (k, v) in result.items()}
        prediction_lens = [np.count_nonzero((pred != tokenizer.pad_token_id)) for pred in preds]
        result['gen_len'] = np.mean(prediction_lens)
        return result

    # --- Generation settings and trainer. ---
    training_args.generation_max_length = (training_args.generation_max_length if (training_args.generation_max_length is not None) else data_args.val_max_target_length)
    training_args.generation_num_beams = (data_args.num_beams if (data_args.num_beams is not None) else training_args.generation_num_beams)
    trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
    # --- Training ---
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # --- Evaluation ---
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(metric_key_prefix='eval')
        max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # --- Prediction ---
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predict_results = trainer.predict(predict_dataset, metric_key_prefix='predict')
        metrics = predict_results.metrics
        max_predict_samples = (data_args.max_predict_samples if (data_args.max_predict_samples is not None) else len(predict_dataset))
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)
        if trainer.is_world_process_zero():
            if training_args.predict_with_generate:
                predictions = tokenizer.batch_decode(predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                predictions = [pred.strip() for pred in predictions]
                output_prediction_file = os.path.join(training_args.output_dir, 'generated_predictions.txt')
                with open(output_prediction_file, 'w') as writer:
                    writer.write('\n'.join(predictions))
    # --- Model card / hub upload metadata. ---
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'summarization'}
    if data_args.dataset_name is not None:
        kwargs['dataset_tags'] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs['dataset_args'] = data_args.dataset_config_name
            kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
        else:
            kwargs['dataset'] = data_args.dataset_name
    if data_args.lang is not None:
        kwargs['language'] = data_args.lang
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
    return results
class CustomNamespace(argparse.Namespace):
    """Typed argparse namespace for this tool's command-line options.

    These are annotations only — argparse assigns the actual values at parse
    time; the annotations exist for static type checking. Trailing
    underscores avoid clashing with Python keywords (`from`) and builtins
    (`format`). NOTE(review): the option names suggest a package
    license-listing CLI — confirm against the parser definition.
    """
    from_: 'FromArg'
    order: 'OrderArg'
    format_: 'FormatArg'
    summary: bool
    output_file: str
    ignore_packages: List[str]
    packages: List[str]
    # Toggles controlling which metadata columns appear in the output.
    with_system: bool
    with_authors: bool
    with_urls: bool
    with_description: bool
    with_license_file: bool
    no_license_path: bool
    with_notice_file: bool
    # Post-processing of the rendered strings (e.g. codepage filtering).
    filter_strings: bool
    filter_code_page: str
    # Exit-status policies based on detected license names.
    fail_on: Optional[str]
    allow_only: Optional[str]
@patch('pypyr.steps.filewritejson.Path')
def test_filewritejson_pass_with_payload_substitutions_encoding(mock_path):
    """filewritejson formats path/payload/encoding from context and writes JSON.

    Bug fix: the first line had degraded to a bare string expression
    `('pypyr.steps.filewritejson.Path')`, so Path was never patched and the
    test function took an argument pytest could not supply; restored the
    `@patch` decorator.
    """
    context = Context({'k1': 'v1', 'intkey': 3, 'pathkey': '/arb/path', 'enc': 'utf-32', 'parent': [0, 1, {'child': ['{k1}', '{intkey}', ['a', 'b', 'c']]}], 'nested': '{parent[2][child]}', 'fileWriteJson': {'path': '{pathkey}', 'payload': '{parent[2]}', 'encoding': '{enc}'}})
    with io.StringIO() as out_text:
        # Capture everything the step writes through the mocked open().
        with patch('pypyr.steps.filewritejson.open', mock_open()) as mock_output:
            mock_output.return_value.write.side_effect = out_text.write
            filewrite.run_step(context)
        # The step must not mutate the formatting expressions in context.
        assert context, "context shouldn't be None"
        assert (len(context) == 7), 'context should have 7 items'
        assert (context['k1'] == 'v1')
        assert (context['fileWriteJson'] == {'path': '{pathkey}', 'payload': '{parent[2]}', 'encoding': '{enc}'})
        assert (context['parent'] == [0, 1, {'child': ['{k1}', '{intkey}', ['a', 'b', 'c']]}])
        # Path formatting, parent-dir creation and encoding pass-through.
        mock_path.assert_called_once_with('/arb/path')
        mocked_path = mock_path.return_value
        mocked_path.parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
        mock_output.assert_called_once_with(mocked_path, 'w', encoding='utf-32')
        assert (out_text.getvalue() == '{\n  "child": [\n    "v1",\n    3,\n    [\n      "a",\n      "b",\n      "c"\n    ]\n  ]\n}')
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
    """Wrap `iterator` in a progress bar chosen by args.log_format.

    Optionally layers a tensorboard logger on top when a log dir is
    configured and this is the master process.

    Raises:
        ValueError: For an unrecognized args.log_format.
    """
    if args.log_format is None:
        args.log_format = no_progress_bar if args.no_progress_bar else default
    # tqdm needs a tty; degrade to the simple bar when stderr is redirected.
    if args.log_format == 'tqdm' and not sys.stderr.isatty():
        args.log_format = 'simple'
    chosen_format = args.log_format
    if chosen_format == 'json':
        bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)
    elif chosen_format == 'none':
        bar = noop_progress_bar(iterator, epoch, prefix)
    elif chosen_format == 'simple':
        bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval)
    elif chosen_format == 'tqdm':
        bar = tqdm_progress_bar(iterator, epoch, prefix)
    else:
        raise ValueError('Unknown log format: {}'.format(args.log_format))
    if args.tensorboard_logdir and distributed_utils.is_master(args):
        try:
            # Prefer the internal tbmf wrapper when its dependencies exist.
            import palaas
            from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper
            bar = fb_tbmf_wrapper(bar, args, args.log_interval)
        except ImportError:
            bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)
    return bar
def _write_to_tfrecord(filenames, labels, tfrecord_writer):
    """Encode each image file as PNG and write it to a TFRecord (TF1 graph API).

    Args:
        filenames: List of image file paths.
        labels: Integer labels aligned index-for-index with `filenames`.
        tfrecord_writer: An open TFRecord writer with a `write` method.
    """
    num_images = len(filenames)
    # Build the tiny placeholder -> PNG-encoder graph once; reuse it for every image.
    with tf.Graph().as_default():
        image_placeholder = tf.placeholder(dtype=tf.uint8)
        encoded_image = tf.image.encode_png(image_placeholder)
        with tf.Session('') as sess:
            for i in range(num_images):
                # In-place progress line on stdout ('\r' rewinds to column 0).
                sys.stdout.write(('\r>> Reading images %d/%d' % ((i + 1), num_images)))
                sys.stdout.flush()
                image_path = filenames[i]
                image = Image.open(image_path)
                label = labels[i]
                # NOTE(review): the PIL image is fed directly into the feed_dict;
                # this relies on numpy's array-interface conversion and assumes
                # the image data is already _IMAGE_SIZE x _IMAGE_SIZE uint8 —
                # TODO confirm with the calling pipeline.
                png_string = sess.run(encoded_image, feed_dict={image_placeholder: image})
                example = utils.image_to_tfexample(png_string, 'png', _IMAGE_SIZE, _IMAGE_SIZE, label)
                tfrecord_writer.write(example.SerializeToString())
class PuzzleSystemCmdSet(CmdSet):
    """Command set bundling every puzzle-system command."""

    def at_cmdset_creation(self):
        """Populate the cmdset with all puzzle-related commands."""
        super(PuzzleSystemCmdSet, self).at_cmdset_creation()
        # Registration order matches the original one-by-one add() calls.
        puzzle_commands = (
            CmdCreatePuzzleRecipe,
            CmdEditPuzzle,
            CmdArmPuzzle,
            CmdListPuzzleRecipes,
            CmdListArmedPuzzles,
            CmdUsePuzzleParts,
        )
        for command_cls in puzzle_commands:
            self.add(command_cls())
class MenuButtonWebApp(MenuButton):
    """Menu button that launches a Web App when pressed.

    Identity for equality/hash is (type, text, web_app) via `_id_attrs`.
    """

    __slots__ = ('text', 'web_app')

    def __init__(self, text: str, web_app: WebAppInfo, *, api_kwargs: Optional[JSONDict]=None):
        """Args:
            text: Label shown on the button.
            web_app: Description of the Web App to launch.
            api_kwargs: Extra fields passed through to the Bot API.
        """
        super().__init__(type=constants.MenuButtonType.WEB_APP, api_kwargs=api_kwargs)
        # Instances are frozen; attributes may only be set inside _unfrozen().
        with self._unfrozen():
            self.text: str = text
            self.web_app: WebAppInfo = web_app
            self._id_attrs = (self.type, self.text, self.web_app)

    # Bug fix: de_json takes `cls` and calls cls._parse_data, but was missing
    # the @classmethod decorator, so calling it on the class would have bound
    # the data dict as `cls`.
    @classmethod
    def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['MenuButtonWebApp']:
        """Build a MenuButtonWebApp from a Bot API dict, or None for empty data."""
        data = cls._parse_data(data)
        if not data:
            return None
        data['web_app'] = WebAppInfo.de_json(data.get('web_app'), bot)
        return super().de_json(data=data, bot=bot)
class Ui_Form(object):
    """Qt Designer-style generated UI builder for a small batch-processing form.

    NOTE: generated-style code. Widget construction, fixed geometry, the
    ``raise_()`` sequence (which defines z-order) and the retranslate pass
    are kept verbatim; prefer editing the designer source over this class.
    """

    def setupUi(self, Form):
        """Create all widgets on *Form*, set geometry, then wire slots by name."""
        if (not Form.objectName()):
            Form.setObjectName(u'Form')
        Form.resize(410, 401)
        # Row 1: data directory line edit + browse button + label.
        self.directory = QLineEdit(Form)
        self.directory.setObjectName(u'directory')
        self.directory.setGeometry(QRect(140, 39, 251, 21))
        self.load_directory = QPushButton(Form)
        self.load_directory.setObjectName(u'load_directory')
        self.load_directory.setGeometry(QRect(370, 41, 16, 18))
        self.label = QLabel(Form)
        self.label.setObjectName(u'label')
        self.label.setGeometry(QRect(22, 40, 101, 20))
        # Row 2: homography file picker.
        self.label_2 = QLabel(Form)
        self.label_2.setObjectName(u'label_2')
        self.label_2.setGeometry(QRect(20, 80, 111, 20))
        self.load_homography = QPushButton(Form)
        self.load_homography.setObjectName(u'load_homography')
        self.load_homography.setGeometry(QRect(370, 81, 16, 18))
        self.homography_filename = QLineEdit(Form)
        self.homography_filename.setObjectName(u'homography_filename')
        self.homography_filename.setGeometry(QRect(138, 79, 251, 21))
        # Row 3: index file picker.
        self.list_filename = QLineEdit(Form)
        self.list_filename.setObjectName(u'list_filename')
        self.list_filename.setGeometry(QRect(140, 118, 251, 21))
        self.label_3 = QLabel(Form)
        self.label_3.setObjectName(u'label_3')
        self.label_3.setGeometry(QRect(22, 119, 61, 20))
        self.load_list = QPushButton(Form)
        self.load_list.setObjectName(u'load_list')
        self.load_list.setGeometry(QRect(370, 120, 16, 18))
        # Row 4: output filename.
        self.label_4 = QLabel(Form)
        self.label_4.setObjectName(u'label_4')
        self.label_4.setGeometry(QRect(22, 161, 91, 20))
        self.output_filename = QLineEdit(Form)
        self.output_filename.setObjectName(u'output_filename')
        self.output_filename.setGeometry(QRect(140, 160, 251, 21))
        # Progress bar plus confirm button and two status labels.
        self.progressBar = QProgressBar(Form)
        self.progressBar.setObjectName(u'progressBar')
        self.progressBar.setGeometry(QRect(30, 230, 351, 23))
        self.progressBar.setValue(0)
        self.confirm = QPushButton(Form)
        self.confirm.setObjectName(u'confirm')
        self.confirm.setGeometry(QRect(320, 270, 75, 24))
        self.error_message = QLabel(Form)
        self.error_message.setObjectName(u'error_message')
        self.error_message.setGeometry(QRect(60, 270, 241, 20))
        self.error_message.setLayoutDirection(Qt.LeftToRight)
        self.error_message.setTextFormat(Qt.AutoText)
        self.error_message.setAlignment(((Qt.AlignRight | Qt.AlignTrailing) | Qt.AlignVCenter))
        self.progress_message = QLabel(Form)
        self.progress_message.setObjectName(u'progress_message')
        self.progress_message.setGeometry(QRect(30, 210, 311, 20))
        self.progress_message.setLayoutDirection(Qt.LeftToRight)
        self.progress_message.setTextFormat(Qt.AutoText)
        self.progress_message.setAlignment(((Qt.AlignLeading | Qt.AlignLeft) | Qt.AlignVCenter))
        # raise_() calls define stacking (z-) order — do not reorder.
        self.homography_filename.raise_()
        self.directory.raise_()
        self.load_directory.raise_()
        self.label.raise_()
        self.label_2.raise_()
        self.load_homography.raise_()
        self.list_filename.raise_()
        self.label_3.raise_()
        self.load_list.raise_()
        self.label_4.raise_()
        self.output_filename.raise_()
        self.progressBar.raise_()
        self.confirm.raise_()
        self.error_message.raise_()
        self.progress_message.raise_()
        self.retranslateUi(Form)
        QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Assign user-visible (translatable) strings to every widget."""
        Form.setWindowTitle(QCoreApplication.translate('Form', u'Form', None))
        self.load_directory.setText(QCoreApplication.translate('Form', u'...', None))
        self.label.setText(QCoreApplication.translate('Form', u'data directory', None))
        self.label_2.setText(QCoreApplication.translate('Form', u'homography file', None))
        self.load_homography.setText(QCoreApplication.translate('Form', u'...', None))
        self.label_3.setText(QCoreApplication.translate('Form', u'index file', None))
        self.load_list.setText(QCoreApplication.translate('Form', u'...', None))
        self.label_4.setText(QCoreApplication.translate('Form', u'output filename', None))
        self.progressBar.setFormat(QCoreApplication.translate('Form', u'%p%', None))
        self.confirm.setText(QCoreApplication.translate('Form', u'confirm', None))
        self.error_message.setText('')
        self.progress_message.setText('')
class MineIDOAuth2Test(OAuth2Test):
    """Login and partial-pipeline tests for the MineID OAuth2 backend."""
    backend_path = 'social_core.backends.mineid.MineIDOAuth2'
    # NOTE(review): the original literal here was truncated/unterminated
    # (a SyntaxError as shipped); restored to the backend's profile
    # endpoint — TODO confirm against social_core.backends.mineid.
    user_data_url = 'https://www.mineid.org/api/app/me'
    expected_username = ''
    access_token_body = json.dumps({'access_token': 'foobar', 'token_type': 'bearer'})
    user_data_body = json.dumps({'email': '', 'primary_profile': None})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
class _BaseTestDB(object):
    """Shared test suite run against each TaskWarrior backend.

    Subclasses provide ``class_to_test`` and ``should_skip``. ``setup``
    creates a throwaway taskrc plus data directory (declaring string/date/
    numeric UDAs) so each test starts from an empty database.
    """

    def setup(self):
        """Create a temp taskrc and empty data files, then open the backend."""
        if self.should_skip():
            pytest.skip(('%r unsupported on this system' % self.class_to_test))
        (fd, fname) = tempfile.mkstemp(prefix='taskw-testsrc')
        dname = tempfile.mkdtemp(prefix='taskw-tests-data')
        with open(fname, 'w') as f:
            f.writelines([('data.location=%s\n' % dname), 'uda.somestring.label=Testing String\n', 'uda.somestring.type=string\n', 'uda.somedate.label=Testing Date\n', 'uda.somedate.type=date\n', 'uda.somenumber.label=Testing Number\n', 'uda.somenumber.type=numeric\n'])
        for piece in ['completed', 'pending', 'undo']:
            with open(os.path.sep.join([dname, (piece + '.data')]), 'w'):
                pass
        (self.fname, self.dname) = (fname, dname)
        self.tw = self.class_to_test(config_filename=fname)
        if (self.class_to_test == TaskWarriorShellout):
            self.tw_marshal = self.class_to_test(config_filename=fname, marshal=True)

    def teardown(self):
        """Remove the temp taskrc and data directory.

        Fixed: this was ``tearDown`` (unittest spelling). pytest only calls
        the lowercase nose-style hook on plain (non-unittest) classes, so
        the camelCase method never ran and the temp files leaked.
        """
        os.remove(self.fname)
        shutil.rmtree(self.dname)

    def test_has_two_categories(self):
        tasks = self.tw.load_tasks()
        assert (len(tasks) == 2)
        assert ('pending' in tasks)
        assert ('completed' in tasks)

    def test_empty_db(self):
        tasks = self.tw.load_tasks()
        assert (len(sum(tasks.values(), [])) == 0)

    def test_add(self):
        self.tw.task_add('foobar')
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)

    def test_unchanging_load_tasks(self):
        # load_tasks must be a pure read: two calls, same result.
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 0)
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 0)

    def test_completion_raising_unspecified(self):
        with pytest.raises(KeyError):
            self.tw.task_done()

    def test_completing_task_by_id_unspecified(self):
        self.tw.task_add('foobar')
        self.tw.task_done(id=1)
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 0)
        assert (len(tasks['completed']) == 1)
        assert (len(sum(tasks.values(), [])) == 1)
        assert (tasks['completed'][0]['end'] is not None)
        assert (tasks['completed'][0]['status'] == 'completed')

    def test_completing_task_by_id_specified(self):
        self.tw.task_add('foobar')
        self.tw.task_done(id=1)
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 0)
        assert (len(tasks['completed']) == 1)
        assert (len(sum(tasks.values(), [])) == 1)
        assert (tasks['completed'][0]['status'] == 'completed')

    def test_completing_task_by_id_retrieved(self):
        task = self.tw.task_add('foobar')
        self.tw.task_done(id=task['id'])
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 0)
        assert (len(tasks['completed']) == 1)
        assert (len(sum(tasks.values(), [])) == 1)
        assert (tasks['completed'][0]['status'] == 'completed')

    def test_completing_task_by_uuid(self):
        self.tw.task_add('foobar')
        uuid = self.tw.load_tasks()['pending'][0]['uuid']
        self.tw.task_done(uuid=uuid)
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 0)
        assert (len(tasks['completed']) == 1)
        assert (len(sum(tasks.values(), [])) == 1)
        assert (tasks['completed'][0]['status'] == 'completed')

    def test_get_task_mismatch(self):
        # id and uuid pointing at different tasks must be rejected.
        self.tw.task_add('foobar')
        self.tw.task_add('bazbar')
        uuid = self.tw.load_tasks()['pending'][0]['uuid']
        with pytest.raises(KeyError):
            self.tw.get_task(id=2, uuid=uuid)

    def test_updating_task(self):
        self.tw.task_add('foobar')
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)
        task = tasks['pending'][0]
        task['priority'] = 'L'
        self.tw.task_update(task)
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)
        assert (tasks['pending'][0]['priority'] == 'L')
        # Strip volatile fields before comparing; best-effort on purpose.
        try:
            del tasks['pending'][0]['urgency']
            del task['urgency']
            del tasks['pending'][0]['id']
            del tasks['pending'][0]['modified']
        except Exception:
            pass
        if ('modified' in task):
            del task['modified']
        assert (tasks['pending'][0] == task)

    def test_update_exc(self):
        # A task with no id/uuid cannot be updated.
        task = dict(description='lol')
        with pytest.raises(KeyError):
            self.tw.task_update(task)

    def test_add_complicated(self):
        self.tw.task_add('foobar', uuid='1234-1234', project='some_project')
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)

    def test_add_timestamp(self):
        self.tw.task_add('foobar', uuid='1234-1234', project='some_project', entry='T000000Z')
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)
        assert (tasks['pending'][0]['entry'] == 'T000000Z')

    def test_add_datetime(self):
        self.tw.task_add('foobar', uuid='1234-1234', project='some_project', entry=datetime.datetime(2011, 1, 1, tzinfo=dateutil.tz.tzutc()))
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)
        assert tasks['pending'][0]['entry'].startswith('T')

    def test_add_with_uda_string(self):
        self.tw.task_add('foobar', somestring='this is a uda')
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)
        task = tasks['pending'][0]
        assert (task['somestring'] == 'this is a uda')

    def test_add_with_uda_date(self):
        self.tw.task_add('foobar', somedate=datetime.datetime(2011, 1, 1, tzinfo=dateutil.tz.tzutc()))
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)
        task = tasks['pending'][0]
        assert task['somedate'].startswith('T')

    def test_remove_uda_string(self):
        # Setting a UDA to None deletes it from the stored task.
        task = self.tw.task_add('foobar', somestring='this is a uda')
        task['somestring'] = None
        (id, task) = self.tw.task_update(task)
        with pytest.raises(KeyError):
            task['somestring']

    def test_remove_uda_date(self):
        task = self.tw.task_add('foobar', somedate=datetime.datetime(2011, 1, 1))
        task['somedate'] = None
        (id, task) = self.tw.task_update(task)
        with pytest.raises(KeyError):
            task['somedate']

    def test_remove_uda_numeric(self):
        task = self.tw.task_add('foobar', somenumber=15)
        task['somenumber'] = None
        (id, task) = self.tw.task_update(task)
        with pytest.raises(KeyError):
            task['somenumber']

    def test_completing_completed_task(self):
        task = self.tw.task_add('foobar')
        self.tw.task_done(uuid=task['uuid'])
        with pytest.raises(ValueError):
            self.tw.task_done(uuid=task['uuid'])

    def test_updating_completed_task(self):
        task = self.tw.task_add('foobar')
        task = self.tw.task_done(uuid=task['uuid'])
        task['priority'] = 'L'
        (id, task) = self.tw.task_update(task)
        assert (task['priority'] == 'L')

    def test_get_task_completed(self):
        task = self.tw.task_add('foobar')
        task = self.tw.task_done(uuid=task['uuid'])
        (id, _task) = self.tw.get_task(uuid=task['uuid'])
        assert (id is None)
        assert (_task['uuid'] == task['uuid'])

    def test_load_task_pending_command(self):
        tasks = self.tw.load_tasks(command='pending')
        assert (len(tasks) == 1)
        assert ('pending' in tasks)

    def test_load_task_completed_command(self):
        tasks = self.tw.load_tasks(command='completed')
        assert (len(tasks) == 1)
        assert ('completed' in tasks)

    def test_load_task_with_unknown_command(self):
        with pytest.raises(ValueError):
            self.tw.load_tasks(command='foobar')

    def test_updating_deleted_task(self):
        task = self.tw.task_add('foobar')
        task = self.tw.task_delete(uuid=task['uuid'])
        task['priority'] = 'L'
        (id, task) = self.tw.task_update(task)
        assert (task['priority'] == 'L')

    def test_delete(self):
        task = self.tw.task_add('foobar')
        self.tw.task_delete(uuid=task['uuid'])
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 0)

    def test_delete_already_deleted(self):
        task = self.tw.task_add('foobar')
        self.tw.task_delete(uuid=task['uuid'])
        with pytest.raises(ValueError):
            self.tw.task_delete(uuid=task['uuid'])

    def test_load_tasks_with_one_each(self):
        self.tw.task_add('foobar1')
        task2 = self.tw.task_add('foobar2')
        task2 = self.tw.task_done(uuid=task2['uuid'])
        tasks = self.tw.load_tasks()
        assert (len(tasks['pending']) == 1)
        assert (len(tasks['completed']) == 1)
        self.tw.get_task(description='foobar1')
def test_fail_on_errors(error_pytester: Pytester) -> None:
    """Stepwise mode stops at the first error and skips later tests."""
    outcome = error_pytester.runpytest('-v', '--strict-markers', '--stepwise')
    # stderr should be clean apart from resource warnings.
    assert (_strip_resource_warnings(outcome.stderr.lines) == [])
    captured = outcome.stdout.str()
    assert ('test_error ERROR' in captured)
    assert ('test_success_after_fail' not in captured)
class TestUtils(unittest.TestCase):
    """Round-trip serialization checks for Problem."""

    def test_load(self):
        """Saving then loading a problem restores the name from the file."""
        original = Problem(P=np.eye(3), q=np.zeros(3), G=None, h=None, A=None, b=None, lb=None, ub=None, name='TEST')
        target = os.path.join(tempfile.gettempdir(), 'FOOBAR.npz')
        original.save(target)
        restored = Problem.load(target)
        self.assertEqual(restored.name, 'FOOBAR')
class Extract():
    """Face extraction pipeline.

    Loads frames, runs the detector/aligner passes, post-processes, saves
    aligned face images through a background thread and writes an
    alignments file.
    """

    def __init__(self, arguments):
        logger.debug('Initializing %s: (args: %s', self.__class__.__name__, arguments)
        self.args = arguments
        Utils.set_verbosity(self.args.loglevel)
        self.output_dir = get_folder(self.args.output_dir)
        logger.info('Output Directory: %s', self.args.output_dir)
        self.images = Images(self.args)
        self.alignments = Alignments(self.args, True, self.images.is_video)
        self.post_process = PostProcess(arguments)
        configfile = (self.args.configfile if hasattr(self.args, 'configfile') else None)
        normalization = (None if (self.args.normalization == 'none') else self.args.normalization)
        self.extractor = Extractor(self.args.detector, self.args.aligner, self.args.loglevel, configfile=configfile, multiprocess=(not self.args.singleprocess), rotate_images=self.args.rotate_images, min_size=self.args.min_size, normalize_method=normalization)
        self.save_queue = queue_manager.get_queue('extract_save')
        self.threads = list()
        self.verify_output = False
        self.save_interval = None
        if hasattr(self.args, 'save_interval'):
            self.save_interval = self.args.save_interval
        logger.debug('Initialized %s', self.__class__.__name__)

    # Fixed: restored @property. The attribute is consumed without a call
    # at several sites (e.g. `idx % self.skip_num`,
    # `images_found // self.skip_num`), which would otherwise operate on
    # the bound method object and raise TypeError.
    @property
    def skip_num(self):
        """Process every nth frame (1 = every frame)."""
        return (self.args.extract_every_n if hasattr(self.args, 'extract_every_n') else 1)

    def process(self):
        """Run the full pipeline: spin up I/O threads, extract, finalize."""
        logger.info('Starting, this may take a while...')
        self.threaded_io('load')
        self.threaded_io('save')
        self.run_extraction()
        for thread in self.threads:
            thread.join()
        self.alignments.save()
        Utils.finalize((self.images.images_found // self.skip_num), self.alignments.faces_count, self.verify_output)

    def threaded_io(self, task, io_args=None):
        """Start the load/save/reload I/O function in its own thread."""
        logger.debug("Threading task: (Task: '%s')", task)
        io_args = (tuple() if (io_args is None) else (io_args,))
        if (task == 'load'):
            func = self.load_images
        elif (task == 'save'):
            func = self.save_faces
        elif (task == 'reload'):
            func = self.reload_images
        io_thread = MultiThread(func, *io_args, thread_count=1)
        io_thread.start()
        self.threads.append(io_thread)

    def load_images(self):
        """Feed frames into the extractor's input queue, honoring skips."""
        logger.debug('Load Images: Start')
        load_queue = self.extractor.input_queue
        idx = 0
        for (filename, image) in self.images.load():
            idx += 1
            if load_queue.shutdown.is_set():
                logger.debug('Load Queue: Stop signal received. Terminating')
                break
            if ((idx % self.skip_num) != 0):
                logger.trace("Skipping image '%s' due to extract_every_n = %s", filename, self.skip_num)
                continue
            if ((image is None) or ((not image.any()) and (image.ndim not in (2, 3)))):
                logger.warning("Unable to open image. Skipping: '%s'", filename)
                continue
            imagename = os.path.basename(filename)
            # Frames already present in the alignments file are skipped.
            if (imagename in self.alignments.data.keys()):
                logger.trace("Skipping image: '%s'", filename)
                continue
            item = {'filename': filename, 'image': image}
            load_queue.put(item)
        load_queue.put('EOF')
        logger.debug('Load Images: Complete')

    def reload_images(self, detected_faces):
        """Second pass: re-feed frames, attaching first-pass detections."""
        logger.debug('Reload Images: Start. Detected Faces Count: %s', len(detected_faces))
        load_queue = self.extractor.input_queue
        idx = 0
        for (filename, image) in self.images.load():
            idx += 1
            if load_queue.shutdown.is_set():
                logger.debug('Reload Queue: Stop signal received. Terminating')
                break
            if ((idx % self.skip_num) != 0):
                logger.trace("Skipping image '%s' due to extract_every_n = %s", filename, self.skip_num)
                continue
            logger.trace("Reloading image: '%s'", filename)
            detect_item = detected_faces.pop(filename, None)
            if (not detect_item):
                logger.warning("Couldn't find faces for: %s", filename)
                continue
            detect_item['image'] = image
            load_queue.put(detect_item)
        load_queue.put('EOF')
        logger.debug('Reload Images: Complete')

    def save_faces(self):
        """Consume the save queue, writing encoded faces to disk until EOF."""
        logger.debug('Save Faces: Start')
        while True:
            if self.save_queue.shutdown.is_set():
                logger.debug('Save Queue: Stop signal received. Terminating')
                break
            item = self.save_queue.get()
            logger.trace(item)
            if (item == 'EOF'):
                break
            (filename, face) = item
            logger.trace("Saving face: '%s'", filename)
            try:
                with open(filename, 'wb') as out_file:
                    out_file.write(face)
            except Exception as err:
                # Best effort: log and keep saving the remaining faces.
                logger.error("Failed to save image '%s'. Original Error: %s", filename, err)
                continue
        logger.debug('Save Faces: Complete')

    def process_item_count(self):
        """Return frames left to process; exit cleanly when there are none."""
        processed = sum(((os.path.basename(frame) in self.alignments.data.keys()) for frame in self.images.input_images))
        logger.debug('Items already processed: %s', processed)
        if ((processed != 0) and self.args.skip_existing):
            logger.info('Skipping previously extracted frames: %s', processed)
        if ((processed != 0) and self.args.skip_faces):
            logger.info('Skipping frames with detected faces: %s', processed)
        to_process = ((self.images.images_found - processed) // self.skip_num)
        logger.debug('Items to be Processed: %s', to_process)
        if (to_process == 0):
            logger.error('No frames to process. Exiting')
            queue_manager.terminate_queues()
            exit(0)
        return to_process

    def run_extraction(self):
        """Drive the extractor passes, saving output on the final pass."""
        to_process = self.process_item_count()
        size = (self.args.size if hasattr(self.args, 'size') else 256)
        align_eyes = (self.args.align_eyes if hasattr(self.args, 'align_eyes') else False)
        exception = False
        for phase in range(self.extractor.passes):
            if exception:
                break
            is_final = self.extractor.final_pass
            detected_faces = dict()
            self.extractor.launch()
            self.check_thread_error()
            for (idx, faces) in enumerate(tqdm(self.extractor.detected_faces(), total=to_process, file=sys.stdout, desc='Running pass {} of {}: {}'.format((phase + 1), self.extractor.passes, self.extractor.phase.title()))):
                self.check_thread_error()
                exception = faces.get('exception', False)
                if exception:
                    break
                filename = faces['filename']
                if self.extractor.final_pass:
                    self.output_processing(faces, align_eyes, size, filename)
                    self.output_faces(filename, faces)
                    if (self.save_interval and (((idx + 1) % self.save_interval) == 0)):
                        self.alignments.save()
                else:
                    # Intermediate pass: keep detections, drop image to save RAM.
                    del faces['image']
                    detected_faces[filename] = faces
            if is_final:
                logger.debug('Putting EOF to save')
                self.save_queue.put('EOF')
            else:
                logger.debug('Reloading images')
                self.threaded_io('reload', detected_faces)

    def check_thread_error(self):
        """Re-raise any exception captured inside the I/O threads."""
        for thread in self.threads:
            thread.check_and_raise_error()

    def output_processing(self, faces, align_eyes, size, filename):
        """Align faces, run post-processing actions, track multi-face frames."""
        self.align_face(faces, align_eyes, size, filename)
        self.post_process.do_actions(faces)
        faces_count = len(faces['detected_faces'])
        if (faces_count == 0):
            logger.verbose('No faces were detected in image: %s', os.path.basename(filename))
        if ((not self.verify_output) and (faces_count > 1)):
            self.verify_output = True

    def align_face(self, faces, align_eyes, size, filename):
        """Convert raw detections into aligned DetectedFace objects."""
        final_faces = list()
        image = faces['image']
        landmarks = faces['landmarks']
        detected_faces = faces['detected_faces']
        for (idx, face) in enumerate(detected_faces):
            detected_face = DetectedFace()
            detected_face.from_bounding_box_dict(face, image)
            detected_face.landmarksXY = landmarks[idx]
            detected_face.load_aligned(image, size=size, align_eyes=align_eyes)
            final_faces.append({'file_location': (self.output_dir / Path(filename).stem), 'face': detected_face})
        faces['detected_faces'] = final_faces

    def output_faces(self, filename, faces):
        """Queue aligned faces for saving and record their alignments."""
        final_faces = list()
        for (idx, detected_face) in enumerate(faces['detected_faces']):
            output_file = detected_face['file_location']
            extension = Path(filename).suffix
            out_filename = '{}_{}{}'.format(str(output_file), str(idx), extension)
            face = detected_face['face']
            resized_face = face.aligned_face
            (face.hash, img) = hash_encode_image(resized_face, extension)
            self.save_queue.put((out_filename, img))
            final_faces.append(face.to_alignment())
        self.alignments.data[os.path.basename(filename)] = final_faces
def gen_src1_dep_nottaken_test():
    """Six not-taken `bne` src1-dependency cases: nop padding 5..0, equal
    operands counting 1..6."""
    return [gen_br2_src1_dep_test(5 - i, 'bne', i + 1, i + 1, False)
            for i in range(6)]
def create_example(path, pkg_root):
    """Materialize a minimal example project under *path* with the package
    rooted at *pkg_root* ('.' means project root)."""
    touch_targets = [f'{pkg_root}/pkg/__init__.py', '_files/file.txt']
    if (pkg_root != '.'):
        # A sibling package that must NOT be picked up by discovery.
        touch_targets.append(f'{pkg_root}/other/nested/__init__.py')
    for rel in touch_targets:
        target = path / rel
        target.parent.mkdir(exist_ok=True, parents=True)
        target.touch()
    (path / 'pyproject.toml').write_text(EXAMPLE)
    (path / 'README.md').write_text('hello world')
    (path / f'{pkg_root}/pkg/mod.py').write_text('class CustomSdist: pass')
    (path / f'{pkg_root}/pkg/__version__.py').write_text('VERSION = (3, 10)')
    (path / f'{pkg_root}/pkg/__main__.py').write_text("def exec(): print('hello')")
class TestAsync(PyScriptTest):
    """Browser-driven checks that asyncio coroutines scheduled from
    <script type="py"> tags interleave correctly with synchronous code.

    NOTE(review): the bare `_main` statements below look like remnants of
    stripped decorators (e.g. a main-thread-only marker); as written they
    are bare name expressions and will raise NameError unless `_main` is
    defined elsewhere in the file — confirm against the original source.
    """
    # Template: logs "first", schedules main() via the {func} scheduler,
    # logs "second"; main() logs "third" after a 1s sleep.
    coroutine_script = '\n <script type="py">\n import js\n import asyncio\n js.console.log("first")\n async def main():\n await asyncio.sleep(1)\n js.console.log("third")\n asyncio.{func}(main())\n js.console.log("second")\n </script>\n '

    def test_asyncio_ensure_future(self):
        # Coroutine runs after the synchronous tail of the script.
        self.pyscript_run(self.coroutine_script.format(func='ensure_future'))
        self.wait_for_console('third')
        assert (self.console.log.lines[(- 3):] == ['first', 'second', 'third'])

    def test_asyncio_create_task(self):
        self.pyscript_run(self.coroutine_script.format(func='create_task'))
        self.wait_for_console('third')
        assert (self.console.log.lines[(- 3):] == ['first', 'second', 'third'])

    def test_asyncio_gather(self):
        # gather() preserves argument order regardless of completion order.
        self.pyscript_run('\n <script type="py" id="pys">\n import asyncio\n import js\n from pyodide.ffi import to_js\n\n async def coro(delay):\n await asyncio.sleep(delay)\n return(delay)\n\n async def get_results():\n results = await asyncio.gather(*[coro(d) for d in range(3,0,-1)])\n js.console.log(str(results)) #Compare to string representation, not Proxy\n js.console.log("DONE")\n\n asyncio.ensure_future(get_results())\n </script>\n ')
        self.wait_for_console('DONE')
        assert (self.console.log.lines[(- 2):] == ['[3, 2, 1]', 'DONE'])

    # NOTE(review): stripped decorator remnant — see class docstring.
    _main
    def test_multiple_async(self):
        # Two scripts' coroutines must interleave A0/B0/A1/B1/...
        self.pyscript_run('\n <script type="py">\n import js\n import asyncio\n async def a_func():\n for i in range(3):\n js.console.log(\'A\', i)\n await asyncio.sleep(0.1)\n asyncio.ensure_future(a_func())\n </script>\n\n <script type="py">\n import js\n import asyncio\n async def b_func():\n for i in range(3):\n js.console.log(\'B\', i)\n await asyncio.sleep(0.1)\n js.console.log(\'b func done\')\n asyncio.ensure_future(b_func())\n </script>\n ')
        self.wait_for_console('b func done')
        assert (self.console.log.lines == ['A 0', 'B 0', 'A 1', 'B 1', 'A 2', 'B 2', 'b func done'])

    # NOTE(review): stripped decorator remnant — see class docstring.
    _main
    def test_multiple_async_multiple_display_targeted(self):
        # display() from two async scripts must land in their own targets.
        self.pyscript_run('\n <script type="py" id="pyA">\n from pyscript import display\n import js\n import asyncio\n\n async def a_func():\n for i in range(2):\n display(f\'A{i}\', target=\'pyA\', append=True)\n js.console.log("A", i)\n await asyncio.sleep(0.1)\n asyncio.ensure_future(a_func())\n\n </script>\n\n <script type="py" id="pyB">\n from pyscript import display\n import js\n import asyncio\n\n async def a_func():\n for i in range(2):\n display(f\'B{i}\', target=\'pyB\', append=True)\n js.console.log("B", i)\n await asyncio.sleep(0.1)\n js.console.log("B DONE")\n\n asyncio.ensure_future(a_func())\n </script>\n ')
        self.wait_for_console('B DONE')
        inner_text = self.page.inner_text('html')
        assert ('A0\nA1\nB0\nB1' in filter_inner_text(inner_text))

    def test_async_display_untargeted(self):
        # Untargeted display() from a coroutine renders into its own tag.
        self.pyscript_run('\n <script type="py">\n from pyscript import display\n import asyncio\n import js\n\n async def a_func():\n display(\'A\')\n await asyncio.sleep(1)\n js.console.log("DONE")\n\n asyncio.ensure_future(a_func())\n </script>\n ')
        self.wait_for_console('DONE')
        assert (self.page.locator('script-py').inner_text() == 'A')

    # NOTE(review): stripped decorator remnant — see class docstring.
    _main
    def test_sync_and_async_order(self):
        # Synchronous script bodies run 1-6 in document order; tasks 7-10 after.
        src = '\n <script type="py">\n import js\n js.console.log("1")\n </script>\n\n <script type="py">\n import asyncio\n import js\n\n async def mytask1():\n js.console.log("7")\n await asyncio.sleep(0)\n js.console.log("9")\n\n js.console.log("2")\n asyncio.create_task(mytask1())\n js.console.log("3")\n </script>\n\n <script type="py">\n import js\n js.console.log("4")\n </script>\n\n <script type="py">\n import asyncio\n import js\n\n async def mytask2():\n js.console.log("8")\n await asyncio.sleep(0)\n js.console.log("10")\n js.console.log("DONE")\n\n js.console.log("5")\n asyncio.create_task(mytask2())\n js.console.log("6")\n </script>\n '
        self.pyscript_run(src, wait_for_pyscript=False)
        self.wait_for_console('DONE')
        lines = self.console.log.lines[(- 11):]
        assert (lines == ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'DONE'])
class State(pc.State):
    """App state for the stock ticker demo."""
    # Text currently typed in the ticker input.
    ticker: str = 'Stock Symbol'
    # Ticker committed for charting (set by ticker_update2).
    ticker2: str = ''
    username: str = 'Jay'
    logged_in: bool = True
    loading: bool = False

    def ticker_update2(self):
        """Event handler: commit the typed ticker for chart rendering."""
        self.loading = True
        self.ticker2 = self.ticker
        self.loading = False

    # NOTE(review): df1 is read as an attribute in line_chart (`self.df1`),
    # so both must be computed vars; the @pc.var decorators appear to have
    # been lost from this chunk — restored below, confirm against the app.
    @pc.var
    def df1(self) -> pd.DataFrame:
        """One year of daily history for the committed ticker."""
        stock = yf.Ticker(self.ticker2)
        df = stock.history(period='1y')
        return df

    @pc.var
    def line_chart(self) -> go.Figure:
        """Line chart of closing prices built from df1."""
        return px.line(self.df1, x=self.df1.index, y='Close', title='chart')
def test_parent(pytester):
    """Step fixtures defined in a parent conftest are visible to the test module.

    NOTE(review): the `@given` / `@scenario` decorators inside the generated
    files appeared to have been stripped (the bare call tuples with keyword
    arguments are a SyntaxError in the generated files); restored here —
    confirm against the original pytest-bdd test.
    """
    pytester.makefile('.feature', parent=textwrap.dedent('''
        Feature: Parent
            Scenario: Parenting is easy
                Given I have a parent fixture
                And I have an overridable fixture
        '''))
    pytester.makeconftest(textwrap.dedent('''
        from pytest_bdd import given


        @given("I have a parent fixture", target_fixture="parent")
        def _():
            return "parent"


        @given("I have an overridable fixture", target_fixture="overridable")
        def _():
            return "parent"
        '''))
    pytester.makepyfile(textwrap.dedent('''
        from pytest_bdd import scenario

        @scenario("parent.feature", "Parenting is easy")
        def test_parent(request):
            assert request.getfixturevalue("parent") == "parent"
            assert request.getfixturevalue("overridable") == "parent"
        '''))
    result = pytester.runpytest()
    result.assert_outcomes(passed=1)
def getBombMult(mod, src, tgt, distance, tgtSigRadius):
    """Damage multiplier for a bomb launcher against *tgt*.

    Returns 0 when the module has no range or when *distance* falls outside
    the window where the blast can reach the target; otherwise delegates to
    the signature-resolution bomb factor.
    """
    flightRange = mod.maxRange
    if flightRange is None:
        return 0
    blastRadius = mod.getModifiedChargeAttr('explosionRange')
    attackerRadius = src.getRadius()
    victimRadius = tgt.getRadius()
    if distance is not None:
        # Closest / farthest center distances at which the blast can touch
        # the target hull (clamped at 0).
        nearEdge = max(0, (((flightRange - attackerRadius) - victimRadius) - blastRadius))
        farEdge = max(0, (((flightRange - attackerRadius) + victimRadius) + blastRadius))
        if distance < nearEdge or distance > farEdge:
            return 0
    return _calcBombFactor(atkEr=mod.getModifiedChargeAttr('aoeCloudSize'), tgtSigRadius=tgtSigRadius)
def sum_of_ratio_of_minima_among_subsets(num_denom_pairs):
    """Sum, over every subset of at least two pairs, of
    min(numerators in subset) / min(denominators in subset).

    Instead of enumerating subsets (exponential), iterate over all
    (numerator-minimum, denominator-minimum) index pairs and count how many
    subsets realize exactly that pair of minima.
    """
    numerators, denominators = zip(*num_denom_pairs)
    by_num = np.argsort(numerators)
    by_denom = np.argsort(denominators)
    count = len(num_denom_pairs)
    total = 0
    for i in range(count):
        for j in range(count):
            # Elements forced into the subset: the two minima themselves.
            minima = set([by_num[i], by_denom[j]])
            # Elements allowed alongside them: those no smaller in both keys.
            shared_tail = set(by_num[i:]).intersection(set(by_denom[j:]))
            if not minima.issubset(shared_tail):
                continue
            multiplicity = 2 ** len(shared_tail - minima)
            if len(minima) == 1:
                # Exclude the singleton subset (needs >= 2 elements).
                multiplicity -= 1
            smallest_num = numerators[by_num[i]]
            smallest_denom = denominators[by_denom[j]]
            total += (multiplicity * smallest_num) / smallest_denom
    return total
def load_checkpoint(experiment_directory, checkpoint):
    """Load a saved seq2seq checkpoint.

    Returns a (model, input_vocab, output_vocab) tuple read from
    ``<experiment_directory>/<CHECKPOINT_DIR_NAME>/<checkpoint>``.
    """
    checkpoint_path = os.path.join(experiment_directory, Checkpoint.CHECKPOINT_DIR_NAME, checkpoint)
    logging.info('Loading checkpoint from {}'.format(checkpoint_path))
    restored = Checkpoint.load(checkpoint_path)
    return (restored.model, restored.input_vocab, restored.output_vocab)
class ReplacementField():
    """One ``{...}`` replacement field parsed from a format string.

    NOTE(review): annotated fields with defaults and no __init__ strongly
    suggest this was a @dataclass (or attrs) class whose decorator was lost
    from this chunk — confirm against the original source. Without it the
    defaults are plain shared class attributes and no __init__ is generated.
    """
    # Positional index or keyword name of the field's argument (None/int/str).
    arg_name: Union[(None, int, str)]
    # Chain of index/attribute accesses applied after the argument lookup.
    index_attribute: Sequence[Tuple[(IndexOrAttribute, str)]] = ()
    # Conversion flag if present (e.g. the 'r' in '{x!r}').
    conversion: Optional[str] = None
    # Nested format spec, itself a FormatString that may contain fields.
    format_spec: Optional[FormatString] = None

    def iter_replacement_fields(self) -> Iterable['ReplacementField']:
        # Yield this field, then recurse into fields nested in the format spec.
        (yield self)
        if self.format_spec:
            for child in self.format_spec.children:
                if isinstance(child, ReplacementField):
                    (yield from child.iter_replacement_fields())
class TDF(nn.Module):
    """Time-distributed fully-connected block applied along the last
    (frequency) axis, with per-channel BatchNorm2d and an activation.

    With ``bn_factor=None`` a single full-width dense layer is used;
    otherwise a bottleneck of ``max(f // bn_factor, min_bn_units)`` units
    sits between two dense layers.
    """

    def __init__(self, channels, f, bn_factor=16, bias=False, min_bn_units=16, activation=nn.ReLU):
        super(TDF, self).__init__()
        if bn_factor is None:
            # No bottleneck: one dense transform at full width.
            layers = [nn.Linear(f, f, bias), nn.BatchNorm2d(channels), activation()]
        else:
            bn_units = max(f // bn_factor, min_bn_units)
            self.bn_units = bn_units
            layers = [
                nn.Linear(f, bn_units, bias),
                nn.BatchNorm2d(channels),
                activation(),
                nn.Linear(bn_units, f, bias),
                nn.BatchNorm2d(channels),
                activation(),
            ]
        self.tdf = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the dense stack; input laid out as (N, channels, ..., f)."""
        return self.tdf(x)
class Ed448PrivateKey(metaclass=abc.ABCMeta):
    """Abstract interface for Ed448 private keys.

    NOTE(review): the @classmethod / @abc.abstractmethod decorators and the
    abstract-method bodies appeared to have been stripped from this chunk
    (bare ``def`` headers with no body are a SyntaxError); restored below —
    confirm against the cryptography library source.
    """

    @classmethod
    def generate(cls) -> Ed448PrivateKey:
        """Generate a new random Ed448 private key."""
        from cryptography.hazmat.backends.openssl.backend import backend
        if (not backend.ed448_supported()):
            raise UnsupportedAlgorithm('ed448 is not supported by this version of OpenSSL.', _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
        return rust_openssl.ed448.generate_key()

    @classmethod
    def from_private_bytes(cls, data: bytes) -> Ed448PrivateKey:
        """Load a private key from its raw byte representation."""
        from cryptography.hazmat.backends.openssl.backend import backend
        if (not backend.ed448_supported()):
            raise UnsupportedAlgorithm('ed448 is not supported by this version of OpenSSL.', _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
        return rust_openssl.ed448.from_private_bytes(data)

    @abc.abstractmethod
    def public_key(self) -> Ed448PublicKey:
        """Return the public key corresponding to this private key."""

    @abc.abstractmethod
    def sign(self, data: bytes) -> bytes:
        """Sign *data* and return the signature bytes."""

    @abc.abstractmethod
    def private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:
        """Serialize the key in the requested encoding and format."""

    @abc.abstractmethod
    def private_bytes_raw(self) -> bytes:
        """Return the raw bytes of the private key."""
def get_val_dataloader(data_pth, vocab, max_seq_length, val_batch_size, mode='eval', beam_size=0):
    """Build a sequential DataLoader over *data_pth*.

    In 'eval' mode batches are sized by *val_batch_size*; otherwise each
    example is replicated *beam_size* times so a single batch holds every
    beam hypothesis for one example. Returns (dataloader, features).
    """
    data = get_data(data_pth)
    (features, vocab) = convert_example_to_feature(data, max_seq_length, vocab, sum_mode=args.sum_mode, context_mode=args.context_mode, get_vocab=False)
    if mode == 'eval':
        dataset = PGNDataset(features)
        batch_size = val_batch_size
    else:
        # Beam decoding: duplicate every feature beam_size times.
        replicated = [f for f in features for _ in range(beam_size)]
        dataset = PGNDataset(replicated)
        batch_size = beam_size
    sampler = SequentialSampler(dataset)
    val_dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size, collate_fn=batchify_data)
    return (val_dataloader, features)
def run_eval(args):
    """Synthesize every sentence in *tests*, conditioning on the reference
    audio's mel spectrogram, and write one wav per sentence next to the
    checkpoint."""
    print(hparams_debug_string())
    synth = Synthesizer()
    synth.load(args.checkpoint)
    base_path = get_output_base_path(args.checkpoint)
    # Style/reference conditioning: mel of the reference clip, time-major.
    reference_mel = melspectrogram(load_wav(args.reference_audio)).transpose()
    for (index, sentence) in enumerate(tests):
        out_path = ('%s-%d.wav' % (base_path, index))
        print(('Synthesizing: %s' % out_path))
        with open(out_path, 'wb') as wav_file:
            wav_file.write(synth.synthesize(sentence, reference_mel))
class GridProportion(Enum):
    """Grid column-width proportions, one through sixteen, whose values are
    the lowercase English words used in the rendered CSS class names."""
    One = 'one'
    Two = 'two'
    Three = 'three'
    Four = 'four'
    Five = 'five'
    Six = 'six'
    Seven = 'seven'
    Eight = 'eight'
    Nine = 'nine'
    Ten = 'ten'
    Eleven = 'eleven'
    Twelve = 'twelve'
    Thirteen = 'thirteen'
    Fourteen = 'fourteen'
    Fifteen = 'fifteen'
    Sixteen = 'sixteen'

    def __str__(self) -> str:
        # Render as the bare word (e.g. "three") rather than the enum repr.
        return str(self.value)
# NOTE(review): the bare `_config` below looks like the remnant of a stripped
# decorator (e.g. a qtile test-config marker); as written it is a bare name
# expression and will raise NameError unless `_config` is defined elsewhere
# in the file — confirm against the original source.
_config
def test_floats_kept_above(xmanager):
    """Floating windows must stay above tiled ones in the X stacking order."""
    conn = xcbq.Connection(xmanager.display)

    def _wnd(name):
        # Resolve a managed window by its name via the command interface.
        return xmanager.c.window[{w['name']: w['id'] for w in xmanager.c.windows()}[name]]

    def _clients():
        # Return managed window names ordered bottom-to-top of the X stack.
        root = conn.default_screen.root.wid
        q = conn.conn.core.QueryTree(root).reply()
        stack = list(q.children)
        wins = [(w['name'], stack.index(w['id'])) for w in xmanager.c.windows()]
        wins.sort(key=(lambda x: x[1]))
        return [x[0] for x in wins]
    xmanager.test_window('one', floating=True)
    xmanager.test_window('two')
    # The tiled window 'two' must sit below the floating 'one'.
    assert (_clients() == ['two', 'one'])
    xmanager.test_window('three', floating=True)
    assert (_clients() == ['two', 'one', 'three'])
# NOTE(review): the bare `_pypy` below looks like the remnant of a stripped
# decorator (e.g. a skip-on-PyPy marker); as written it is a bare name
# expression and will raise NameError unless `_pypy` is defined elsewhere
# in the file — confirm against the original source.
_pypy
def test_instance_method_by_subclass_spy(mocker: MockerFixture) -> None:
    """Spying on an inherited method via the subclass records calls from
    every instance of that subclass without changing behavior."""

    class Base():

        def bar(self, arg):
            return (arg * 2)

    class Foo(Base):
        pass
    spy = mocker.spy(Foo, 'bar')
    foo = Foo()
    other = Foo()
    assert (foo.bar(arg=10) == 20)
    assert (other.bar(arg=10) == 20)
    # Unbound-method spy: each call records the instance as first argument.
    calls = [mocker.call(foo, arg=10), mocker.call(other, arg=10)]
    assert (spy.call_args_list == calls)
    assert (spy.spy_return == 20)
def test_struct_prop_arity():
    # A struct with prop:procedure plus prop:arity-string must be applicable
    # like a procedure, and arity errors must carry the custom arity message.
    m = run_mod('\n #lang pycket\n (require racket/private/kw)\n\n (struct evens (proc)\n #:property prop:procedure (struct-field-index proc)\n #:property prop:arity-string\n (lambda (p)\n "an even number of arguments"))\n (define pairs\n (evens\n (case-lambda\n [() null]\n [(a b . more)\n (cons (cons a b)\n (apply pairs more))])))\n (define x (pairs 1 2 3 4))\n ')
    ov = m.defs[W_Symbol.make('x')]
    assert isinstance(ov, W_Cons)
    # An odd argument count hits the arity error path with the custom string.
    e = pytest.raises(SchemeException, run_mod, '\n #lang pycket\n (require racket/private/kw)\n\n (struct evens (proc)\n #:property prop:procedure (struct-field-index proc)\n #:property prop:arity-string\n (lambda (p)\n "an even number of arguments"))\n (define pairs\n (evens\n (case-lambda\n [() null]\n [(a b . more)\n (cons (cons a b)\n (apply pairs more))])))\n (pairs 5)\n ')
    assert ('an even number of arguments' in e.value.msg)
def test_events_for_expired_pairs():
    # The payer side of a mediated transfer must only flip to expired once the
    # payer lock's expiration *threshold* is reached -- not at the first unsafe
    # block, and not even at the payee lock's expiration.
    setup = factories.make_transfers_pair(2)
    pair = setup.transfers_pair[0]
    first_unsafe_block = BlockNumber((pair.payer_transfer.lock.expiration - UNIT_REVEAL_TIMEOUT))
    mediator.events_for_expired_pairs(setup.channel_map, setup.transfers_pair, None, first_unsafe_block)
    assert (pair.payer_state == 'payer_pending')
    payee_expiration_block = pair.payee_transfer.lock.expiration
    mediator.events_for_expired_pairs(setup.channel_map, setup.transfers_pair, None, BlockNumber(payee_expiration_block))
    assert (pair.payer_state == 'payer_pending')
    # Only the sender-side expiration threshold triggers the state change.
    payer_lock_expiration_threshold = channel.get_sender_expiration_threshold(pair.payer_transfer.lock.expiration)
    mediator.events_for_expired_pairs(setup.channel_map, setup.transfers_pair, None, BlockNumber(payer_lock_expiration_threshold))
    assert (pair.payer_state == 'payer_expired')
# NOTE(review): '.parametrize(...)' is the tail of a stripped
# '@pytest.mark.parametrize' decorator -- confirm upstream.
.parametrize('algorithm', [pytest.param('RS256'), pytest.param('RS384')])
def test_decode_jwt_invalid_algorithm(private_key_pem, public_key, algorithm):
    # Tokens signed with a non-whitelisted algorithm must be rejected.
    token = jwt.encode(_token_data('aud', 'subject', 'someissuer'), private_key_pem, algorithm)
    with pytest.raises(InvalidAlgorithmError) as ite:
        max_exp = exp_max_s_option(3600)
        # Only ES256 is whitelisted here, so the RS-signed token must fail.
        decode(token, public_key, algorithms=['ES256'], audience='aud', issuer='someissuer', options=max_exp, leeway=60)
    assert ite.match('are not whitelisted')
class TemplatePlugin(object):
    """Bottle plugin that wraps route callbacks with the `view` decorator.

    A route opts in through its ``template`` config entry: either a bare
    template name, or a ``(name, defaults)`` pair. Routes without a usable
    template config are passed through unchanged.
    """
    name = 'template'
    api = 2

    def apply(self, callback, route):
        template_conf = route.config.get('template')
        if isinstance(template_conf, str):
            # Bare template name, no default variables.
            return view(template_conf)(callback)
        if isinstance(template_conf, (tuple, list)) and len(template_conf) == 2:
            # (template_name, default_variables) pair.
            return view(template_conf[0], **template_conf[1])(callback)
        # No template configured: leave the callback unwrapped.
        return callback
# NOTE(review): '.parametrize(...)' is the tail of a stripped
# '@pytest.mark.parametrize' decorator -- confirm upstream.
.parametrize('username,password', users)
def test_delete(db, client, username, password):
    # DELETE on every question set must honour the user's object permissions.
    client.login(username=username, password=password)
    instances = QuestionSet.objects.all()
    for instance in instances:
        url = reverse(urlnames['detail'], args=[instance.pk])
        response = client.delete(url)
        # Expected status code depends on whether this user may delete the object.
        assert (response.status_code == get_obj_perms_status_code(instance, username, 'delete')), response.json()
def identifiers_info(code):
    """Map top-level identifiers defined in *code* to a short description.

    Assignment targets map to ``''``; function/async-function/class
    definitions map to the first paragraph of their docstring. Source that
    cannot be parsed yields an empty dict.

    Generalized: tuple/list unpacking targets (``a, b = ...``) are now
    captured too, and Name detection uses ``isinstance`` instead of the
    fragile ``hasattr(..., 'id')`` check. Subscript/attribute targets
    (``d[k] = ...``) are still skipped, as before, since they define nothing.
    """
    try:
        tree = ast.parse(code)
    except Exception:
        # Invalid Python (SyntaxError, ValueError on NULs, ...): nothing to report.
        return {}
    if not isinstance(tree, ast.Module):
        return {}
    identifier2doc = {}
    for node in tree.body:
        if isinstance(node, ast.Assign):
            for target in node.targets:
                # Unwrap one level of tuple/list unpacking.
                elements = target.elts if isinstance(target, (ast.Tuple, ast.List)) else [target]
                for element in elements:
                    if isinstance(element, ast.Name):
                        identifier2doc[element.id] = ''
        elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
            doc_string = ast.get_docstring(node) or ''
            # First paragraph of the docstring serves as the summary line.
            identifier2doc[node.name] = doc_string.split('\n\n')[0]
    return identifier2doc
def _build_context(img: str, workspace: str) -> IO[bytes]:
    """Pack *workspace* into a tar build context for the Docker daemon.

    A default Dockerfile is injected when the workspace does not supply one.
    Returns the open temporary file rewound to byte 0. (*img* is accepted for
    interface parity with callers but not read here.)
    """
    context_file = tempfile.NamedTemporaryFile(prefix='torchx-context', suffix='.tar')
    with tarfile.open(fileobj=context_file, mode='w') as archive:
        _copy_to_tarfile(workspace, archive)
        if TORCHX_DOCKERFILE not in archive.getnames():
            # Workspace has no Dockerfile: append the built-in default.
            dockerfile_info = tarfile.TarInfo(TORCHX_DOCKERFILE)
            dockerfile_info.size = len(DEFAULT_DOCKERFILE)
            archive.addfile(dockerfile_info, io.BytesIO(DEFAULT_DOCKERFILE))
    # Rewind so the caller can stream the archive from the start.
    context_file.seek(0)
    return context_file
class TestColor():
    """Tests for the ColorJitter augmentation transform."""

    def test_color_jitter(self):
        # FIX: this method previously had no `self` parameter; pytest invokes
        # test methods bound to an instance, so calling it raised
        # "takes 0 positional arguments but 1 was given".
        imgs = list(np.random.randint(0, 255, size=(3, 112, 112, 3), dtype=np.uint8))
        results = dict(imgs=imgs)
        # Default PCA-lighting eigenvalues/eigenvectors (ImageNet statistics).
        eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32)
        eig_vec = np.array([[(- 0.5675), 0.7192, 0.4009], [(- 0.5808), (- 0.0045), (- 0.814)], [(- 0.5836), (- 0.6948), 0.4203]], dtype=np.float32)
        color_jitter = ColorJitter()
        assert_array_equal(color_jitter.eig_val, eig_val)
        assert_array_equal(color_jitter.eig_vec, eig_vec)
        assert (color_jitter.alpha_std == 0.1)
        assert (color_jitter.color_space_aug is False)
        color_jitter_results = color_jitter(results)
        target_keys = ['imgs', 'eig_val', 'eig_vec', 'alpha_std', 'color_space_aug']
        assert assert_dict_has_keys(color_jitter_results, target_keys)
        # Image shape must be preserved by the transform.
        assert (np.shape(color_jitter_results['imgs']) == (3, 112, 112, 3))
        assert_array_equal(color_jitter_results['eig_val'], eig_val)
        assert_array_equal(color_jitter_results['eig_vec'], eig_vec)
        assert (color_jitter_results['alpha_std'] == 0.1)
        assert (color_jitter_results['color_space_aug'] is False)
        # Custom parameters must override the defaults.
        custom_eig_val = np.ones(3)
        custom_eig_vec = np.ones((3, 3))
        imgs = list(np.random.randint(0, 255, size=(3, 64, 80, 3), dtype=np.uint8))
        results = dict(imgs=imgs)
        custom_color_jitter = ColorJitter(True, 0.5, custom_eig_val, custom_eig_vec)
        assert_array_equal(color_jitter.eig_val, eig_val)
        assert_array_equal(color_jitter.eig_vec, eig_vec)
        assert (custom_color_jitter.alpha_std == 0.5)
        assert (custom_color_jitter.color_space_aug is True)
        custom_color_jitter_results = custom_color_jitter(results)
        assert (np.shape(custom_color_jitter_results['imgs']) == (3, 64, 80, 3))
        assert_array_equal(custom_color_jitter_results['eig_val'], custom_eig_val)
        assert_array_equal(custom_color_jitter_results['eig_vec'], custom_eig_vec)
        assert (custom_color_jitter_results['alpha_std'] == 0.5)
        assert (custom_color_jitter_results['color_space_aug'] is True)
        # repr() must reflect the default configuration.
        color_jitter = ColorJitter()
        assert (repr(color_jitter) == f'{color_jitter.__class__.__name__}(color_space_aug={False}, alpha_std={0.1}, eig_val={eig_val}, eig_vec={eig_vec})')
class TestBasicFeatures(TempDirectoryTestCase, OverridePreferencesTestCase):
    """End-to-end test of adding, opening and removing a project.

    Runs inside Sublime Text: drives the 'project_manager' commands with the
    input panel and quick panel patched out, then asserts on window state.
    The yield-based style is the coroutine test framework used by this suite.
    """
    # Run against an empty plugin settings file.
    override_preferences = {'project_manager.sublime-settings': {}}
    project_name = None
    # NOTE(review): setUpClass/tearDownClass take `cls` but show no
    # @classmethod decorator in this copy -- decorators appear stripped
    # elsewhere in this file too; confirm against upstream.
    def setUpClass(cls):
        (yield from super().setUpClass())
        # The temp directory name doubles as the project name under test.
        cls.project_name = os.path.basename(cls._temp_dir)
        cls.manager = Manager(cls.window)
    def tearDownClass(cls):
        (yield from super().tearDownClass())
        # Remove the test project if a failed run left it behind.
        if (cls.project_name in cls.manager.projects_info.info()):
            with patch('sublime.ok_cancel_dialog', return_value=True) as mocked:
                cls.manager.remove_project(cls.project_name)
            (yield (lambda : mocked.called))
    def setUp(self):
        (yield from self.__class__.setWindowFolder())
    def test_add_and_open_with_mock(self):
        # Auto-confirm the input panel with its initial text (the folder name).
        def _window_show_input_panel(wid, caption, initial_text, on_done, on_change, on_cancel):
            sublime.set_timeout((lambda : on_done(initial_text)), 100)
            return 0
        with patch('sublime_api.window_show_input_panel', _window_show_input_panel):
            self.window.run_command('project_manager', {'action': 'add_project'})
            (yield (lambda : (self.window.project_file_name() is not None)))
        projects_info = self.manager.projects_info.info()
        self.assertTrue((self.project_name in projects_info))
        self.window.run_command('close_workspace')
        self.assertTrue((self.window.project_file_name() is None))
        # Quick-panel API shape differs between ST4 and ST3.
        if (sublime.version() >= '4000'):
            def _window_show_quick_panel(wid, items, on_done, *args, **kwargs):
                index = next((i for (i, item) in enumerate(items) if item[0].startswith(self.project_name)))
                sublime.set_timeout((lambda : on_done(index)), 100)
        else:
            def _window_show_quick_panel(wid, items, items_per_row, on_done, *args, **kwargs):
                index = next((int((i / items_per_row)) for (i, item) in enumerate(items) if (((i % items_per_row) == 0) and item.startswith(self.project_name))))
                sublime.set_timeout((lambda : on_done(index)), 100)
        with patch('sublime_api.window_show_quick_panel', _window_show_quick_panel):
            self.window.run_command('project_manager', {'action': 'open_project'})
            (yield (lambda : (self.window.project_file_name() is not None)))
        self.assertEqual(os.path.basename(self.window.folders()[0]), self.project_name)
        with patch('sublime_api.window_show_quick_panel', _window_show_quick_panel):
            with patch('sublime.ok_cancel_dialog', return_value=True):
                self.window.run_command('project_manager', {'action': 'remove_project'})
        (yield (lambda : (self.window.project_file_name() is None)))
class SawyerDrawerOpenEnv(SawyerXYZEnv):
    """Metaworld-style MuJoCo task: a Sawyer arm opens a drawer.

    NOTE(review): decorators appear stripped in this copy -- `model_name` is
    used as `self.model_name` (a value) in __init__ but defined as a plain
    method below, so it was almost certainly a @property; the bare
    `_assert_task_is_set` line is a stripped decorator for `step`. Confirm
    against upstream.
    """
    def __init__(self):
        # Workspace bounds for the hand and randomization ranges for the
        # drawer handle (obj) and goal site.
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.9, 0.04)
        obj_high = (0.1, 0.9, 0.04)
        goal_low = ((- 0.1), 0.5499, 0.04)
        goal_high = (0.1, 0.5501, 0.04)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': np.array([0.3], dtype=np.float32), 'obj_init_pos': np.array([0.0, 0.9, 0.04], dtype=np.float32), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)}
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
    def model_name(self):
        # Path to the MuJoCo XML describing the scene.
        return full_v1_path_for('sawyer_xyz/sawyer_drawer.xml')
    _assert_task_is_set
    def step(self, action):
        # Success when the handle is within 8cm (along y) of the pull goal.
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.08))}
        return (ob, reward, False, info)
    def _get_pos_objects(self):
        return self.data.get_geom_xpos('handle').copy()
    def _get_obs_dict(self):
        obs_dict = super()._get_obs_dict()
        # Achieved goal is the midpoint between handle start and drawer wall.
        obs_dict['state_achieved_goal'] = ((self._get_site_pos('handleStart').copy() + self.data.get_geom_xpos('drawer_wall2').copy()) / 2)
        return obs_dict
    def reset_model(self):
        self._reset_hand()
        # Goal is 35cm in front of (negative y from) the drawer body.
        self._target_pos = (self.obj_init_pos - np.array([0.0, 0.35, 0.0]))
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        if self.random_init:
            obj_pos = self._get_state_rand_vec()
            self.obj_init_pos = obj_pos
            goal_pos = obj_pos.copy()
            goal_pos[1] -= 0.35
            self._target_pos = goal_pos
        drawer_cover_pos = self.obj_init_pos.copy()
        drawer_cover_pos[2] -= 0.02
        self.sim.model.body_pos[self.model.body_name2id('drawer')] = self.obj_init_pos
        self.sim.model.body_pos[self.model.body_name2id('drawer_cover')] = drawer_cover_pos
        self.sim.model.site_pos[self.model.site_name2id('goal')] = self._target_pos
        self.maxPullDist = 0.2
        self.target_reward = ((1000 * self.maxPullDist) + (1000 * 2))
        return self._get_obs()
    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.reachCompleted = False
    def compute_reward(self, actions, obs):
        # Returns [reward, reachDist, pullDist]: reach shaping plus a pull
        # bonus that only activates once the gripper is near the handle.
        del actions
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        # Pull progress is measured along the y (drawer-opening) axis only.
        pullDist = np.abs((objPos[1] - pullGoal[1]))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        reachRew = (- reachDist)
        self.reachCompleted = (reachDist < 0.05)
        def pullReward():
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            if self.reachCompleted:
                pullRew = ((1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
                pullRew = max(pullRew, 0)
                return pullRew
            else:
                return 0
            # NOTE(review): the two lines below are unreachable -- both
            # branches above return. Dead code; safe to delete upstream.
            pullRew = max(pullRew, 0)
            return pullRew
        pullRew = pullReward()
        reward = (reachRew + pullRew)
        return [reward, reachDist, pullDist]
def test_parse_args_unset(capsys):
    # Missing required flags must exit with argparse's usage-error code (2).
    with pytest.raises(SystemExit) as e:
        args = client_parameters.parse_args(('unset',))
    assert (e.type == SystemExit)
    assert (e.value.code == 2)
    captured = capsys.readouterr()
    assert ('arguments are required: --hostname, --parameter' in captured.err)
    # An unknown parameter name is rejected as an invalid choice.
    with pytest.raises(SystemExit) as e:
        args = client_parameters.parse_args(('unset', '--hostname', 'froodle', '--parameter', 'freedle'))
    assert (e.type == SystemExit)
    assert (e.value.code == 2)
    captured = capsys.readouterr()
    assert ('invalid choice' in captured.err)
    # A valid parameter name parses cleanly.
    args = client_parameters.parse_args(('unset', '--hostname', 'froodle', '--parameter', 'user_email'))
    assert (args.hostname == 'froodle')
    assert (args.parameter == 'user_email')
def get_volatility(qf_series: QFSeries, frequency: Frequency=None, annualise: bool=True) -> float:
    """Standard deviation of the series' log-returns, optionally annualised.

    Requires at least two return observations; when ``annualise`` is true a
    sampling ``frequency`` must be supplied for the sqrt-of-time scaling.
    """
    log_returns = qf_series.to_log_returns()
    assert len(log_returns) >= 2, 'minimal num_of_rows to receive a real result is 2'
    # Annualisation is impossible without knowing the sampling frequency.
    assert (not annualise) or (frequency is not None)
    volatility = log_returns.std()
    if annualise:
        return annualise_with_sqrt(volatility, frequency)
    return volatility
class DarcsCommands():
    """File-system operations that keep a darcs repository in sync.

    Plain operations delegate to FileSystemCommands; creations and moves are
    mirrored with the matching ``darcs`` subcommand run from the repo root.
    """

    def __init__(self, root):
        self.root = root
        self.normal_actions = FileSystemCommands()

    def create_file(self, path):
        self.normal_actions.create_file(path)
        self._do(['add', path])

    def create_folder(self, path):
        self.normal_actions.create_folder(path)
        self._do(['add', path])

    def move(self, path, new_location):
        # darcs performs the move itself; no plain FS move is needed.
        self._do(['mv', path, new_location])

    def remove(self, path):
        # darcs picks up deletions on record, so only the FS removal happens.
        self.normal_actions.remove(path)

    def read(self, path):
        return self.normal_actions.read(path)

    def write(self, path, data):
        self.normal_actions.write(path, data)

    def _do(self, darcs_args):
        # Run `darcs <args>` from the repository root.
        _execute(['darcs'] + darcs_args, cwd=self.root)
# NOTE(review): the bare "('pypyr.steps.filewritejson.Path')" line is the tail
# of a stripped '@patch(...)' decorator -- confirm upstream.
('pypyr.steps.filewritejson.Path')
def test_filewritejson_pass_no_payload(mock_path):
    # With no explicit payload, the whole context is serialized to the file.
    context = Context({'k1': 'v1', 'fileWriteJson': {'path': '/arb/blah'}})
    with io.StringIO() as out_text:
        with patch('pypyr.steps.filewritejson.open', mock_open()) as mock_output:
            # Capture whatever the step writes through the mocked open().
            mock_output.return_value.write.side_effect = out_text.write
            filewrite.run_step(context)
        assert context, "context shouldn't be None"
        assert (len(context) == 2), 'context should have 2 items'
        assert (context['k1'] == 'v1')
        assert (context['fileWriteJson'] == {'path': '/arb/blah'})
        mock_path.assert_called_once_with('/arb/blah')
        mocked_path = mock_path.return_value
        # Parent directories must be created before writing.
        mocked_path.parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
        mock_output.assert_called_once_with(mocked_path, 'w', encoding=None)
        assert (out_text.getvalue() == '{\n  "k1": "v1",\n  "fileWriteJson": {\n    "path": "/arb/blah"\n  }\n}')
class ElasticConfigurator(BaseConfigurator):
    """Creates/tears down the Elasticsearch index used by the benchmark."""
    # Benchmark distance metrics -> Elasticsearch dense_vector similarities.
    DISTANCE_MAPPING = {Distance.L2: 'l2_norm', Distance.COSINE: 'cosine', Distance.DOT: 'dot_product'}
    # Dataset schema types -> Elasticsearch field types (others pass through).
    INDEX_TYPE_MAPPING = {'int': 'long', 'geo': 'geo_point'}
    def __init__(self, host, collection_params: dict, connection_params: dict):
        super().__init__(host, collection_params, connection_params)
        # Caller-supplied connection params override these defaults.
        init_params = {**{'verify_certs': False, 'request_timeout': 90, 'retry_on_timeout': True}, **connection_params}
        # NOTE(review): the line below is garbled in this copy -- the f-string
        # is unterminated (it likely read f'http://{host}:9200' followed by
        # basic_auth=... as a separate kwarg). Restore from upstream.
        self.client = Elasticsearch(f' basic_auth=(ELASTIC_USER, ELASTIC_PASSWORD), **init_params)
    def clean(self):
        # Best-effort delete; a missing index is not an error.
        try:
            self.client.indices.delete(index=ELASTIC_INDEX, timeout='5m', master_timeout='5m')
        except NotFoundError:
            pass
    def recreate(self, dataset: Dataset, collection_params):
        # Elasticsearch's dot_product requires normalized vectors and
        # dense_vector caps out at 2048 dims, so both are unsupported here.
        if (dataset.config.distance == Distance.DOT):
            raise IncompatibilityError
        if (dataset.config.vector_size > 2048):
            raise IncompatibilityError
        # NOTE(review): collection_params.get('index_options') returning None
        # would make the '**' merge below raise TypeError -- presumably the
        # benchmark config always supplies it; confirm.
        self.client.indices.create(index=ELASTIC_INDEX, settings={'index': {'number_of_shards': 1, 'number_of_replicas': 0, 'refresh_interval': (- 1)}}, mappings={'_source': {'excludes': ['vector']}, 'properties': {'vector': {'type': 'dense_vector', 'dims': dataset.config.vector_size, 'index': True, 'similarity': self.DISTANCE_MAPPING[dataset.config.distance], 'index_options': {**{'type': 'hnsw', 'm': 16, 'ef_construction': 100}, **collection_params.get('index_options')}}, **self._prepare_fields_config(dataset)}})
    def _prepare_fields_config(self, dataset: Dataset):
        # Map every payload field in the dataset schema to an indexed ES field.
        return {field_name: {'type': self.INDEX_TYPE_MAPPING.get(field_type, field_type), 'index': True} for (field_name, field_type) in dataset.config.schema.items()}
class TestCopyPlane(EndianTest):
    """Round-trip pack/unpack test for the X11 CopyPlane request."""
    def setUp(self):
        # NOTE(review): several values in req_args_0 are missing after the
        # colon ('bit_plane': , 'dst_drawable': , 'gc': , 'src_drawable': ,)
        # -- resource-ID literals were lost in extraction; this copy does not
        # parse. Restore the values from upstream.
        self.req_args_0 = {'bit_plane': , 'dst_drawable': , 'dst_x': (- 25480), 'dst_y': (- 26229), 'gc': , 'height': 60447, 'src_drawable': , 'src_x': (- 4634), 'src_y': (- 17345), 'width': 53771}
        self.req_bin_0 = b'?\x00\x00\x080\xf8 \x8do\xa4)H\x04\xf5\xed\x85\xed\xe6\xbc?\x9cx\x99\x8b\xd2\x0b\xec\x1f:\xec7j'
    def testPackRequest0(self):
        # Packing the args must reproduce the captured wire bytes exactly.
        bin = request.CopyPlane._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)
    def testUnpackRequest0(self):
        # Parsing the wire bytes must reproduce the args with nothing left over.
        (args, remain) = request.CopyPlane._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class DataTrainingArguments():
    """HF-style arguments describing the dataset used for training/eval.

    NOTE(review): this is almost certainly declared with a stripped
    '@dataclass' decorator (field(...) metadata and __post_init__ are
    dataclass idioms); __post_init__ also reads self.train_dir and
    self.validation_dir, fields not visible in this copy -- confirm the
    full definition upstream.
    """
    dataset_name: Optional[str] = field(default='segments/sidewalk-semantic', metadata={'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_val_split: Optional[float] = field(default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    reduce_labels: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to reduce all labels by 1 and replace background by 255.'})
    def __post_init__(self):
        # A data source is mandatory: either a hub dataset or local directories.
        if ((self.dataset_name is None) and ((self.train_dir is None) and (self.validation_dir is None))):
            raise ValueError('You must specify either a dataset name from the hub or a train and/or validation directory.')
def get_all_var_mappings(a):
    """Collect the free-variable mappings referenced by AST node *a*.

    Returns a dict mapping variable name -> attribute name (``None`` when the
    variable is used bare, e.g. ``{'x': None}`` for a plain name and
    ``{'x': 'attr'}`` for ``x.attr``). Containers and call arguments are
    traversed recursively; literals contribute nothing.
    """
    if (is_ast(a) and (not is_literal(a))):
        if (type(a) == name_e):
            return {a.id: None}
        if is_comprehension(a):
            vs = get_all_var_mappings(a.expr)
            for g in a.generators:
                # FIX: the original did `gvs = vs.update(...)` -- dict.update()
                # returns None, so the subsequent `tv in gvs` raised TypeError.
                # Collect the generator's own mappings, drop the names bound by
                # its target (they are not free), then merge into the result.
                gvs = get_all_var_mappings(g)
                for tv in get_all_vars(g.target):
                    if (tv in gvs):
                        del gvs[tv]
                vs.update(gvs)
            return vs
        if (type(a) == call_e):
            if (isinstance(a.func, name_e) and (a.func.id == 'make_pql_tuple')):
                # Tuple constructor: union the mappings of each element expr.
                t = a.args[0].values
                vs = {}
                for tvs in [get_all_var_mappings(get_ast(x.values[0].value)) for x in t]:
                    vs.update(tvs)
                return vs
            if (isinstance(a.func, name_e) and (a.func.id == 'PyQuery')):
                vs = {}
                # NOTE(review): eval() on the printed AST is fragile and unsafe
                # for untrusted input -- kept as-is to preserve behavior.
                clauses = eval(print_ast(a.args[0]))
                defined_vars = set()
                used_vars = set()
                for c in clauses:
                    used_vars = used_vars.union(c.used_vars())
                # Free variables are those used but never defined by a clause.
                for v in (used_vars - defined_vars):
                    vs[v] = None
                return vs
            else:
                # Ordinary call: union the mappings of each argument.
                vs = {}
                for nested_vs in [get_all_var_mappings(y) for y in a.args]:
                    vs.update(nested_vs)
                return vs
        if (type(a) == attribute_e):
            if (type(a.value) == name_e):
                # Direct `name.attr` access records the attribute name.
                return {a.value.id: a.attribute.id}
            return get_all_var_mappings(a.value)
        # Other AST nodes: recurse over their child fields.
        retvars = {}
        for x in a:
            if is_ast(x):
                retvars.update(get_all_var_mappings(x))
            elif (type(x) == list):
                for y in x:
                    if is_ast(y):
                        retvars.update(get_all_var_mappings(y))
        return retvars
    return {}
def _add_se(cv_results, copy=False):
n_folds = None
if copy:
cv_results = deepcopy(cv_results)
scores_with_std = [k for k in cv_results.keys() if (k[0:4] == 'std_')]
for k in scores_with_std:
s = k.split('_')
s[0] = 'se'
se_name = '_'.join(s)
if (se_name not in cv_results):
if (n_folds is None):
n_folds = _infer_n_folds(cv_results)
cv_results[se_name] = (np.array(cv_results[k]) / np.sqrt(n_folds))
return cv_results |
def test_includes() -> None:
    # The sdist for the 'with-include' fixture must contain files pulled in
    # via the `include` section -- even ones a VCS would normally exclude.
    poetry = Factory().create_poetry(project('with-include'))
    builder = SdistBuilder(poetry)
    builder.build()
    sdist = (((fixtures_dir / 'with-include') / 'dist') / 'with_include-1.2.3.tar.gz')
    assert sdist.exists()
    with tarfile.open(str(sdist), 'r') as tar:
        assert ('with_include-1.2.3/extra_dir/vcs_excluded.txt' in tar.getnames())
        assert ('with_include-1.2.3/notes.txt' in tar.getnames())
def video_loader(video_dir_path, frame_indices, image_loader):
    """Load frames named ``image-XXXX.png`` from *video_dir_path*.

    Frames are loaded with *image_loader* in the order given by
    *frame_indices*. The first missing frame ends the clip early: its path is
    printed and the frames collected so far are returned (possibly empty).
    """
    video = []
    for frame_index in frame_indices:
        image_path = os.path.join(video_dir_path, 'image-{:04d}.png'.format(frame_index))
        if not os.path.exists(image_path):
            # Missing frame: report it and stop loading.
            print(image_path)
            return video
        video.append(image_loader(image_path))
    return video
def test_setup_proxies_all_addresses_are_given():
    # When every contract address is available from the deployment data, both
    # proxy bundles must be constructible without touching a real chain.
    chain_id = ChainID(5)
    config = RaidenConfig(chain_id=chain_id, environment_type=Environment.DEVELOPMENT)
    contracts = load_deployed_contracts_data(config, chain_id)
    proxy_manager = MockProxyManager(node_address=make_address())
    deployed_addresses = load_deployment_addresses_from_contracts(contracts)
    # Patch out the network call to the pathfinding service.
    with patch.object(pathfinding, 'get_pfs_info', return_value=PFS_INFO):
        raiden_bundle = raiden_bundle_from_contracts_deployment(proxy_manager=proxy_manager, token_network_registry_address=deployed_addresses.token_network_registry_address, secret_registry_address=deployed_addresses.secret_registry_address)
        services_bundle = services_bundle_from_contracts_deployment(config=config, proxy_manager=proxy_manager, deployed_addresses=deployed_addresses, routing_mode=RoutingMode.PRIVATE, pathfinding_service_address='my-pfs', enable_monitoring=True)
    assert raiden_bundle
    assert services_bundle
    assert raiden_bundle.token_network_registry
    assert raiden_bundle.secret_registry
    assert services_bundle.user_deposit
    # Private routing mode does not need the service registry proxy.
    assert (not services_bundle.service_registry)
def main():
    """Fetch listener-peak/current stats for cached stream URIs, in parallel.

    Finds cached URIs missing a LISTENERPEAK entry (excluding known-failed
    ones), fetches their stream infos with a process pool, and appends the
    peak/current listener counts to the cache. Cache and failure sets are
    persisted even on error.
    """
    cache = get_cache()
    failed_uris = get_failed()
    parse_failed_uris = get_parse_failed()
    uris = cache.keys()
    # URIs that still need a listener-peak measurement.
    peak_missing = [uri for uri in uris if (LISTENERPEAK not in cache[uri])]
    peak_missing = (set(peak_missing) - failed_uris)
    # Collapse to root URIs and drop ones that previously failed to parse.
    peak_missing = {get_root(uri) for uri in peak_missing}
    peak_missing = (set(peak_missing) - parse_failed_uris)
    pool = Pool(PROCESSES)
    try:
        pfunc = fetch_stream_infos
        for (i, res) in enumerate(pool.imap_unordered(pfunc, peak_missing)):
            (uri, streams) = res
            # Persist progress periodically so a crash loses little work.
            if (((i + 1) % 1000) == 0):
                set_cache(cache)
            print(((('%d/%d ' % ((i + 1), len(peak_missing))) + uri) + ' -> '), end='')
            print(('%d new streams' % len(streams)))
            if (not streams):
                parse_failed_uris.add(uri)
            for stream in streams:
                peak = str(int(stream.peak))
                current = str(int(stream.current))
                # Note: rebinds `uri` to the stream's own URI from here on.
                uri = stream.stream
                if (uri not in cache):
                    cache[uri] = {}
                if (LISTENERPEAK in cache[uri]):
                    cache[uri][LISTENERPEAK].append(peak)
                else:
                    cache[uri][LISTENERPEAK] = [peak]
                if (LISTENERCURRENT in cache[uri]):
                    cache[uri][LISTENERCURRENT].append(current)
                else:
                    cache[uri][LISTENERCURRENT] = [current]
    except Exception as e:
        # Best-effort batch job: log and fall through to persist what we have.
        print(e)
    finally:
        set_parse_failed(parse_failed_uris)
        set_cache(cache)
        pool.terminate()
        pool.join()
def format_type_distinctly(*types: Type, options: Options, bare: bool=False) -> tuple[(str, ...)]:
    """Format several types so the resulting strings are pairwise distinct.

    Verbosity is increased until every type formats differently (or the
    maximum verbosity is exhausted). With ``bare=True`` the strings are
    returned without surrounding quotes.
    """
    overlapping = find_type_overlaps(*types)
    formatted = []
    for verbosity in range(2):
        formatted = [
            format_type_inner(typ, verbosity=verbosity, options=options, fullnames=overlapping)
            for typ in types
        ]
        # Stop as soon as all strings are distinct.
        if len(set(formatted)) == len(formatted):
            break
    if bare:
        return tuple(formatted)
    return tuple(quote_type_string(s) for s in formatted)
class TestAllocNamedColor(EndianTest):
    """Round-trip pack/unpack test for the X11 AllocNamedColor request/reply."""
    def setUp(self):
        # NOTE(review): the 'cmap' and 'pixel' values are missing after the
        # colon -- resource-ID literals were lost in extraction; this copy
        # does not parse. Restore the values from upstream.
        self.req_args_0 = {'cmap': , 'name': 'octarin'}
        self.req_bin_0 = b'U\x00\x05\x00\x19\x00X\x1f\x07\x00\x00\x00octarin\x00'
        self.reply_args_0 = {'exact_blue': 50619, 'exact_green': 55944, 'exact_red': 40316, 'pixel': , 'screen_blue': 27416, 'screen_green': 30102, 'screen_red': 5028, 'sequence_number': 64739}
        self.reply_bin_0 = b'\x01\x00\xe3\xfc\x00\x00\x00\x00\x81D\xd2<|\x9d\x88\xda\xbb\xc5\xa4\x13\x96u\x18k\x00\x00\x00\x00\x00\x00\x00\x00'
    def testPackRequest0(self):
        # Packing the args must reproduce the captured request bytes exactly.
        bin = request.AllocNamedColor._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)
    def testUnpackRequest0(self):
        (args, remain) = request.AllocNamedColor._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
    def testPackReply0(self):
        # Same round-trip discipline for the reply structure.
        bin = request.AllocNamedColor._reply.to_binary(*(), **self.reply_args_0)
        self.assertBinaryEqual(bin, self.reply_bin_0)
    def testUnpackReply0(self):
        (args, remain) = request.AllocNamedColor._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.reply_args_0)
class TFCI(BinaryCodec):
    """Codec wrapper that shells out to the TensorFlow Compression `tfci` script.

    NOTE(review): `description`/`name` read like @property methods and
    `setup_args` takes `cls` like a @classmethod, but no decorators are
    visible in this copy -- they appear stripped; confirm upstream.
    """
    fmt = '.tfci'
    # Pretrained model families selectable via --model.
    _models = ['bmshj2018-factorized-mse', 'bmshj2018-hyperprior-mse', 'mbt2018-mean-mse']
    def description(self):
        return 'TFCI'
    def name(self):
        return f'{self.model}'
    def setup_args(cls, parser):
        super().setup_args(parser)
        parser.add_argument('-m', '--model', choices=cls._models, default=cls._models[0], help='model architecture (default: %(default)s)')
        parser.add_argument('-p', '--path', required=True, help='tfci python script path (default: %(default)s)')
    def _set_args(self, args):
        args = super()._set_args(args)
        self.model = args.model
        self.tfci_path = args.path
        return args
    def _get_encode_cmd(self, img, quality, out_filepath):
        # Quality levels 1..8 select the pretrained checkpoint '<model>-<q>'.
        if (not (1 <= quality <= 8)):
            raise ValueError(f'Invalid quality value: {quality} (1, 8)')
        cmd = [sys.executable, self.tfci_path, 'compress', f'{self.model}-{quality:d}', img, out_filepath]
        return cmd
    def _get_decode_cmd(self, out_filepath, rec_filepath):
        cmd = [sys.executable, self.tfci_path, 'decompress', out_filepath, rec_filepath]
        return cmd
class Snowflake(IDConverter):
    """Command-argument converter that validates a Discord snowflake ID.

    Rejects arguments that are not snowflake-shaped, whose embedded timestamp
    overflows, predates the Discord epoch, or lies more than a day in the
    future; otherwise returns the snowflake as an int.
    """

    async def convert(self, ctx: Context, arg: str) -> int:
        error = f'Invalid snowflake {arg!r}'
        if not self._get_id_match(arg):
            raise BadArgument(error)
        snowflake = int(arg)
        try:
            creation_time = snowflake_time(snowflake)
        except (OverflowError, OSError) as e:
            # Timestamps outside the platform datetime range land here.
            raise BadArgument(f'{error}: {e}')
        if creation_time < DISCORD_EPOCH_DT:
            raise BadArgument(f'{error}: timestamp is before the Discord epoch.')
        if (datetime.now(UTC) - creation_time).days < (- 1):
            raise BadArgument(f'{error}: timestamp is too far into the future.')
        return snowflake
def process_scale(args):
    """Sample boundary points from depth-normalized SMPL+object fits per frame.

    For every frame/kinect-id pair, transforms the world-space SMPL and
    object meshes into the camera frame, rescales them so the SMPL center
    sits at a fixed depth, samples boundary points, and saves an .npz per
    frame. Frames whose scale factor falls outside [0.6, 1.5] are skipped.
    """
    sampler = BoundarySampler()
    reader = FrameDataReader(args.seq_folder, check_image=True)
    batch_end = reader.cvt_end(args.end)
    outdir = paths['PROCESSED_PATH']
    smpl_name = args.smpl_name
    obj_name = args.obj_name
    landmark = BodyLandmarks(assets_root=paths['SMPL_ASSETS_ROOT'])
    # Target depth (meters, presumably) for the SMPL center after rescaling.
    smpl_depth = args.smpl_depth
    kin_transform = KinectTransform(args.seq_folder, kinect_count=reader.kinect_count)
    scale_skipped = 0
    loop = tqdm(range(args.start, batch_end, args.interval))
    loop.set_description(f'{reader.seq_name} {args.start}-{batch_end}')
    for idx in loop:
        smpl_fit = reader.get_smplfit(idx, smpl_name)
        obj_fit = reader.get_objfit(idx, obj_name)
        # Skip frames with an incomplete fit.
        if ((smpl_fit is None) or (obj_fit is None)):
            continue
        for kid in args.kids:
            outfolder = join(outdir, reader.seq_name, reader.frame_time(idx))
            os.makedirs(outfolder, exist_ok=True)
            outfile = join(outfolder, f'{reader.frame_time(idx)}_k{kid}_{args.data_name}.npz')
            if (isfile(outfile) and (not args.redo)):
                continue
            elif isfile(outfile):
                # --redo: remove the stale file before regenerating.
                os.system(f'rm {outfile}')
            # World -> per-kinect color-camera frame.
            smpl_local = kin_transform.world2color_mesh(smpl_fit, kid)
            obj_local = kin_transform.world2color_mesh(obj_fit, kid)
            if args.flip:
                outfile = outfile.replace('.npz', '_flip.npz')
                smpl_local = kin_transform.flip_mesh(smpl_local)
                obj_local = kin_transform.flip_mesh(obj_local)
            # Rescale both meshes so the SMPL center sits at smpl_depth.
            smpl_center = landmark.get_smpl_center(smpl_local)
            scale = (smpl_depth / smpl_center[2])
            smpl_local.v = (smpl_local.v * scale)
            obj_local.v = (obj_local.v * scale)
            if ((scale < 0.6) or (scale > 1.5)):
                # NOTE(review): 'Warnning' typo in the message (runtime string,
                # left unchanged here).
                print('Warnning the scale {} maybe invalid! on file {}, skipped'.format(scale, outfile))
                scale_skipped += 1
                continue
            # Sanity check: the rescaled center must hit the target depth.
            new_center = landmark.get_smpl_center(smpl_local)
            assert (np.abs((new_center[2] - smpl_depth)) <= 1e-06), 'found new depth: {}, target depth: {}'.format(new_center, smpl_depth)
            os.makedirs(outfolder, exist_ok=True)
            data_dict = sampler.boundary_sample_all(landmark, smpl_local, obj_local, args.sigmas, args.ratios, args.sample_num, grid_ratio=args.grid_ratio, flip=args.flip)
            image_file = reader.get_color_files(idx, [kid])[0]
            assert (np.abs((data_dict['smpl_center'][2] - smpl_depth)) <= 1e-07), 'found new depth: {}, target depth: {}'.format(data_dict['smpl_center'], smpl_depth)
            data_dict['image_file'] = image_file
            data_dict['sigmas'] = np.array(args.sigmas)
            np.savez(outfile, **data_dict)
    print('skipped {} files, all done'.format(scale_skipped))
def create_dataset(h5_path='test.h5'):
    """Write a small random HDF5 dataset to *h5_path* for testing.

    Creates 'my_data' (200x10 float32 normals) and 'my_labels' (200x1 binary
    ints). The file is opened in 'w' mode, truncating any existing file.
    """
    X = np.random.randn(200, 10).astype('float32')
    y = np.random.randint(0, 2, size=(200, 1))
    # FIX: use a context manager so the file handle is closed even if one of
    # the dataset writes raises (the original leaked the handle on error).
    with h5py.File(h5_path, 'w') as f:
        X_dset = f.create_dataset('my_data', (200, 10), dtype='f')
        X_dset[:] = X
        y_dset = f.create_dataset('my_labels', (200, 1), dtype='i')
        y_dset[:] = y
def train_config(parser):
    """Register training hyperparameter CLI options on *parser* and return it.

    Covers hardware, logging cadence, optimization, dropout/regularization,
    LR scheduling, embedding tuning, checkpoint resume, and the random seed.
    """
    # Default output directory; PT_OUTPUT_DIR is set on the training platform.
    base_dir = os.getenv('PT_OUTPUT_DIR', '../model_data/san/')
    # Hardware / logging cadence.
    parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available(), help='Use GPU acceleration.')
    parser.add_argument('--multi_gpu', action='store_true', help='multi gpu training.')
    parser.add_argument('--log_per_updates', type=int, default=53)
    parser.add_argument('--progress_per_updates', type=int, default=500)
    parser.add_argument('--epoches', type=int, default=50)
    parser.add_argument('--continue_epoches', type=int, default=None, help='if continue train, add this amount of epoches. Default to use --epoches.')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--valid_batch_size', type=int, default=16)
    # Optimization. Note: 'store_false' flags (--ema, --vb_dropout) are ON by
    # default and the flag turns them OFF.
    parser.add_argument('--optimizer', default='adamax', help='supported optimizer: adamax, sgd, adadelta, adam')
    parser.add_argument('--ema', action='store_false', help='use exponential moving average for testing.')
    parser.add_argument('--ema_gamma', type=float, default=0.995, help='gamma for ema.')
    parser.add_argument('--grad_clipping', type=float, default=10)
    parser.add_argument('--weight_decay', type=float, default=0)
    parser.add_argument('--learning_rate', type=float, default=0.002)
    parser.add_argument('--momentum', type=float, default=0)
    # Dropout / regularization.
    parser.add_argument('--vb_dropout', action='store_false')
    parser.add_argument('--dropout_p', type=float, default=0.3)
    parser.add_argument('--dropout_emb', type=float, default=0.4)
    parser.add_argument('--dropout_w', type=float, default=0.05)
    parser.add_argument('--dw_type', type=int, default=0)
    parser.add_argument('--unk_id', type=int, default=1)
    parser.add_argument('--dataset_include_ratio', default=(- 1.0), type=float, help='ratio to include other datasets (paper formulation). override batches_mix_ratio.')
    parser.add_argument('--uncertainty_loss', action='store_true', help='Uncertainty-based loss using Kendall et al., 2017.')
    # Learning-rate scheduling.
    parser.add_argument('--no_lr_scheduler', dest='have_lr_scheduler', action='store_false')
    parser.add_argument('--multi_step_lr', type=str, default='10,20,30')
    parser.add_argument('--lr_gamma', type=float, default=0.5)
    parser.add_argument('--scheduler_type', type=str, default='ms', help='ms/rop/exp')
    # Embedding fine-tuning.
    parser.add_argument('--fix_embeddings', action='store_true', help='if true, `tune_partial` will be ignored.')
    parser.add_argument('--tune_partial', type=int, default=1000, help='finetune top-x embeddings (including <PAD>, <UNK>).')
    # Checkpointing / resume.
    parser.add_argument('--model_dir', default=('%s/' % base_dir))
    parser.add_argument('--resume_dir', default=None, type=str, help='model_dir to resume.')
    parser.add_argument('--resume', default='best_checkpoint.pt', type=str, help='model name to resume.')
    parser.add_argument('--resume_last_epoch', action='store_true', help='Restore the last previously stored model in model_dir. Will override resume_dir options.')
    parser.add_argument('--new_random_state', action='store_true', help='Use new random states for resume.')
    parser.add_argument('--not_resume_optimizer', type=str2bool, default=False, help='Do not restore state of optimizer.')
    parser.add_argument('--new_scheduler', action='store_true', help='use a new scheduler, multi step 2,4,6.')
    parser.add_argument('-ro', '--resume_options', action='store_true', help='use previous model options, ignore the cli and defaults.')
    parser.add_argument('--seed', type=int, default=2018, help='random seed for data shuffling, embedding init, etc.')
    return parser
# NOTE(review): '_required' is the tail of a stripped decorator (likely
# '@login_required') -- confirm upstream.
_required
def org_create(request):
    # Django view: show/process the organization-creation form. Creation may
    # be restricted to superusers via settings.RESTRICT_ORG_CREATION.
    if (settings.RESTRICT_ORG_CREATION and (not request.user.is_superuser)):
        messages.error(request, _('Only super users can create an organization.'))
        return redirect('user_dashboard')
    user = get_session_user(request)
    ctx = {'user': user}
    if (request.method == 'POST'):
        form = OrgCreationForm(request.POST)
        if form.is_valid():
            org = form.save()
            # The creator becomes the first member with full permissions.
            org.members.add(user)
            perm = Permission.objects.get(organization=org)
            perm.set_all(True)
            messages.success(request, _("You successfully created organization '{}'".format(org.name)))
            return redirect('user_dashboard')
        else:
            # Re-render with the bound form so validation errors are shown.
            ctx.update({'form': form})
            return render(request, 'petition/org_create.html', ctx)
    form = OrgCreationForm()
    ctx.update({'form': form})
    return render(request, 'petition/org_create.html', ctx)
class PropertyGroup(bpy.types.PropertyGroup):
    """User-facing settings for exemplar-based motion synthesis.

    Groups the exemplar selection (frame range, up axis) together with the
    synthesis controls (noise, output length) and the patch-based
    optimization parameters (patch size, pyramid, alpha, loop, steps).

    Fixes: corrected typos in the UI description strings ("Moition",
    "Coarest", "Constrain", and the incomplete frame-count sentence).
    """
    # Frame range of the exemplar motion; end_frame == -1 means "unset / last frame".
    start_frame: bpy.props.IntProperty(name='Start Frame', description='Start Frame of the Exemplar Motion.', default=1)
    end_frame: bpy.props.IntProperty(name='End Frame', description='End Frame of the Exemplar Motion.', default=(- 1))
    up_axis: bpy.props.EnumProperty(name='Up Axis', default='Z_UP', description='Up axis of the Exemplar Motion', items=[('Z_UP', 'Z-Up', 'Z Up'), ('Y_UP', 'Y-Up', 'Y Up'), ('X_UP', 'X-Up', 'X Up')])
    # Synthesis output controls.
    noise: bpy.props.FloatProperty(name='Noise Intensity', description='Intensity of Noise Added to the Synthesized Motion.', default=10)
    num_syn_frames: bpy.props.IntProperty(name='Num. of Frames', description='Number of Frames of the Synthesized Motion.', default=600)
    # Patch-based optimization parameters.
    patch_size: bpy.props.IntProperty(name='Patch Size', description='Size for Patch Extraction.', min=7, default=15)
    coarse_ratio: bpy.props.FloatProperty(name='Coarse Ratio', description='Ratio of the Coarsest Pyramid.', min=0.0, default=0.2)
    pyr_factor: bpy.props.FloatProperty(name='Pyramid Factor', description='Pyramid Downsample Factor.', min=0.1, default=0.75)
    alpha: bpy.props.FloatProperty(name='Completeness Alpha', description='Alpha Value for Completeness/Diversity Trade-off.', default=0.05)
    loop: bpy.props.BoolProperty(name='Endless Loop', description='Whether to Use Loop Constraint.', default=False)
    num_steps: bpy.props.IntProperty(name='Num of Steps', description='Number of Optimized Steps.', default=5)
class OneAndOnlyOne(_BaseChildElement):
    """Declares a required child element with cardinality exactly one."""

    def __init__(self, nsptagname: str):
        # No optional successor tags apply to a required singleton child.
        super(OneAndOnlyOne, self).__init__(nsptagname, ())

    def populate_class_members(self, element_cls: MetaOxmlElement, prop_name: str) -> None:
        """Attach a read-only accessor property for this child to *element_cls*."""
        super(OneAndOnlyOne, self).populate_class_members(element_cls, prop_name)
        self._add_getter()

    def _getter(self):
        """Build the property getter; it raises InvalidXmlError when the child is missing."""

        def get_child_element(obj: BaseOxmlElement):
            found = obj.find(qn(self._nsptagname))
            if found is None:
                raise InvalidXmlError(
                    'required ``<%s>`` child element not present' % self._nsptagname
                )
            return found

        get_child_element.__doc__ = 'Required ``<%s>`` child element.' % self._nsptagname
        return get_child_element
def _sync_tensor_states(metric_name: str, state_name: str, my_state_data: torch.Tensor, gathered_states: List[Dict[(str, Dict[(str, Any)])]], process_group: Optional[dist.ProcessGroup], rank: Optional[int]) -> None:
    """Gather one tensor-valued metric state from every rank and record it per rank.

    The i-th entry of *gathered_states* receives rank i's tensor under
    ``[metric_name][state_name]``.  Ranks for which ``send_tensors`` returns
    ``None`` (non-destination ranks) record nothing.
    """
    all_rank_tensors = send_tensors(my_state_data, group=process_group, rank=rank)
    if all_rank_tensors is None:
        # Nothing was gathered on this rank; leave gathered_states untouched.
        return
    for rank_idx, rank_tensor in enumerate(all_rank_tensors):
        gathered_states[rank_idx][metric_name][state_name] = rank_tensor
def display_comparison(Dict, col=5):
    """Print each value list in *Dict* side by side, *col* entries per row.

    Floats are shown as ``%.5f``, ints as ``%7d``, everything else as a
    12-wide string.  Printing stops once any list's final element has been
    emitted.  Assumes all lists share the same length.
    """
    row = 0
    done = False
    while not done:
        for name, values in Dict.items():
            print('%10s:' % name, end='')
            for offset in range(col):
                pos = row * col + offset
                entry = values[pos]
                # Order matters: bool is an int subclass, and float is
                # checked before int, mirroring the formatting precedence.
                if isinstance(entry, float):
                    print(' %.5f' % entry, end=' ')
                elif isinstance(entry, int):
                    print(' %7d' % entry, end=' ')
                else:
                    print('%12s' % str(entry), end=' ')
                if pos + 1 == len(values):
                    done = True
                    break
            print('')
        row += 1
    print('')
class AdditionsPane(TogglePanel):
    """Collapsible wx panel hosting the "Additions" notebook of a fit:
    drones, fighters, cargo, implants, boosters, projected effects,
    command (fleet) effects and notes."""

    def __init__(self, parent, mainFrame):
        TogglePanel.__init__(self, parent, force_layout=1)
        self.mainFrame = mainFrame
        self.SetLabel(_t('Additions'))
        pane = self.GetContentPanel()
        baseSizer = wx.BoxSizer(wx.HORIZONTAL)
        pane.SetSizer(baseSizer)
        # Fixed-width-tab notebook; pages cannot be added or closed by the user.
        self.notebook = ChromeNotebook(pane, can_add=False, tabWidthMode=1)
        self.notebook.SetMinSize(((- 1), 1000))
        baseSizer.Add(self.notebook, 1, wx.EXPAND)
        # Tab icons, loaded from the gui bitmap set.
        droneImg = BitmapLoader.getImage('drone_small', 'gui')
        fighterImg = BitmapLoader.getImage('fighter_small', 'gui')
        implantImg = BitmapLoader.getImage('implant_small', 'gui')
        boosterImg = BitmapLoader.getImage('booster_small', 'gui')
        projectedImg = BitmapLoader.getImage('projected_small', 'gui')
        gangImg = BitmapLoader.getImage('fleet_fc_small', 'gui')
        cargoImg = BitmapLoader.getImage('cargo_small', 'gui')
        notesImg = BitmapLoader.getImage('skill_small', 'gui')
        # Page creation order must stay in sync with the PANES list below,
        # since select()/getName() map names to notebook indices.
        self.drone = DroneView(self.notebook)
        self.notebook.AddPage(self.drone, _t('Drones'), image=droneImg, closeable=False)
        self.fighter = FighterView(self.notebook)
        self.notebook.AddPage(self.fighter, _t('Fighters'), image=fighterImg, closeable=False)
        self.cargo = CargoView(self.notebook)
        self.notebook.AddPage(self.cargo, _t('Cargo'), image=cargoImg, closeable=False)
        self.implant = ImplantView(self.notebook)
        self.notebook.AddPage(self.implant, _t('Implants'), image=implantImg, closeable=False)
        self.booster = BoosterView(self.notebook)
        self.notebook.AddPage(self.booster, _t('Boosters'), image=boosterImg, closeable=False)
        self.projectedPage = ProjectedView(self.notebook)
        self.notebook.AddPage(self.projectedPage, _t('Projected'), image=projectedImg, closeable=False)
        self.gangPage = CommandView(self.notebook)
        self.notebook.AddPage(self.gangPage, _t('Command'), image=gangImg, closeable=False)
        self.notes = NotesView(self.notebook)
        self.notebook.AddPage(self.notes, _t('Notes'), image=notesImg, closeable=False)
        self.mainFrame.Bind(GE.FIT_CHANGED, self.OnFitChanged)
        self.mainFrame.Bind(GE.FIT_NOTES_CHANGED, self.OnNotesChanged)
        self.notebook.SetSelection(0)

    # Canonical page names, index-aligned with the AddPage calls in __init__.
    PANES = ['Drones', 'Fighters', 'Cargo', 'Implants', 'Boosters', 'Projected', 'Command', 'Notes']

    def select(self, name, focus=True):
        """Switch the notebook to the page called *name* (one of PANES)."""
        self.notebook.SetSelection(self.PANES.index(name), focus=focus)

    def getName(self, idx):
        """Return the canonical page name at notebook index *idx*."""
        return self.PANES[idx]

    def ToggleContent(self, event):
        """Collapse/expand the pane, adjusting the parent splitter sash to match."""
        TogglePanel.ToggleContent(self, event)
        h = (self.header_panel.GetSize()[1] + 4)
        if self.IsCollapsed():
            # Remember the sash position so expanding restores the old layout.
            self.old_pos = self.parent.GetSashPosition()
            self.parent.SetMinimumPaneSize(h)
            self.parent.SetSashPosition((h * (- 1)), True)
            self.parent.SetSashInvisible(True)
        else:
            self.parent.SetSashInvisible(False)
            self.parent.SetMinimumPaneSize(200)
            self.parent.SetSashPosition(self.old_pos, True)

    def OnFitChanged(self, event):
        """Refresh tab extra text when the active fit changed."""
        event.Skip()
        activeFitID = self.mainFrame.getActiveFit()
        # Ignore change events for fits other than the active one.
        if ((activeFitID is not None) and (activeFitID not in event.fitIDs)):
            return
        self.updateExtraText()

    def OnNotesChanged(self, event):
        """Refresh tab extra text when the fit's notes changed."""
        event.Skip()
        self.updateExtraText()

    def updateExtraText(self):
        """Update the supplementary text of every tab that supplies it, then redraw once."""
        refresh = False
        for i in range(self.notebook.GetPageCount()):
            page = self.notebook.GetPage(i)
            if hasattr(page, 'getTabExtraText'):
                refresh = True
                # Defer the actual redraw until all titles are updated.
                self.notebook.SetPageTitleExtra(i, (page.getTabExtraText() or ''), refresh=False)
        if refresh:
            self.notebook.tabs_container.AdjustTabsSize()
            self.notebook.Refresh()
class PageIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over published Page objects."""
    # Primary document field; its content is rendered via a search template.
    text = indexes.CharField(document=True, use_template=True)
    title = indexes.CharField(model_attr='title')
    description = indexes.CharField(model_attr='description')
    path = indexes.CharField(model_attr='path')
    # No model_attr: value is supplied by prepare_include_template() below.
    include_template = indexes.CharField()

    def get_model(self):
        """Return the model class this index covers."""
        return Page

    def prepare_include_template(self, obj):
        # Template used to render a Page hit in search result listings.
        return 'search/includes/pages.page.html'

    def prepare_description(self, obj):
        """Prefer the page's own description; fall back to the first ~50 words
        of the rendered content with HTML tags stripped."""
        if obj.description:
            return obj.description
        else:
            return striptags(truncatewords_html(obj.content.rendered, 50))

    def index_queryset(self, using=None):
        """Only published pages are indexed."""
        return self.get_model().objects.filter(is_published=True)
class Callback():
    """Base class for training callbacks.

    Every hook is a no-op; subclasses override the hooks they care about.
    The trainer injects its parameters and itself via set_params() /
    set_trainer() before training starts.
    """

    def __init__(self):
        pass

    def set_params(self, params):
        # Store the training parameters for use inside hook implementations.
        self.params = params

    def set_trainer(self, model):
        # Keep a reference to the owning trainer/model.
        self.trainer = model

    def on_train_begin(self, logs=None):
        """Called once before training starts."""
        pass

    def on_train_end(self, logs=None):
        """Called once after training finishes."""
        pass

    def on_epoch_begin(self, epoch, logs=None):
        """Called at the start of every epoch."""
        pass

    def on_epoch_end(self, epoch, logs=None):
        """Called at the end of every epoch."""
        pass

    def on_batch_begin(self, batch, logs=None):
        """Called before every batch."""
        pass

    def on_batch_end(self, batch, logs=None):
        """Called after every batch."""
        pass
class HspecLexer(HaskellLexer):
    """Pygments lexer for Hspec test files: Haskell highlighting plus special
    treatment of the spec-description strings."""
    name = 'Hspec'
    aliases = ['hspec']
    filenames = ['*Spec.hs']
    mimetypes = []
    version_added = '2.4'
    # Highlight the string argument of `it`/`describe`/`context` as doc text;
    # all remaining rules are inherited from HaskellLexer.
    tokens = {'root': [('(it)(\\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)), ('(describe)(\\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)), ('(context)(\\s*)("[^"]*")', bygroups(Text, Whitespace, String.Doc)), inherit]}
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False) -> Optional[str]:
    """Resolve *url_or_filename* to a local filesystem path.

    Remote URLs are downloaded into *cache_dir* (default TRANSFORMERS_CACHE);
    existing local paths are returned as-is.  With *extract_compressed_file*,
    zip/tar archives are extracted next to the archive into a
    ``<name>-extracted`` directory, which is reused unless *force_extract*.

    Raises:
        EnvironmentError: if a local-looking path does not exist, or an
            archive's format cannot be identified.
        ValueError: if the argument is neither a URL nor a local path.
    """
    if (cache_dir is None):
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # Remote: download (or reuse a cached copy) and get the local path.
        output_path = get_from_cache(url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only)
    elif os.path.exists(url_or_filename):
        output_path = url_or_filename
    elif (urlparse(url_or_filename).scheme == ''):
        # Scheme-less string that is not an existing file: treat as missing file.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    else:
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
    if extract_compressed_file:
        if ((not is_zipfile(output_path)) and (not tarfile.is_tarfile(output_path))):
            # Not an archive we know how to extract; hand back the file itself.
            return output_path
        (output_dir, output_file) = os.path.split(output_path)
        output_extract_dir_name = (output_file.replace('.', '-') + '-extracted')
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if (os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and (not force_extract)):
            # Non-empty extraction dir already present: reuse it.
            return output_path_extracted
        # Serialize extraction across processes sharing the cache.
        lock_path = (output_path + '.lock')
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                # Context manager closes the archive even if extraction fails
                # (the old code also called close() redundantly inside `with`).
                with ZipFile(output_path, 'r') as zip_file:
                    zip_file.extractall(output_path_extracted)
            elif tarfile.is_tarfile(output_path):
                # Fix: previously the TarFile leaked when extractall() raised.
                with tarfile.open(output_path) as tar_file:
                    tar_file.extractall(output_path_extracted)
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path))
        return output_path_extracted
    return output_path
def save_dataset(path, data, format, dicts, src_type):
    """Serialize the prepared dataset *data* under directory *path*.

    Supported formats:
      - 'raw'/'bin': one pickled dict in data.pt.
      - 'scp'/'scpmem'/'wav' (ASR): memory-indexed target-side tensors plus
        scp paths for the source audio.
      - 'mmap'/'mmem': memory-indexed files for both sides (non-ASR only).

    Note: `dicts` is not used here (kept for interface compatibility), and
    the function still reads `opt.asr` / `opt.data_type` from the global
    options object.

    Fixes: use the `format` and `src_type` parameters consistently instead
    of reaching for `opt.format` / `opt.src_type` (the mmap branch silently
    ignored the `format` argument), and normalize the np.save path
    formatting in the scp branch.
    """
    if (format in ['raw', 'bin']):
        print((('Saving data to ' + os.path.join(path, 'data.pt')) + '...'))
        # Use the src_type parameter (callers pass opt.src_type here).
        save_data = {'type': src_type, 'data': data}
        torch.save(save_data, os.path.join(path, 'data.pt'))
        print('Done')
    elif (format in ['scp', 'scpmem', 'wav']):
        print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        assert opt.asr, 'ASR data format is required for this memory indexed format'
        for set_ in ['tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
            if ((set_ not in data) or (data[set_] is None)):
                continue
            dtype = np.int64 if (opt.data_type == 'int64') else np.int32
            indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, ('data.%s.bin' % set_)), dtype=dtype)
            for tensor in data[set_]:
                indexed_data.add_item(tensor)
            indexed_data.finalize(os.path.join(path, ('data.%s.idx' % set_)))
            del indexed_data
        for set_ in ['src_sizes', 'tgt_sizes']:
            if (data[set_] is not None):
                np_array = np.asarray(data[set_])
                # Same path construction as the mmap branch below.
                np.save(os.path.join(path, ('data.%s.npy' % set_)), np_array)
            else:
                print(('Training %s not found ' % set_))
        # Source audio is referenced by scp path only.
        torch.save(data['src'], os.path.join(path, 'data.scp_path.pt'))
        if (('prev_src' in data) and (data['prev_src'] is not None)):
            torch.save(data['prev_src'], os.path.join(path, 'data.prev_scp_path.pt'))
        print('Done')
    elif (format in ['mmap', 'mmem']):
        print('Saving data to memory indexed data files')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        if opt.asr:
            print("ASR data format isn't compatible with memory indexed format")
            raise AssertionError
        for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
            if ((set_ not in data) or (data[set_] is None)):
                continue
            dtype = np.int64 if (opt.data_type == 'int64') else np.int32
            indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, ('data.%s.bin' % set_)), dtype=dtype)
            for tensor in data[set_]:
                indexed_data.add_item(tensor)
            indexed_data.finalize(os.path.join(path, ('data.%s.idx' % set_)))
            del indexed_data
        for set_ in ['src_sizes', 'tgt_sizes']:
            if (data[set_] is not None):
                np_array = np.asarray(data[set_])
                np.save(os.path.join(path, ('data.%s.npy' % set_)), np_array)
            else:
                print(('Set %s not found ' % set_))
def retry_with_exponential_backoff(func, initial_delay: float=1, exponential_base: float=2, jitter: bool=True, max_retries: int=10, errors: tuple=(TimeoutError, nemollm.exceptions.ApiException)):
    """Wrap *func* so that the listed *errors* trigger retries with exponential backoff.

    The delay starts at *initial_delay* and is multiplied by
    *exponential_base* (with optional random jitter) after each failure.
    After *max_retries* failed attempts a plain Exception is raised.
    Any other exception propagates unchanged.
    """
    import functools

    # Fix: preserve the wrapped function's name/docstring for introspection.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        num_retries = 0
        delay = initial_delay
        while True:
            try:
                return func(*args, **kwargs)
            except errors:
                num_retries += 1
                if (num_retries > max_retries):
                    raise Exception(f'Maximum number of retries ({max_retries}) exceeded.')
                # Grow the delay geometrically, with optional random jitter.
                delay *= (exponential_base * (1 + (jitter * random.random())))
                time.sleep(delay)
            # Removed the no-op `except Exception as e: raise e` clause:
            # unlisted exceptions now propagate naturally with a clean traceback.
    return wrapper
def test_kuccsd_supercell_vs_kpts_high_cost():
    """Per-unit-cell UCCSD on a 3x3x1 supercell should match k-point KUCCSD.

    Builds a 2-He-atom cell, runs UHF+UCCSD on the explicit supercell and
    KUHF+KUCCSD on the equivalent Monkhorst-Pack mesh, then compares both
    against fixed reference energies.
    """
    cell = gto.M(unit='B', a=[[0.0, 3., 3.], [3., 0.0, 3.], [3., 3., 0.0]], mesh=([13] * 3), atom='He 0 0 0\n He 1. 1. 1.', basis=[[0, (1.0, 1.0)], [0, (0.5, 1.0)]], verbose=0)
    nmp = [3, 3, 1]
    supcell = super_cell(cell, nmp)
    gmf = scf.UHF(supcell, exxdiv=None)
    ehf = gmf.kernel()
    gcc = cc.UCCSD(gmf)
    (ecc, t1, t2) = gcc.kernel()
    # Supercell totals are divided by the number of unit cells (3*3*1 = 9).
    print(('UHF energy (supercell) %f \n' % (float(ehf) / numpy.prod(nmp))))
    print(('UCCSD correlation energy (supercell) %f \n' % (float(ecc) / numpy.prod(nmp))))
    # NOTE(review): the reference values (-4., -0.) look truncated relative to
    # the tight 1e-07/1e-06 tolerances — confirm against the upstream test.
    assert (abs(((ehf / 9) - (- 4.))) < 1e-07)
    assert (abs(((ecc / 9) - (- 0.))) < 1e-06)
    # k-point calculation on the same mesh, shifted so the mesh contains Gamma.
    kpts = cell.make_kpts(nmp)
    kpts -= kpts[0]
    kmf = scf.KUHF(cell, kpts, exxdiv=None)
    ehf = kmf.kernel()
    kcc = cc.KUCCSD(kmf)
    (ecc, t1, t2) = kcc.kernel()
    print(('UHF energy (kpts) %f \n' % ehf))
    print(('UCCSD correlation energy (kpts) %f \n' % ecc))
    assert (abs((ehf - (- 4.))) < 1e-07)
    assert (abs((ecc - (- 0.))) < 1e-06)
def concat_files(split, src, tgt, extracted_folders, split_urls, path_patterns, to_folder, debug=False):
    """Concatenate all extracted corpus files for *split* into per-language files.

    For each language side (src, tgt), collects every file matching
    *path_patterns* from the extraction folder of each URL in *split_urls*,
    then shell-`cat`s the sorted, de-duplicated list into
    ``{to_folder}/{split}.{src}-{tgt}.{lang}``.
    """
    for lang in [src, tgt]:
        to_file = f'{to_folder}/{split}.{src}-{tgt}.{lang}'
        # Script-less language codes (e.g. 'zh_CN' -> 'zh') used for globbing.
        (s_src, s_tgt, s_lang) = (src.split('_')[0], tgt.split('_')[0], lang.split('_')[0])
        files = []
        for url in split_urls:
            if isinstance(url, tuple):
                (url, downloaded_file) = url
            if (str(url) not in extracted_folders):
                print(f'warning: {url} not in extracted files')
                # Fix: previously fell through to the lookup below and
                # raised KeyError right after printing the warning.
                continue
            for extracted_file in set(extracted_glob(extracted_folders[str(url)], path_patterns, s_src, s_tgt, s_lang)):
                files.append(extracted_file)
        if (len(files) == 0):
            print('warning: ', f'No files found for split {to_file}')
            continue
        # Deterministic order, no duplicates.
        files = sorted(set(files))
        print(f'concating {len(files)} files into {to_file}')
        cmd = ((['cat'] + [f'"{f}"' for f in files]) + [f'>{to_file}'])
        cmd = ' '.join(cmd)
        call(cmd, debug=debug)
def test_strict_mode_cmdline(testdir):
    """`--asyncio-mode=strict` must run a coroutine test that carries an
    explicit `@pytest.mark.asyncio` marker.

    Fix: the generated test file previously contained a garbled bare
    `.asyncio` line (the `@pytest.mark` part was lost) and mis-indented
    code, so the inner module was not valid Python.
    """
    testdir.makepyfile(
        dedent(
            """\
            import asyncio
            import pytest

            pytest_plugins = 'pytest_asyncio'

            @pytest.mark.asyncio
            async def test_a():
                await asyncio.sleep(0)
            """
        )
    )
    result = testdir.runpytest('--asyncio-mode=strict')
    result.assert_outcomes(passed=1)
class Model(nn.Module):
    """VGG16-backbone CNN producing classification scores plus class-specific
    saliency maps (200 output classes).

    forward() computes the regular classification score (`score_1`), a
    class-specific saliency map from conv4 features, and a second score
    (`score_2`) from saliency-weighted classifier features.  It also runs an
    "erased" branch — conv4 features with the saliency region suppressed —
    through deep copies of the conv5/classifier layers; the resulting class
    scores feed bas_loss().  NOTE(review): this looks like a background
    activation suppression (BAS) style WSOL model — confirm against the
    originating paper/repo.
    """

    def __init__(self, args):
        super(Model, self).__init__()
        self.args = args
        self.num_classes = args.num_classes
        # VGG16 convolutional stack: five conv blocks, each followed by 2x2 max-pooling.
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.relu1_1 = nn.ReLU(inplace=True)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.relu1_2 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.relu2_1 = nn.ReLU(inplace=True)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.relu2_2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool2d(2)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.relu3_1 = nn.ReLU(inplace=True)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.relu3_2 = nn.ReLU(inplace=True)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.relu3_3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool2d(2)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.relu4_1 = nn.ReLU(inplace=True)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.relu4_2 = nn.ReLU(inplace=True)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.relu4_3 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool2d(2)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.relu5_1 = nn.ReLU(inplace=True)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.relu5_2 = nn.ReLU(inplace=True)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.relu5_3 = nn.ReLU(inplace=True)
        self.pool5 = nn.MaxPool2d(2)
        # Global pooling over the classifier's 14x14 feature map.
        self.avg_pool = nn.AvgPool2d(14)
        # Classification head producing 200 per-class activation maps.
        self.classifier_cls = nn.Sequential(nn.Conv2d(512, 1024, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(1024, 200, kernel_size=1, padding=0), nn.ReLU(inplace=True))
        # Localization head: per-class saliency maps in [0, 1] from conv4 features.
        self.classifier_loc = nn.Sequential(nn.Conv2d(512, 200, kernel_size=3, padding=1), nn.Sigmoid())

    def forward(self, x, label=None, N=1):
        """Run the classification and saliency branches.

        Args:
            x: input image batch (assumed sized so conv5 output is 14x14 —
                presumably 224x224 inputs; TODO confirm).
            label: ground-truth class ids; required when N == 1 and for the
                erased-branch bookkeeping below.
            N: number of top-scoring classes whose saliency maps are averaged
                when picking the localization map.

        Returns:
            (score_1, score_2): plain classification scores and
            saliency-weighted classification scores.
        """
        # Fresh deep copies of the conv5 block and classifier are used for the
        # erased branch so that branch runs on separate module instances each call.
        conv_copy_5_1 = copy.deepcopy(self.conv5_1)
        relu_copy_5_1 = copy.deepcopy(self.relu5_1)
        conv_copy_5_2 = copy.deepcopy(self.conv5_2)
        relu_copy_5_2 = copy.deepcopy(self.relu5_2)
        conv_copy_5_3 = copy.deepcopy(self.conv5_3)
        relu_copy_5_3 = copy.deepcopy(self.relu5_3)
        classifier_cls_copy = copy.deepcopy(self.classifier_cls)
        batch = x.size(0)
        # Backbone blocks 1-4.
        x = self.conv1_1(x)
        x = self.relu1_1(x)
        x = self.conv1_2(x)
        x = self.relu1_2(x)
        x = self.pool1(x)
        x = self.conv2_1(x)
        x = self.relu2_1(x)
        x = self.conv2_2(x)
        x = self.relu2_2(x)
        x = self.pool2(x)
        x = self.conv3_1(x)
        x = self.relu3_1(x)
        x = self.conv3_2(x)
        x = self.relu3_2(x)
        x = self.conv3_3(x)
        x = self.relu3_3(x)
        x = self.pool3(x)
        x = self.conv4_1(x)
        x = self.relu4_1(x)
        x = self.conv4_2(x)
        x = self.relu4_2(x)
        x = self.conv4_3(x)
        x = self.relu4_3(x)
        # Keep conv4 features for the localization head and the erased branch.
        x_4 = x.clone()
        x = self.pool4(x)
        # Block 5 + classification head.
        x = self.conv5_1(x)
        x = self.relu5_1(x)
        x = self.conv5_2(x)
        x = self.relu5_2(x)
        x = self.conv5_3(x)
        x = self.relu5_3(x)
        x = self.classifier_cls(x)
        self.feature_map = x
        # Global average pooling -> per-class scores.
        x = self.avg_pool(x).view(x.size(0), (- 1))
        self.score_1 = x
        # Pick the classes whose saliency maps are used: the ground-truth
        # label when N == 1, otherwise the top-N predictions.
        if (N == 1):
            p_label = label.unsqueeze((- 1))
        else:
            (_, p_label) = self.score_1.topk(N, 1, True, True)
        # Per-sample score of the ground-truth class (detached later in bas_loss).
        self.x_sum = torch.zeros(batch).cuda()
        for i in range(batch):
            self.x_sum[i] = self.score_1[i][label[i]]
        # Saliency: average the selected classes' maps from the localization head.
        x_saliency_all = self.classifier_loc(x_4)
        x_saliency = torch.zeros(batch, 1, 28, 28).cuda()
        for i in range(batch):
            x_saliency[i][0] = x_saliency_all[i][p_label[i]].mean(0)
        self.x_saliency = x_saliency
        # Erased branch: suppress the salient region on detached conv4 features
        # and re-run block 5 + classifier via the deep copies made above.
        x_erase = (x_4.detach() * (1 - x_saliency))
        x_erase = self.pool4(x_erase)
        x_erase = conv_copy_5_1(x_erase)
        x_erase = relu_copy_5_1(x_erase)
        x_erase = conv_copy_5_2(x_erase)
        x_erase = relu_copy_5_2(x_erase)
        x_erase = conv_copy_5_3(x_erase)
        x_erase = relu_copy_5_3(x_erase)
        x_erase = classifier_cls_copy(x_erase)
        x_erase = self.avg_pool(x_erase).view(x_erase.size(0), (- 1))
        # Ground-truth class score of the erased branch, used in bas_loss.
        self.x_erase_sum = torch.zeros(batch).cuda()
        for i in range(batch):
            self.x_erase_sum[i] = x_erase[i][label[i]]
        # Second score: classifier features re-weighted by the (downsampled) saliency map.
        x = (self.feature_map * nn.AvgPool2d(2)(self.x_saliency))
        self.score_2 = self.avg_pool(x).squeeze((- 1)).squeeze((- 1))
        return (self.score_1, self.score_2)

    def bas_loss(self):
        """Loss combining the erased/original score ratio with a saliency-area term.

        Uses state stored by forward(): x_sum (original-branch ground-truth
        scores, detached here), x_erase_sum (erased-branch scores) and
        x_saliency.  Ratios where the erased score meets or exceeds the
        original are zeroed.
        """
        batch = self.x_sum.size(0)
        x_sum = self.x_sum.clone().detach()
        x_res = self.x_erase_sum
        res = (x_res / (x_sum + 1e-08))
        res[(x_res >= x_sum)] = 0
        x_saliency = self.x_saliency
        # Mean saliency per sample acts as an area regularizer (weight 0.7).
        x_saliency = x_saliency.clone().view(batch, (- 1))
        x_saliency = x_saliency.mean(1)
        loss = (res + (x_saliency * 0.7))
        loss = loss.mean(0)
        return loss

    def normalize_atten_maps(self, atten_maps):
        """Min-max normalize attention maps over their spatial dimensions."""
        atten_shape = atten_maps.size()
        # Per-map spatial minima/maxima (spatial dims flattened to the last axis).
        (batch_mins, _) = torch.min(atten_maps.view((atten_shape[0:(- 2)] + ((- 1),))), dim=(- 1), keepdim=True)
        (batch_maxs, _) = torch.max(atten_maps.view((atten_shape[0:(- 2)] + ((- 1),))), dim=(- 1), keepdim=True)
        atten_normed = torch.div((atten_maps.view((atten_shape[0:(- 2)] + ((- 1),))) - batch_mins), ((batch_maxs - batch_mins) + 1e-10))
        atten_normed = atten_normed.view(atten_shape)
        return atten_normed
class RHEL7_TestCase(F18_TestCase):
    """Parsing tests for the RHEL7 variant of the kickstart `timezone` command."""
    def runTest(self):
        # Accepted forms: bare flags, a timezone argument, and NTP server lists.
        self.assert_parse('timezone --utc')
        self.assert_parse('timezone Europe/Sofia')
        self.assert_parse('timezone --isUtc')
        self.assert_parse('timezone --ntpservers=ntp.cesnet.cz')
        self.assert_parse('timezone --ntpservers=ntp.cesnet.cz,tik.nic.cz')
        # Rejected forms: unknown options, extra positional arguments,
        # a bare command, and mutually exclusive --nontp/--ntpservers.
        self.assert_parse_error('timezone --blah')
        self.assert_parse_error('timezone foo bar', KickstartParseError, 'One or zero arguments are expected for the timezone command')
        self.assert_parse_error('timezone --utc foo bar', exception=KickstartParseError)
        self.assert_parse_error('timezone', KickstartParseError, 'At least one option and/or an argument are expected for the timezone command')
        self.assert_parse_error('timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org')
def calculate_sentence_transformer_embedding(examples, embedding_model, mean_normal=False):
    """Encode each example's 'history' text with an INSTRUCTOR model.

    Args:
        examples: iterable of dicts with a 'history' text field.
        embedding_model: model name/path passed to INSTRUCTOR.
        mean_normal: if True, subtract the mean embedding (centering).

    Returns:
        torch.Tensor of shape (len(examples), embedding_dim).
    """
    text_to_encode = [e['history'] for e in examples]
    num = len(text_to_encode)
    emb_model = INSTRUCTOR(embedding_model)
    embeddings = []
    # Encode in batches of 20.  Fix: iterate the tqdm wrapper directly —
    # the old code built a separate bar over the same range, advanced it
    # manually by 1 per batch, and never closed it.
    for i in tqdm(range(0, num, 20), desc='calculate embeddings'):
        embeddings += emb_model.encode(text_to_encode[i:(i + 20)]).tolist()
    embeddings = torch.tensor(embeddings)
    if mean_normal:
        # Center the embeddings around the dataset mean.
        mean_embeddings = torch.mean(embeddings, 0, True)
        embeddings = (embeddings - mean_embeddings)
    return embeddings
_config  # NOTE(review): looks like a truncated decorator (e.g. a `@...config` marker) lost in chunking — confirm against the full file.
def test_keypress(manager):
    """A bound keypress moves focus between windows; unknown modifiers raise."""
    manager.test_window('one')
    manager.test_window('two')
    # Unknown modifier names must be rejected.
    with pytest.raises(CommandError):
        manager.c.simulate_keypress(['unknown'], 'j')
    # Focus is unchanged after the failed keypress ...
    assert (manager.c.get_groups()['a']['focus'] == 'two')
    # ... and a valid control-j binding shifts it to the other window.
    manager.c.simulate_keypress(['control'], 'j')
    assert (manager.c.get_groups()['a']['focus'] == 'one')
def purge_processor(caller):
    """Remove all batch-processor state from *caller* and restore its cmdsets.

    Deletes the temporary batch attributes off ``caller.ndb`` (best effort),
    then either restores the cmdset stack saved before the batch run or, if
    no backup exists, clears the cmdsets entirely.  Finally revalidates the
    caller's scripts.
    """
    try:
        # Best effort: a missing attribute aborts cleanup of the rest,
        # exactly as the original sequential `del` statements did.
        for attr in ('batch_stack', 'batch_stackptr', 'batch_pythonpath', 'batch_batchmode'):
            delattr(caller.ndb, attr)
    except Exception:
        pass
    backup = caller.ndb.batch_cmdset_backup
    if backup:
        # Restore the cmdset stack saved before the batch processor ran.
        caller.cmdset.cmdset_stack = backup
        caller.cmdset.update()
        del caller.ndb.batch_cmdset_backup
    else:
        caller.cmdset.clear()
    caller.scripts.validate()
class _HashableValue(TypedValue):
    """TypedValue that additionally requires assigned values to be hashable."""

    def can_assign(self, other: Value, ctx: CanAssignContext) -> CanAssign:
        # Class references are always hashable.
        if isinstance(other, SubclassValue):
            return {}
        if isinstance(other, TypedValue) and other.typ is type:
            return {}
        if isinstance(other, KnownValue):
            # Concrete value: just try to hash it and report any failure.
            try:
                hash(other.val)
            except Exception as e:
                return CanAssignError(f'{other.val!r} is not hashable', children=[CanAssignError(repr(e))])
            return {}
        # Anything else defers to the normal TypedValue compatibility check.
        return super().can_assign(other, ctx)
class Effect6705(BaseEffect):
    """Passive effect: scales the 'drawback' attribute of 'Rig Shield' group
    modules by the source's rigDrawbackBonus, multiplied by its skill level.

    NOTE(review): `handler` intentionally has no `self` parameter — these
    effect handlers appear to be invoked as plain functions; confirm
    against BaseEffect's call convention.
    """
    type = 'passive'
    def handler(fit, src, context, projectionRange, **kwargs):
        # Bonus strength grows linearly with the source's level.
        lvl = src.level
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Rig Shield')), 'drawback', (src.getModifiedItemAttr('rigDrawbackBonus') * lvl), **kwargs)
class Priors(BaseModel):
    """Prior width settings for fitted force-field parameters."""
    # Initial prior for proper-torsion force constants (k).
    Proper_k: float = Field(6.0, description='The initial prior for the proper torsion k values.')

    def format_priors(self) -> Dict[(str, Any)]:
        """Return the priors keyed by slash-separated names (e.g. 'Proper/k').

        Field names use '_' where the consuming format expects '/'.
        """
        return {name.replace('_', '/'): value for (name, value) in self.__dict__.items()}
class CLIPVisionConfig(PretrainedConfig):
    """Configuration for the CLIP vision encoder.

    Stores the vision transformer hyper-parameters: hidden/intermediate
    sizes, layer and head counts, image/patch geometry, activation,
    normalization epsilon, dropouts and initializer scales.
    """
    model_type = 'clip_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    # Fix: this takes `cls` and is invoked on the class, so it must be a
    # classmethod — it was previously declared as a plain instance method.
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load the vision sub-config from a full CLIP checkpoint or a standalone one."""
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # A full CLIP config nests the vision settings under 'vision_config'.
        if (config_dict.get('model_type') == 'clip'):
            config_dict = config_dict['vision_config']
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def generate_random_paths(sample_len: int, sample_size: int, mean: float, std: float, leverage: float=1.0):
    """Simulate *sample_size* i.i.d. normal return paths of length *sample_len*.

    *mean* and *std* are scaled by *leverage* before sampling.  The index is
    1..sample_len; each column is one simulated path.
    """
    scaled_mean = mean * leverage
    scaled_std = std * leverage
    time = np.arange(1, sample_len + 1)
    # Draw all returns as one column vector, then lay them out row-major
    # into a (sample_len, sample_size) matrix (identical RNG stream order).
    draws = np.random.normal(loc=scaled_mean, scale=scaled_std, size=(sample_len * sample_size, 1))
    returns = draws.reshape(sample_len, sample_size)
    return SimpleReturnsDataFrame(data=returns, index=time)
def test_child_scope():
    """Child injectors share parent-scoped singletons but keep their own bindings."""
    TestKey = NewType('TestKey', str)
    TestKey2 = NewType('TestKey2', str)

    def parent_module(binder):
        binder.bind(TestKey, to='in parent', scope=singleton)

    def first_child_module(binder):
        binder.bind(TestKey2, to='in first child', scope=singleton)

    def second_child_module(binder):
        binder.bind(TestKey2, to='in second child', scope=singleton)

    injector = Injector(modules=[parent_module])
    first_child_injector = injector.create_child_injector(modules=[first_child_module])
    second_child_injector = injector.create_child_injector(modules=[second_child_module])
    # The parent-scoped singleton is one shared instance across both children...
    assert (first_child_injector.get(TestKey) is first_child_injector.get(TestKey))
    assert (first_child_injector.get(TestKey) is second_child_injector.get(TestKey))
    # ...while each child's own singleton binding stays distinct.
    assert (first_child_injector.get(TestKey2) is not second_child_injector.get(TestKey2))
class TestGeostationaryTools():
    """Tests for geostationary bounding-box and angle-extent helpers."""

    def test_get_full_geostationary_bbox(self, truncated_geos_area):
        """The full-disc boundary has the requested point count and is not a closed ring."""
        nb_points = 20
        (x, y) = get_full_geostationary_bounding_box_in_proj_coords(truncated_geos_area, nb_points)
        assert (len(x) == nb_points)
        assert (len(y) == nb_points)
        # First and last vertices differ: the polygon is open.
        assert (x[0] != x[(- 1)])
        assert (y[0] != y[(- 1)])
        # Reference coordinates of the full-disc boundary in projection meters.
        expected_x = np.array([(- 5430622.55), (- 5164828.97), (- 4393465.93), (- 3192039.85), (- 1678154.66), 3.e-10, 1678154.66, 3192039.85, 4393465.93, 5164828.97, 5430622.55, 5164828.97, 4393465.93, 3192039.85, 1678154.66, 3.e-10, (- 1678154.66), (- 3192039.85), (- 4393465.93), (- 5164828.97)])
        expected_y = np.array([6.e-10, 1672427.79, 3181146.7, 4378472.8, 5147203.48, 5412090.02, 5147203.48, 4378472.8, 3181146.7, 1672427.79, (- 0.0), (- 1672427.79), (- 3181146.7), (- 4378472.8), (- 5147203.48), (- 5412090.02), (- 5147203.48), (- 4378472.8), (- 3181146.7), (- 1672427.79)])
        np.testing.assert_allclose(x, expected_x)
        np.testing.assert_allclose(y, expected_y)

    def test_get_geostationary_bbox_works_with_truncated_area(self, truncated_geos_area):
        """Lon/lat bounding box of a truncated (non-full-disc) area matches the reference."""
        (lon, lat) = get_geostationary_bounding_box_in_lonlats(truncated_geos_area, 20)
        expected_lon = np.array([(- 64.), (- 68.), (- 65.), (- 60.), (- 47.), 9., 66., 79., 84., 87., 83.])
        expected_lat = np.array([14., 17., 35., 52., 69., 79., 69., 52., 35., 17., 14.])
        np.testing.assert_allclose(lon, expected_lon)
        np.testing.assert_allclose(lat, expected_lat)

    def test_get_geostationary_bbox_works_with_truncated_area_proj_coords(self, truncated_geos_area):
        """Projection-coordinate bounding box of a truncated area matches the reference."""
        (x, y) = get_geostationary_bounding_box_in_proj_coords(truncated_geos_area, 20)
        expected_x = np.array([(- 5209128.), (- 5164828.), (- 4393465.), (- 3192039.), (- 1678154.), 3.e-10, 1678154., 3192039., 4393465., 5164828., 5209128.])
        expected_y = np.array([1393687.2705, 1672427., 3181146., 4378472., 5147203., 5412090., 5147203., 4378472., 3181146., 1672427., 1393687.2705])
        np.testing.assert_allclose(x, expected_x)
        np.testing.assert_allclose(y, expected_y)

    def test_get_geostationary_bbox_does_not_contain_inf(self, truncated_geos_area):
        """Boundary lon/lats never contain infinities."""
        (lon, lat) = get_geostationary_bounding_box_in_lonlats(truncated_geos_area, 20)
        assert (not any(np.isinf(lon)))
        assert (not any(np.isinf(lat)))

    def test_get_geostationary_bbox_returns_empty_lonlats_in_space(self, truncated_geos_area_in_space):
        """An area entirely off the Earth disc yields empty lon/lat arrays."""
        (lon, lat) = get_geostationary_bounding_box_in_lonlats(truncated_geos_area_in_space, 20)
        assert (len(lon) == 0)
        assert (len(lat) == 0)

    def test_get_geostationary_bbox(self):
        """Full-disc bounding box is correct, and shifts with the sub-satellite longitude."""
        geos_area = MagicMock()
        lon_0 = 0
        proj_dict = {'a': 6378169.0, 'b': 6356583.8, 'h': .0, 'lon_0': lon_0, 'proj': 'geos'}
        geos_area.crs = CRS(proj_dict)
        geos_area.area_extent = [(- 5500000.0), (- 5500000.0), 5500000.0, 5500000.0]
        (lon, lat) = get_geostationary_bounding_box_in_lonlats(geos_area, 20)
        expected_lon = np.array([(- 78.), (- 75.), (- 70.), (- 56.), 0.0, 56., 70., 75., 78., 79., 78., 75., 70., 56., 0.0, (- 56.), (- 70.), (- 75.), (- 78.), (- 79.)])
        expected_lat = np.array([17., 35., 52.5978607, 69., 79., 69., 52.5978607, 35., 17., (- 0.0), (- 17.), (- 35.), (- 52.5978607), (- 69.), (- 79.), (- 69.), (- 52.5978607), (- 35.), (- 17.), 0.0])
        np.testing.assert_allclose(lon, expected_lon, atol=1e-07)
        np.testing.assert_allclose(lat, expected_lat, atol=1e-07)
        # Same geometry at lon_0 = 10: longitudes shift uniformly by lon_0.
        geos_area = MagicMock()
        lon_0 = 10
        proj_dict = {'a': 6378169.0, 'b': 6356583.8, 'h': .0, 'lon_0': lon_0, 'proj': 'geos'}
        geos_area.crs = CRS(proj_dict)
        geos_area.area_extent = [(- 5500000.0), (- 5500000.0), 5500000.0, 5500000.0]
        (lon, lat) = get_geostationary_bounding_box_in_lonlats(geos_area, 20)
        np.testing.assert_allclose(lon, (expected_lon + lon_0))

    def test_get_geostationary_angle_extent(self):
        """Angle extent follows the satellite height and ellipsoid radii."""
        geos_area = MagicMock()
        proj_dict = {'proj': 'geos', 'sweep': 'x', 'lon_0': (- 89.5), 'a': 6378169.0, 'b': 6356583.8, 'h': .0, 'units': 'm'}
        geos_area.crs = CRS(proj_dict)
        # NOTE(review): the numeric literals `h': .0` and the bare `(0., 0.)`
        # references here look truncated by extraction — confirm against upstream.
        expected = (0., 0.)
        np.testing.assert_allclose(expected, get_geostationary_angle_extent(geos_area))
        # Spherical body of radius 1000 with height sqrt(2)*1000 - 1000:
        # the disc subtends exactly 45 degrees.
        proj_dict['a'] = 1000.0
        proj_dict['b'] = 1000.0
        proj_dict['h'] = ((np.sqrt(2) * 1000.0) - 1000.0)
        geos_area.crs = CRS(proj_dict)
        expected = (np.deg2rad(45), np.deg2rad(45))
        np.testing.assert_allclose(expected, get_geostationary_angle_extent(geos_area))
        # Ellipsoid given by name instead of explicit radii.
        proj_dict = {'proj': 'geos', 'sweep': 'x', 'lon_0': (- 89.5), 'ellps': 'GRS80', 'h': .0, 'units': 'm'}
        geos_area.crs = CRS(proj_dict)
        expected = (0., 0.)
        np.testing.assert_allclose(expected, get_geostationary_angle_extent(geos_area))
def assert_ne(expected, actual, message=None, tolerance=None, extra=None):
    """Raise AssertionError unless *expected* != *actual*.

    With *tolerance* (numbers only), the values must differ by more than
    *tolerance*.  *message* and *extra* are forwarded to the failure
    message builder.

    Fix: the tolerance-validation message previously referred to
    "assert_eq" (copy-paste from the sibling function).
    """
    if (tolerance is None):
        assert (expected != actual), _assert_fail_message(message, expected, actual, '==', extra)
    else:
        assert isinstance(tolerance, _number_types), ('tolerance parameter to assert_ne must be a number: %a' % tolerance)
        assert (isinstance(expected, _number_types) and isinstance(actual, _number_types)), ('parameters must be numbers when tolerance is specified: %a, %a' % (expected, actual))
        diff = abs((expected - actual))
        assert (diff > tolerance), _assert_fail_message(message, expected, actual, ('is less than %a away from' % tolerance), extra)
def _create_dataloaders(config, dataset_class, tf1, tf2, partitions, target_transform=None, shuffle=False):
train_imgs_list = []
for train_partition in partitions:
if ('STL10' == config.dataset):
train_imgs_curr = dataset_class(root=config.dataset_root, transform=tf1, split=train_partition, target_transform=target_transform)
else:
train_imgs_curr = dataset_class(root=config.dataset_root, transform=tf1, train=train_partition, target_transform=target_transform)
if hasattr(config, 'mix_train'):
if (config.mix_train and (train_partition == 'train+unlabeled')):
train_imgs_curr = reorder_train_deterministic(train_imgs_curr)
train_imgs_list.append(train_imgs_curr)
train_imgs = ConcatDataset(train_imgs_list)
train_dataloader = torch.utils.data.DataLoader(train_imgs, batch_size=config.dataloader_batch_sz, shuffle=shuffle, num_workers=0, drop_last=False)
if (not shuffle):
assert isinstance(train_dataloader.sampler, torch.utils.data.sampler.SequentialSampler)
dataloaders = [train_dataloader]
for d_i in xrange(config.num_dataloaders):
print(('Creating auxiliary dataloader ind %d out of %d time %s' % (d_i, config.num_dataloaders, datetime.now())))
sys.stdout.flush()
train_tf_imgs_list = []
for train_partition in partitions:
if ('STL10' == config.dataset):
train_imgs_tf_curr = dataset_class(root=config.dataset_root, transform=tf2, split=train_partition, target_transform=target_transform)
else:
train_imgs_tf_curr = dataset_class(root=config.dataset_root, transform=tf2, train=train_partition, target_transform=target_transform)
if hasattr(config, 'mix_train'):
if (config.mix_train and (train_partition == 'train+unlabeled')):
train_imgs_tf_curr = reorder_train_deterministic(train_imgs_tf_curr)
train_tf_imgs_list.append(train_imgs_tf_curr)
train_imgs_tf = ConcatDataset(train_tf_imgs_list)
train_tf_dataloader = torch.utils.data.DataLoader(train_imgs_tf, batch_size=config.dataloader_batch_sz, shuffle=shuffle, num_workers=0, drop_last=False)
if (not shuffle):
assert isinstance(train_tf_dataloader.sampler, torch.utils.data.sampler.SequentialSampler)
assert (len(train_dataloader) == len(train_tf_dataloader))
dataloaders.append(train_tf_dataloader)
num_train_batches = len(dataloaders[0])
print(('Length of datasets vector %d' % len(dataloaders)))
print(('Number of batches per epoch: %d' % num_train_batches))
sys.stdout.flush()
return dataloaders |
def make_lazy_wikioscar_dataset(tokenizer, probs: Sequence[float]=(0.23, 0.77), shuffle_buffer_size: int=(10 ** 4), shuffle_seed: Optional[int]=None, preprocessing_batch_size: int=256):
    """Build a lazily-tokenized mix of Bengali Wikipedia and OSCAR.

    The two corpora are merged with sampling probabilities ``probs``,
    buffer-shuffled, tokenized in batches, and wrapped for PyTorch iteration.
    """
    wiki = load_dataset('lhoestq/wikipedia_bn', split='train')
    oscar = load_dataset('oscar', 'unshuffled_deduplicated_bn', split='train', script_version='streaming')
    # Tag every example with its origin so samples can be traced later.
    wiki = wiki.map((lambda x: {'text': x['text'], 'orig': f"wiki[{x['title']}]"}))
    oscar = oscar.map((lambda x: {'text': x['text'], 'orig': f"oscar[{x['id']}]"}))
    mixed = merge_datasets([wiki, oscar], probabilities=list(probs))
    mixed = mixed.shuffle(shuffle_buffer_size, seed=shuffle_seed)
    mixed = mixed.map(partial(tokenize_function, tokenizer), batch_size=preprocessing_batch_size)
    return WrappedIterableDataset(mixed.with_format('torch'))
class Effect6982(BaseEffect):
    """Titan bonus: boosts EM and explosive charge damage for torpedo,
    XL torpedo and XL cruise missile launchers, scaled by 'Gallente Titan'
    skill level."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # The six boosts below are applied in the same order as the original
        # hand-written calls; the launcher skill requirement is bound into
        # the lambda via a default argument to avoid late binding.
        boosts = (
            ('Torpedoes', 'explosiveDamage'),
            ('Torpedoes', 'emDamage'),
            ('XL Torpedoes', 'emDamage'),
            ('XL Torpedoes', 'explosiveDamage'),
            ('XL Cruise Missiles', 'emDamage'),
            ('XL Cruise Missiles', 'explosiveDamage'),
        )
        for launcher_skill, damage_attr in boosts:
            fit.modules.filteredChargeBoost((lambda mod, sk=launcher_skill: mod.item.requiresSkill(sk)), damage_attr, src.getModifiedItemAttr('shipBonusTitanG2'), skill='Gallente Titan', **kwargs)
def is_neg(var):
    """Match a variable that is syntactically the negation of another.

    Returns the negated operand when ``var`` is ``neg(x)`` or a ``mul``
    containing a scalar constant -1 factor; otherwise returns None.
    """
    apply_node = var.owner
    if apply_node is None:
        return None
    # Direct negation: neg(x) -> x.
    if apply_node.op == neg:
        return apply_node.inputs[0]
    # Multiplication by -1: return the product of the remaining factors.
    if apply_node.op == mul and len(apply_node.inputs) >= 2:
        for pos, factor in enumerate(apply_node.inputs):
            try:
                found_minus_one = np.allclose(get_underlying_scalar_constant_value(factor), (- 1))
            except NotScalarConstantError:
                found_minus_one = False
            if not found_minus_one:
                continue
            remaining = apply_node.inputs[0:pos] + apply_node.inputs[(pos + 1):]
            # With exactly one factor left, return it directly instead of
            # wrapping it in a redundant mul.
            return remaining[0] if len(remaining) == 1 else mul(*remaining)
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.